Bump codecov/codecov-action from 3.1.1 to 3.1.4 #54

Open
wants to merge 4 commits into base: main
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -107,7 +107,7 @@ jobs:
command: tarpaulin
args: --run-types Tests,Doctests --features full --workspace --out Xml
- name: Upload to codecov.io
uses: codecov/[email protected].1
uses: codecov/[email protected].4
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
36 changes: 22 additions & 14 deletions examples/async_example.rs
@@ -3,47 +3,55 @@ use stretto::AsyncCache;

#[tokio::main]
async fn main() {
-let c: AsyncCache<&str, &str> = AsyncCache::new(12960, 1e6 as i64, tokio::spawn).unwrap();
+// cache is intended to take ownership of key and value
+let c: AsyncCache<String, String> = AsyncCache::new(12960, 1e6 as i64, tokio::spawn).unwrap();

// set a value with a cost of 1
c.insert("a", "a", 1).await;
c.insert("key1".to_string(), "value1".to_string(), 1).await;

// set a value with a cost of 1 and ttl
c.insert_with_ttl("b", "b", 1, Duration::from_secs(3)).await;
c.insert_with_ttl(
"key2".to_string(),
"value2".to_string(),
1,
Duration::from_secs(3),
)
.await;

// wait for value to pass through buffers
c.wait().await.unwrap();

+// Create a search key
+let key1 = "key1".to_string();
// when we get the value, we will get a ValueRef, which contains a RwLockReadGuard
// so when we finish use this value, we must release the ValueRef
let v = c.get(&"a").await.unwrap();
assert_eq!(v.value(), &"a");
let v = c.get(&key1).await.unwrap();
assert_eq!(v.value(), &"value1");
// release the value
v.release(); // or drop(v)

// lock will be auto released when out of scope
{
// when we get the value, we will get a ValueRef, which contains a RwLockWriteGuard
// so when we finish use this value, we must release the ValueRefMut
let mut v = c.get_mut(&"a").await.unwrap();
v.write("aa");
assert_eq!(v.value(), &"aa");
let mut v = c.get_mut(&key1).await.unwrap();
v.write("value2".to_string());
assert_eq!(v.value(), &"value2");
// release the value
}

// if you just want to do one operation
let v = c.get_mut(&"a").await.unwrap();
v.write_once("aaa");
let v = c.get_mut(&key1).await.unwrap();
v.write_once("value3".to_string());

let v = c.get(&"a").await.unwrap();
println!("{}", v);
assert_eq!(v.value(), &"aaa");
let v = c.get(&key1).await.unwrap();
assert_eq!(v.value(), &"value3");
v.release();

// clear the cache
c.clear().await.unwrap();
// wait all the operations are finished
c.wait().await.unwrap();

assert!(c.get(&"a").await.is_none());
assert!(c.get(&key1).await.is_none());
}
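The rewritten example stores owned `String` keys and values because the cache takes ownership of both, and it uses `insert_with_ttl` without demonstrating expiry. As a small follow-on sketch (not part of this diff), here is what the TTL is for, assuming an entry is no longer returned by `get` once its TTL has elapsed; the `"session"`/`"token"` literals are made up for illustration:

```rust
use std::time::Duration;
use stretto::AsyncCache;

#[tokio::main]
async fn main() {
    // Same constructor arguments as the example above.
    let c: AsyncCache<String, String> =
        AsyncCache::new(12960, 1e6 as i64, tokio::spawn).unwrap();

    // Insert an entry that should only live for one second.
    c.insert_with_ttl("session".to_string(), "token".to_string(), 1, Duration::from_secs(1))
        .await;
    c.wait().await.unwrap();

    // Immediately after the buffers are drained, the entry is visible.
    assert!(c.get(&"session".to_string()).await.is_some());

    // After the TTL has passed, the lookup should come back empty
    // (assuming expired entries are filtered out on lookup).
    tokio::time::sleep(Duration::from_secs(2)).await;
    assert!(c.get(&"session".to_string()).await.is_none());
}
```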
32 changes: 20 additions & 12 deletions examples/sync_example.rs
@@ -2,43 +2,51 @@ use std::time::Duration;
use stretto::Cache;

fn main() {
+// cache is intended to take ownership of key and value
let c = Cache::new(12960, 1e6 as i64).unwrap();

// set a value with a cost of 1
c.insert("a", "a", 1);
c.insert("key1".to_string(), "value1".to_string(), 1);
// set a value with a cost of 1 and ttl
c.insert_with_ttl("b", "b", 1, Duration::from_secs(3));
c.insert_with_ttl(
"key2".to_string(),
"value2".to_string(),
1,
Duration::from_secs(3),
);

// wait for value to pass through buffers
c.wait().unwrap();

+// Create a search key
+let key1 = "key1".to_string();
// when we get the value, we will get a ValueRef, which contains a RwLockReadGuard
// so when we finish use this value, we must release the ValueRef
let v = c.get(&"a").unwrap();
assert_eq!(v.value(), &"a");
let v = c.get(&key1).unwrap();
assert_eq!(v.value(), &"value1");
v.release();

// lock will be auto released when out of scope
{
// when we get the value, we will get a ValueRef, which contains a RwLockWriteGuard
// so when we finish use this value, we must release the ValueRefMut
let mut v = c.get_mut(&"a").unwrap();
v.write("aa");
assert_eq!(v.value(), &"aa");
let mut v = c.get_mut(&key1).unwrap();
v.write("value3".to_string());
assert_eq!(v.value(), &"value3");
// release the value
}

// if you just want to do one operation
let v = c.get_mut(&"a").unwrap();
v.write_once("aaa");
let v = c.get_mut(&key1).unwrap();
v.write_once("value4".to_string());

let v = c.get(&"a").unwrap();
assert_eq!(v.value(), &"aaa");
let v = c.get(&key1).unwrap();
assert_eq!(v.value(), &"value4");
v.release();

// clear the cache
c.clear().unwrap();
// wait all the operations are finished
c.wait().unwrap();
assert!(c.get(&"a").is_none());
assert!(c.get(&key1).is_none());
}
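Every insert in this example passes a cost of 1, so `max_cost` (here `1e6 as i64`) effectively caps the number of entries. The cost unit is whatever the caller chooses, so a common alternative is to charge each entry the byte length of its value, in which case the same `max_cost` caps total bytes instead. A minimal sketch of that pattern (keys and sizes are illustrative, and the admission/eviction policy may drop some entries):

```rust
use stretto::Cache;

fn main() {
    // max_cost is interpreted in the same unit as the per-entry costs below,
    // so with byte-sized costs this cache holds roughly 1 MB of values.
    let c = Cache::new(12960, 1e6 as i64).unwrap();

    for i in 0..100 {
        let key = format!("key{}", i);
        let value = "x".repeat(1024);
        // Charge each entry the byte length of its value.
        let cost = value.len() as i64;
        c.insert(key, value, cost);
    }

    // Wait for the inserts to pass through the internal buffers.
    c.wait().unwrap();

    // Entries may or may not be admitted or retained under pressure,
    // so only check that any value that is present has the expected length.
    if let Some(v) = c.get(&"key1".to_string()) {
        assert_eq!(v.value().len(), 1024);
        v.release();
    }
}
```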
5 changes: 3 additions & 2 deletions src/bbloom.rs
@@ -31,7 +31,8 @@ struct EntriesLocs {
locs: u64,
}

-fn calc_size_by_wrong_positives(num_entries: f64, wrongs: f64) -> EntriesLocs {
+fn calc_size_by_wrong_positives(num_entries: usize, wrongs: f64) -> EntriesLocs {
+let num_entries = num_entries as f64;
let size = -1f64 * num_entries * wrongs.ln() / LN_2.powf(2f64);
let locs = (LN_2 * size / num_entries).ceil();

@@ -56,7 +57,7 @@ impl Bloom {
pub fn new(cap: usize, false_positive_ratio: f64) -> Self {
let entries_locs = {
if false_positive_ratio < 1f64 {
-calc_size_by_wrong_positives(cap as f64, false_positive_ratio)
+calc_size_by_wrong_positives(cap, false_positive_ratio)
} else {
EntriesLocs {
entries: cap as u64,
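For context on the `src/bbloom.rs` change: `calc_size_by_wrong_positives` applies the standard Bloom filter sizing formulas, bits = -n * ln(p) / (ln 2)^2 and hash count = ceil(ln 2 * bits / n); the diff only moves the `usize` to `f64` conversion inside the function. A self-contained sketch of the same arithmetic (the `bloom_size` name and the example numbers are illustrative, not stretto's API):

```rust
use std::f64::consts::LN_2;

/// Classic Bloom filter sizing: given `num_entries` expected items and a target
/// false-positive rate `wrongs`, return (total_bits, hash_function_count).
fn bloom_size(num_entries: usize, wrongs: f64) -> (u64, u64) {
    let n = num_entries as f64;
    let size = -1f64 * n * wrongs.ln() / LN_2.powf(2f64);
    let locs = (LN_2 * size / n).ceil();
    (size as u64, locs as u64)
}

fn main() {
    // For 1,000,000 entries at a 1% false-positive rate this works out to
    // roughly 9.6 bits per entry and 7 hash functions.
    let (bits, hashes) = bloom_size(1_000_000, 0.01);
    println!("{} bits (~{:.1} per entry), {} hash functions", bits, bits as f64 / 1e6, hashes);
    assert_eq!(hashes, 7);
}
```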
140 changes: 21 additions & 119 deletions src/cache/builder.rs
@@ -140,23 +140,9 @@ where
/// set num_counters to 100,000. The important thing is the *number of unique items* in the full cache,
/// not necessarily the `max_cost` value.
#[inline]
-pub fn set_num_counters(self, num_counters: usize) -> Self {
-Self {
-num_counters,
-max_cost: self.max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: self.insert_buffer_size,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: self.cleanup_duration,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-hasher: self.hasher,
-}
+pub fn set_num_counters(mut self, num_counters: usize) -> Self {
+self.num_counters = num_counters;
+self
}

/// Set the max_cost for the Cache.
Expand All @@ -170,43 +156,15 @@ where
///
/// `max_cost` could be anything as long as it matches how you're using the cost values when calling `insert`.
#[inline]
-pub fn set_max_cost(self, max_cost: i64) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: self.insert_buffer_size,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: self.cleanup_duration,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_max_cost(mut self, max_cost: i64) -> Self {
+self.max_cost = max_cost;
+self
}

#[inline]
-pub fn set_buffer_items(self, sz: usize) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost: self.max_cost,
-buffer_items: sz,
-insert_buffer_size: self.insert_buffer_size,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: self.cleanup_duration,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_buffer_items(mut self, sz: usize) -> Self {
+self.buffer_items = sz;
+self
}

/// Set the insert buffer size for the Cache.
Expand All @@ -217,47 +175,19 @@ where
/// try increasing this value in increments of 32 * 1024.
/// This is a fine-tuning mechanism and you probably won't have to touch this.
#[inline]
-pub fn set_buffer_size(self, sz: usize) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost: self.max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: sz,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: self.cleanup_duration,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_buffer_size(mut self, sz: usize) -> Self {
+self.insert_buffer_size = sz;
+self
}

/// Set whether record the metrics or not.
///
/// Metrics is true when you want real-time logging of a variety of stats.
/// The reason this is a CacheBuilderCore flag is because there's a 10% throughput performance overhead.
#[inline]
-pub fn set_metrics(self, val: bool) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost: self.max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: self.insert_buffer_size,
-metrics: val,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: self.cleanup_duration,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_metrics(mut self, val: bool) -> Self {
+self.metrics = val;
+self
}

/// Set whether ignore the internal cost or not.
@@ -266,44 +196,16 @@
/// because the size of stored item in Cache is 56(excluding the size of value).
/// Set it to true to ignore the internal cost.
#[inline]
-pub fn set_ignore_internal_cost(self, val: bool) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost: self.max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: self.insert_buffer_size,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: val,
-cleanup_duration: self.cleanup_duration,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_ignore_internal_cost(mut self, val: bool) -> Self {
+self.ignore_internal_cost = val;
+self
}

/// Set the cleanup ticker for Cache, each tick the Cache will clean the expired entries.
#[inline]
-pub fn set_cleanup_duration(self, d: Duration) -> Self {
-Self {
-num_counters: self.num_counters,
-max_cost: self.max_cost,
-buffer_items: self.buffer_items,
-insert_buffer_size: self.insert_buffer_size,
-metrics: self.metrics,
-callback: self.callback,
-key_to_hash: self.key_to_hash,
-update_validator: self.update_validator,
-coster: self.coster,
-ignore_internal_cost: self.ignore_internal_cost,
-cleanup_duration: d,
-hasher: self.hasher,
-marker_k: self.marker_k,
-marker_v: self.marker_v,
-}
+pub fn set_cleanup_duration(mut self, d: Duration) -> Self {
+self.cleanup_duration = d;
+self
}

/// Set the [`KeyBuilder`] for the Cache
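The `src/cache/builder.rs` diff replaces setters that rebuilt the whole struct field by field with the `mut self` form, which assigns one field and returns `self`. Both keep the same by-value chaining API; the shorter form simply has less boilerplate to keep in sync when fields are added or removed. A standalone sketch of the pattern, using a hypothetical three-field builder rather than stretto's real one:

```rust
use std::time::Duration;

struct CacheBuilder {
    num_counters: usize,
    max_cost: i64,
    cleanup_duration: Duration,
}

impl CacheBuilder {
    // Before: each setter rebuilt the whole struct, naming every field.
    fn set_max_cost_verbose(self, max_cost: i64) -> Self {
        Self {
            num_counters: self.num_counters,
            max_cost,
            cleanup_duration: self.cleanup_duration,
        }
    }

    // After: take `mut self`, assign the one field, return `self`.
    // Same chaining behavior, no field list to maintain.
    fn set_max_cost(mut self, max_cost: i64) -> Self {
        self.max_cost = max_cost;
        self
    }
}

fn main() {
    let b = CacheBuilder {
        num_counters: 12960,
        max_cost: 1e6 as i64,
        cleanup_duration: Duration::from_millis(500),
    };
    // Both setter styles chain identically.
    let b = b.set_max_cost_verbose(2_000_000).set_max_cost(3_000_000);
    assert_eq!(b.max_cost, 3_000_000);
    assert_eq!(b.num_counters, 12960);
}
```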