Skip to content

Commit

Permalink
Remove the temporary caching feature flag (#622)
Browse files Browse the repository at this point in the history
* Remove the temporary caching feature flag

Signed-off-by: Alessandro Passaro <[email protected]>

* Update changelog

Signed-off-by: Alessandro Passaro <[email protected]>

---------

Signed-off-by: Alessandro Passaro <[email protected]>
  • Loading branch information
passaro authored Nov 22, 2023
1 parent 3c5f93d commit 6d5bb1f
Show file tree
Hide file tree
Showing 3 changed files with 52 additions and 66 deletions.
6 changes: 6 additions & 0 deletions mountpoint-s3/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
## Unreleased

### New features
* Introduced optional caching of object metadata and content, in order to allow reduced cost and improved performance for repeated reads to the same files. ([#622](https://github.com/awslabs/mountpoint-s3/pull/622))

### Breaking changes
* No breaking changes.

## v1.1.1 (November 14, 2023)

### Breaking changes
Expand Down
3 changes: 0 additions & 3 deletions mountpoint-s3/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -68,9 +68,6 @@ tokio = { version = "1.24.2", features = ["rt", "macros"] }
walkdir = "2.3.3"

[features]
# Experimental features
caching = []

# Test features
fips_tests = []
fuse_tests = []
Expand Down
109 changes: 46 additions & 63 deletions mountpoint-s3/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,12 @@ use std::time::Duration;
use anyhow::{anyhow, Context as _};
use clap::{value_parser, Parser};
use fuser::{MountOption, Session};
#[cfg(feature = "caching")]
use mountpoint_s3::data_cache::ManagedCacheDir;
use mountpoint_s3::fs::S3FilesystemConfig;
use mountpoint_s3::data_cache::{CacheLimit, DiskDataCache, DiskDataCacheConfig, ManagedCacheDir};
use mountpoint_s3::fs::{CacheConfig, S3FilesystemConfig};
use mountpoint_s3::fuse::session::FuseSession;
use mountpoint_s3::fuse::S3FuseFilesystem;
use mountpoint_s3::logging::{init_logging, LoggingConfig};
use mountpoint_s3::prefetch::{default_prefetch, Prefetch};
use mountpoint_s3::prefetch::{caching_prefetch, default_prefetch, Prefetch};
use mountpoint_s3::prefix::Prefix;
use mountpoint_s3::{autoconfigure, metrics};
use mountpoint_s3_client::config::{AddressingStyle, EndpointConfig, S3ClientAuthConfig, S3ClientConfig};
Expand All @@ -37,7 +36,6 @@ const MOUNT_OPTIONS_HEADER: &str = "Mount options";
const BUCKET_OPTIONS_HEADER: &str = "Bucket options";
const AWS_CREDENTIALS_OPTIONS_HEADER: &str = "AWS credentials options";
const LOGGING_OPTIONS_HEADER: &str = "Logging options";
#[cfg(feature = "caching")]
const CACHING_OPTIONS_HEADER: &str = "Caching options";
const ADVANCED_OPTIONS_HEADER: &str = "Advanced options";

Expand Down Expand Up @@ -222,7 +220,6 @@ struct CliArgs {
)]
pub no_log: bool,

#[cfg(feature = "caching")]
#[clap(
long,
help = "Enable caching of object metadata and content to the given directory",
Expand All @@ -231,7 +228,6 @@ struct CliArgs {
)]
pub cache: Option<PathBuf>,

#[cfg(feature = "caching")]
#[clap(
long,
help = "Time-to-live (TTL) for cached metadata in seconds [default: 1s]",
Expand All @@ -242,7 +238,6 @@ struct CliArgs {
)]
pub metadata_ttl: Option<Duration>,

#[cfg(feature = "caching")]
#[clap(
long,
help = "Maximum size of the cache directory in MiB [default: preserve 5% of available space]",
Expand Down Expand Up @@ -506,13 +501,11 @@ fn mount(args: CliArgs) -> anyhow::Result<FuseSession> {
if args.read_only {
user_agent.value("mp-readonly");
}
#[cfg(feature = "caching")]
{
if args.cache.is_some() {
user_agent.value("mp-cache");
if let Some(ttl) = args.metadata_ttl {
user_agent.key_value("mp-cache-ttl", &ttl.as_secs().to_string());
}

if args.cache.is_some() {
user_agent.value("mp-cache");
if let Some(ttl) = args.metadata_ttl {
user_agent.key_value("mp-cache-ttl", &ttl.as_secs().to_string());
}
}

Expand Down Expand Up @@ -561,55 +554,46 @@ fn mount(args: CliArgs) -> anyhow::Result<FuseSession> {

let prefetcher_config = Default::default();

#[cfg(feature = "caching")]
{
use mountpoint_s3::data_cache::{CacheLimit, DiskDataCache, DiskDataCacheConfig};
use mountpoint_s3::fs::CacheConfig;
use mountpoint_s3::prefetch::caching_prefetch;

if let Some(path) = args.cache {
let metadata_cache_ttl = args.metadata_ttl.unwrap_or(Duration::from_secs(1));
filesystem_config.cache_config = CacheConfig {
serve_lookup_from_cache: true,
dir_ttl: metadata_cache_ttl,
file_ttl: metadata_cache_ttl,
};

let cache_config = match args.max_cache_size {
// Fallback to no data cache.
Some(0) => None,
Some(max_size_in_mib) => Some(DiskDataCacheConfig {
limit: CacheLimit::TotalSize {
max_size: (max_size_in_mib * 1024 * 1024) as usize,
},
..Default::default()
}),
None => Some(DiskDataCacheConfig::default()),
};
if let Some(path) = args.cache {
let metadata_cache_ttl = args.metadata_ttl.unwrap_or(Duration::from_secs(1));
filesystem_config.cache_config = CacheConfig {
serve_lookup_from_cache: true,
dir_ttl: metadata_cache_ttl,
file_ttl: metadata_cache_ttl,
};

if let Some(cache_config) = cache_config {
let managed_cache_dir =
ManagedCacheDir::new_from_parent(path).context("failed to create cache directory")?;
let cache = DiskDataCache::new(managed_cache_dir.as_path_buf(), cache_config);
let prefetcher = caching_prefetch(cache, runtime, prefetcher_config);
let mut fuse_session = create_filesystem(
client,
prefetcher,
&args.bucket_name,
&prefix,
filesystem_config,
fuse_config,
&bucket_description,
);

if let Ok(session) = &mut fuse_session {
session.run_on_close(Box::new(move || {
drop(managed_cache_dir);
}));
}
let cache_config = match args.max_cache_size {
// Fallback to no data cache.
Some(0) => None,
Some(max_size_in_mib) => Some(DiskDataCacheConfig {
limit: CacheLimit::TotalSize {
max_size: (max_size_in_mib * 1024 * 1024) as usize,
},
..Default::default()
}),
None => Some(DiskDataCacheConfig::default()),
};

return fuse_session;
}
if let Some(cache_config) = cache_config {
let managed_cache_dir =
ManagedCacheDir::new_from_parent(path).context("failed to create cache directory")?;
let cache = DiskDataCache::new(managed_cache_dir.as_path_buf(), cache_config);
let prefetcher = caching_prefetch(cache, runtime, prefetcher_config);
let mut fuse_session = create_filesystem(
client,
prefetcher,
&args.bucket_name,
&prefix,
filesystem_config,
fuse_config,
&bucket_description,
)?;

fuse_session.run_on_close(Box::new(move || {
drop(managed_cache_dir);
}));

return Ok(fuse_session);
}
}

Expand Down Expand Up @@ -742,7 +726,6 @@ fn parse_bucket_name(bucket_name: &str) -> anyhow::Result<String> {
Ok(bucket_name.to_owned())
}

#[cfg(feature = "caching")]
fn parse_duration_seconds(seconds_str: &str) -> anyhow::Result<Duration> {
let seconds = seconds_str.parse()?;
let duration = Duration::from_secs(seconds);
Expand Down

1 comment on commit 6d5bb1f

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Performance Alert ⚠️

Possible performance regression was detected for benchmark.
The benchmark result of this commit is worse than the previous benchmark result by a ratio exceeding the threshold of 2.

Benchmark suite Current: 6d5bb1f Previous: 3c5f93d Ratio
random_read_four_threads 9.26748046875 MiB/s 24.1294921875 MiB/s 2.60
random_read 1.57490234375 MiB/s 3.64873046875 MiB/s 2.32

This comment was automatically generated by a workflow using github-action-benchmark.

Please sign in to comment.