diff --git a/fclones/src/config.rs b/fclones/src/config.rs
index 78a4452..a051016 100644
--- a/fclones/src/config.rs
+++ b/fclones/src/config.rs
@@ -749,8 +749,16 @@ const fn after_help() -> &'static str {
 #[derive(clap::Parser, Debug)]
 #[command(about, author, version, after_help = after_help(), max_term_width = 100)]
 pub struct Config {
-    /// Suppress progress reporting
-    #[arg(short('q'), long)]
+    /// Override progress reporting, by default (=auto) only report when stderr is a terminal.
+    /// Possible values: true, false, auto.
+    #[arg(long, value_name = "VAL", require_equals = true,
+          value_parser(["auto", "true", "false"]), default_value = "auto",
+
+          hide_possible_values = true, hide_default_value = true)]
+    pub progress: String,
+
+    // compatibility with fclones <= 0.34, overrides --progress
+    #[arg(short('q'), long, hide = true)]
     pub quiet: bool,
 
     /// Find files
diff --git a/fclones/src/group.rs b/fclones/src/group.rs
index 4a2a0b6..9069391 100644
--- a/fclones/src/group.rs
+++ b/fclones/src/group.rs
@@ -889,9 +889,7 @@ fn update_file_locations(ctx: &GroupCtx<'_>, groups: &mut (impl FileCollection +
                 // Do not print a notice about slower access when fetching file extents has
                 // failed because a file vanished -- now it will never be accessed anyhow.
                 const ENOENT_NO_SUCH_FILE: i32 = 2;
-                if e.raw_os_error()
-                    .map_or(true, |err| err != ENOENT_NO_SUCH_FILE)
-                {
+                if e.raw_os_error() != Some(ENOENT_NO_SUCH_FILE) {
                     handle_fetch_physical_location_err(ctx, &err_counters, fi, e)
                 }
             }
diff --git a/fclones/src/hasher.rs b/fclones/src/hasher.rs
index 53a6190..f2c6b55 100644
--- a/fclones/src/hasher.rs
+++ b/fclones/src/hasher.rs
@@ -437,7 +437,7 @@ impl FileHasher<'_> {
     }
 }
 
-impl<'a> Drop for FileHasher<'a> {
+impl Drop for FileHasher<'_> {
     fn drop(&mut self) {
         if let Some(cache) = self.cache.take() {
             if let Err(e) = cache.close() {
diff --git a/fclones/src/main.rs b/fclones/src/main.rs
index 0c15ebe..05c8e85 100644
--- a/fclones/src/main.rs
+++ b/fclones/src/main.rs
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::ffi::{OsStr, OsString};
 use std::fs::File;
-use std::io::{stdin, Write};
+use std::io::{stderr, stdin, IsTerminal, Write};
 use std::process::exit;
 use std::sync::Arc;
 use std::{fs, io};
@@ -251,9 +251,12 @@ fn main() {
     }
 
     let mut log = StdLog::new();
-    if config.quiet {
-        log.no_progress = true;
-    }
+    log.no_progress = match (config.quiet, config.progress.as_str()) {
+        (true, _) => true,
+        (_, "false") => true,
+        (_, "true") => false,
+        (_, _auto) => !stderr().is_terminal(),
+    };
 
     let cwd = match std::env::current_dir() {
         Ok(cwd) => cwd,
diff --git a/fclones/src/reflink.rs b/fclones/src/reflink.rs
index 0b66f6c..83f4410 100644
--- a/fclones/src/reflink.rs
+++ b/fclones/src/reflink.rs
@@ -134,7 +134,11 @@ fn reflink_overwrite(target: &std::path::Path, link: &std::path::Path) -> io::Re
     let src = fs::File::open(target)?;
 
     // This operation does not require `.truncate(true)` because the files are already of the same size.
-    let dest = fs::OpenOptions::new().create(true).write(true).open(link)?;
+    let dest = fs::OpenOptions::new()
+        .create(true)
+        .truncate(false)
+        .write(true)
+        .open(link)?;
 
     // From /usr/include/linux/fs.h:
     // #define FICLONE _IOW(0x94, 9, int)
@@ -363,7 +367,7 @@ pub mod test {
         }
     }
 
-    impl<'a> Drop for CrossTest<'a> {
+    impl Drop for CrossTest<'_> {
         fn drop(&mut self) {
             *CROSSTEST.lock().unwrap() = false;
         }
@@ -398,7 +402,7 @@ pub mod test {
         with_dir(test_root, |root| {
             // Always clean up files in /dev/shm, even after failure
             struct CleanupGuard<'a>(&'a str);
-            impl<'a> Drop for CleanupGuard<'a> {
+            impl Drop for CleanupGuard<'_> {
                 fn drop(&mut self) {
                     fs::remove_dir_all(self.0).unwrap();
                 }
diff --git a/fclones/src/report.rs b/fclones/src/report.rs
index 21d14cd..1d46cd3 100644
--- a/fclones/src/report.rs
+++ b/fclones/src/report.rs
@@ -708,7 +708,7 @@ mod test {
         let input = output.reopen().unwrap();
         let mut writer = ReportWriter::new(output, false);
 
-        writer.write_as_text(&header1, groups.into_iter()).unwrap();
+        writer.write_as_text(&header1, groups).unwrap();
 
         let mut reader = TextReportReader::new(BufReader::new(input));
         let header2 = reader.read_header().unwrap();
@@ -841,7 +841,7 @@ mod test {
         let input = output.reopen().unwrap();
         let mut writer = ReportWriter::new(output, false);
 
-        writer.write_as_json(&header1, groups.into_iter()).unwrap();
+        writer.write_as_json(&header1, groups).unwrap();
 
         let mut reader = JsonReportReader::new(input).unwrap();
         let header2 = reader.read_header().unwrap();
diff --git a/fclones/src/semaphore.rs b/fclones/src/semaphore.rs
index 399360a..e4796a4 100644
--- a/fclones/src/semaphore.rs
+++ b/fclones/src/semaphore.rs
@@ -96,7 +96,7 @@ impl Semaphore {
     }
 }
 
-impl<'a> Drop for SemaphoreGuard<'a> {
+impl Drop for SemaphoreGuard<'_> {
     fn drop(&mut self) {
         self.sem.release();
     }
diff --git a/fclones/src/walk.rs b/fclones/src/walk.rs
index 25c0cea..f0bed86 100644
--- a/fclones/src/walk.rs
+++ b/fclones/src/walk.rs
@@ -198,7 +198,7 @@ impl<'a> Walk<'a> {
         for p in roots.into_iter() {
             let p = self.absolute(p);
             let ignore = ignore.clone();
-            match fs::metadata(&p.to_path_buf()) {
+            match fs::metadata(p.to_path_buf()) {
                 Ok(metadata) if metadata.is_dir() && self.depth == 0 => self.log_warn(format!(
                     "Skipping directory {} because recursive scan is disabled.",
                     p.display()
@@ -384,7 +384,7 @@ impl<'a> Walk<'a> {
     }
 
     #[cfg(unix)]
-    fn sort_dir_entries_by_inode(entries: &mut Vec<DirEntry>) {
+    fn sort_dir_entries_by_inode(entries: &mut [DirEntry]) {
         use rayon::prelude::ParallelSliceMut;
         use std::os::unix::fs::DirEntryExt;
         entries.par_sort_unstable_by_key(|entry| entry.ino())
@@ -471,7 +471,7 @@ impl<'a> Walk<'a> {
     }
 }
 
-impl<'a> Default for Walk<'a> {
+impl Default for Walk<'_> {
     fn default() -> Self {
         Self::new()
     }
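Note on the --progress / --quiet change above: the new match in main.rs collapses the
tri-state --progress flag and the legacy -q switch into a single no_progress bool. A
minimal standalone sketch of that resolution logic follows; resolve_no_progress is a
hypothetical helper, not part of the patch, and only std::io::IsTerminal (used by the
real change) is assumed:

use std::io::{stderr, IsTerminal};

// Mirrors the patch's match: -q wins, then an explicit --progress=true/false,
// and "auto" falls back to checking whether stderr is attached to a terminal.
fn resolve_no_progress(quiet: bool, progress: &str) -> bool {
    match (quiet, progress) {
        (true, _) => true,                      // legacy -q/--quiet overrides --progress
        (_, "false") => true,                   // --progress=false
        (_, "true") => false,                   // --progress=true, even when piped
        (_, _auto) => !stderr().is_terminal(),  // "auto": progress only on a TTY
    }
}

fn main() {
    // With stderr redirected to a file, "auto" disables progress, "true" forces it on,
    // and -q suppresses it regardless of --progress.
    println!("auto => no_progress = {}", resolve_no_progress(false, "auto"));
    println!("true => no_progress = {}", resolve_no_progress(false, "true"));
    println!("-q   => no_progress = {}", resolve_no_progress(true, "true"));
}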