From 5bca24564356fa2149c89ecc13d006364582df34 Mon Sep 17 00:00:00 2001
From: Dennis Schwerdel
Date: Thu, 16 Mar 2017 20:05:58 +0100
Subject: [PATCH] Arguments

---
 src/{ => cli}/algotest.rs  |   4 +-
 src/cli/args.rs            | 110 ++++++++++++++++++++
 src/cli/mod.rs             | 143 +++++++++++++++++++++++++
 src/main.rs                | 207 +++----------------------------------
 src/repository/metadata.rs |  10 +-
 src/repository/mod.rs      |   2 +-
 src/util/cli.rs            |  51 +++++++++
 src/util/mod.rs            |  36 +------
 8 files changed, 324 insertions(+), 239 deletions(-)
 rename src/{ => cli}/algotest.rs (99%)
 create mode 100644 src/cli/args.rs
 create mode 100644 src/cli/mod.rs
 create mode 100644 src/util/cli.rs

diff --git a/src/algotest.rs b/src/cli/algotest.rs
similarity index 99%
rename from src/algotest.rs
rename to src/cli/algotest.rs
index 1a67b45..f53681c 100644
--- a/src/algotest.rs
+++ b/src/cli/algotest.rs
@@ -2,8 +2,8 @@ use std::io::{Cursor, Read};
 use std::fs::File;
 use std::time;

-use super::chunker::*;
-use super::util::*;
+use ::chunker::*;
+use ::util::*;

 fn speed_chunk(chunker: &mut C, data: &[u8]) {
     let mut input = Cursor::new(data);
diff --git a/src/cli/args.rs b/src/cli/args.rs
new file mode 100644
index 0000000..03a0741
--- /dev/null
+++ b/src/cli/args.rs
@@ -0,0 +1,110 @@
+use docopt::Docopt;
+
+use ::chunker::ChunkerType;
+use ::util::{ChecksumType, Compression, HashMethod};
+
+
+static USAGE: &'static str = "
+Usage:
+    zvault init [--bundle-size SIZE] [--chunker METHOD] [--chunk-size SIZE] [--compression COMPRESSION] <repo>
+    zvault backup [--full] <backup> <path>
+    zvault restore <backup> [<src>] <dst>
+    zvault check [--full] <repo>
+    zvault backups <repo>
+    zvault info <backup>
+    zvault list [--tree] <backup> <path>
+    zvault stats <repo>
+    zvault bundles <repo>
+    zvault algotest <file>
+
+Options:
+    --tree                     Print the whole (sub-)tree from the backup
+    --full                     Whether to verify the repository by loading all bundles
+    --bundle-size SIZE         The target size of a full bundle in MiB [default: 25]
+    --chunker METHOD           The chunking algorithm to use [default: fastcdc]
+    --chunk-size SIZE          The target average chunk size in KiB [default: 8]
+    --compression COMPRESSION  The compression to use [default: brotli/3]
+";
+
+
+#[derive(RustcDecodable, Debug)]
+pub struct DocoptArgs {
+    pub cmd_init: bool,
+    pub cmd_backup: bool,
+    pub cmd_restore: bool,
+    pub cmd_check: bool,
+
+    pub cmd_backups: bool,
+    pub cmd_info: bool,
+    pub cmd_list: bool,
+
+    pub cmd_stats: bool,
+    pub cmd_bundles: bool,
+
+    pub cmd_algotest: bool,
+    pub cmd_stat: bool,
+
+    pub arg_file: Option<String>,
+    pub arg_repo: Option<String>,
+    pub arg_path: Option<String>,
+    pub arg_src: Option<String>,
+    pub arg_dst: Option<String>,
+    pub arg_backup: Option<String>,
+
+    pub flag_full: bool,
+    pub flag_bundle_size: usize,
+    pub flag_chunker: String,
+    pub flag_chunk_size: usize,
+    pub flag_compression: String,
+    pub flag_tree: bool
+}
+
+
+pub enum Arguments {
+    Init {
+        repo_path: String,
+        bundle_size: usize,
+        chunker: ChunkerType,
+        chunk_size: usize,
+        compression: Compression
+    },
+    Backup {
+        repo_path: String,
+        backup_name: String,
+        src_path: String,
+        full: bool
+    },
+    Restore {
+        repo_path: String,
+        backup_name: String,
+        inode: Option<String>,
+        dst_path: String
+    },
+    Check {
+        repo_path: String,
+        backup_name: Option<String>,
+        inode: Option<String>,
+        full: bool
+    },
+    List {
+        repo_path: String,
+        backup_name: Option<String>,
+        inode: Option<String>
+    },
+    Info {
+        repo_path: String,
+        backup_name: Option<String>,
+        inode: Option<String>
+    },
+    ListBundles {
+        repo_path: String
+    },
+    AlgoTest {
+        file: String
+    }
+}
+
+
+pub fn parse() -> DocoptArgs {
+    Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit())
+}
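A minimal sketch of how docopt would decode an invocation into the DocoptArgs struct above, assuming docopt's argv() builder (which skips the program name in the first element) and the USAGE string from this patch; the values are invented for illustration:

    // Illustrative only, not part of the patch.
    let argv = ["zvault", "init", "--chunk-size", "16", "/tmp/repo"];
    let args: DocoptArgs = Docopt::new(USAGE)
        .and_then(|d| d.argv(argv.iter()).decode())
        .unwrap_or_else(|e| e.exit());
    assert!(args.cmd_init);
    assert_eq!(args.flag_chunk_size, 16); // KiB here; the cli code scales by 1024
    assert_eq!(args.arg_repo, Some("/tmp/repo".to_string()));

The Arguments enum is not consumed anywhere in this commit; it reads as groundwork for replacing the raw DocoptArgs struct with typed per-command variants.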
diff --git a/src/cli/mod.rs b/src/cli/mod.rs
new file mode 100644
index 0000000..c79e450
--- /dev/null
+++ b/src/cli/mod.rs
@@ -0,0 +1,143 @@
+mod args;
+mod algotest;
+
+use chrono::prelude::*;
+
+use ::chunker::ChunkerType;
+use ::repository::{Repository, Config, Inode};
+use ::util::{ChecksumType, Compression, HashMethod};
+use ::util::cli::*;
+
+
+pub fn run() {
+    let args = args::parse();
+
+    if args.cmd_algotest {
+        let file = args.arg_file.unwrap();
+        algotest::run(&file);
+        return
+    }
+
+    if args.cmd_init {
+        let chunker = ChunkerType::from(&args.flag_chunker, args.flag_chunk_size*1024, 0).expect("No such chunk algorithm");
+        let compression = if args.flag_compression == "none" {
+            None
+        } else {
+            Some(Compression::from_string(&args.flag_compression).expect("Failed to parse compression"))
+        };
+        Repository::create(&args.arg_repo.unwrap(), Config {
+            bundle_size: args.flag_bundle_size*1024*1024,
+            checksum: ChecksumType::Blake2_256,
+            chunker: chunker,
+            compression: compression,
+            hash: HashMethod::Blake2
+        }).unwrap();
+        return
+    }
+
+    if args.cmd_stat {
+        println!("{:?}", Inode::get_from(&args.arg_path.unwrap()).unwrap());
+        return
+    }
+
+
+    let mut repo;
+    if let Some(path) = args.arg_repo {
+        repo = Repository::open(path).unwrap();
+    } else if let Some(ref backup) = args.arg_backup {
+        let path = backup.splitn(2, "::").nth(0).unwrap();
+        repo = Repository::open(path).unwrap();
+    } else {
+        panic!("Repository is needed");
+    }
+
+    if args.cmd_check {
+        repo.check(args.flag_full).unwrap();
+        return
+    }
+
+    if args.cmd_stats {
+        let info = repo.info();
+        println!("Bundles: {}", info.bundle_count);
+        println!("Total size: {}", to_file_size(info.encoded_data_size));
+        println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
+        println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
+        println!("Chunk count: {}", info.chunk_count);
+        println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
+        let index_usage = info.index_entries as f32 / info.index_capacity as f32;
+        println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
+        return
+    }
+
+    if args.cmd_backups {
+        for backup in repo.list_backups().unwrap() {
+            println!("{}", backup);
+        }
+        return
+    }
+
+    if args.cmd_bundles {
+        for bundle in repo.list_bundles() {
+            println!("Bundle {}", bundle.id);
+            println!(" - Chunks: {}", bundle.chunk_count);
+            println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
+            println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
+            let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
+            let compression = if let Some(ref c) = bundle.compression {
+                c.to_string()
+            } else {
+                "none".to_string()
+            };
+            println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
+            println!();
+        }
+        return
+    }
+
+    let backup_name = args.arg_backup.unwrap().splitn(2, "::").nth(1).unwrap().to_string();
+
+    if args.cmd_backup {
+        let backup = repo.create_full_backup(&args.arg_path.unwrap()).unwrap();
+        repo.save_backup(&backup, &backup_name).unwrap();
+        return
+    }
+
+    let backup = repo.get_backup(&backup_name).unwrap();
+
+    if args.cmd_info {
+        println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
+        println!("Duration: {}", to_duration(backup.duration));
+        println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
+        println!("Total backup size: {}", to_file_size(backup.total_data_size));
+        println!("Modified data size: {}", to_file_size(backup.changed_data_size));
+        let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
+        println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
+        let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
+        println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
+        println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
+        return
+    }
+
+    if args.cmd_restore {
+        let dst = args.arg_dst.unwrap();
+        if let Some(src) = args.arg_src {
+            let inode = repo.get_backup_inode(&backup, src).unwrap();
+            repo.restore_inode_tree(inode, &dst).unwrap();
+        } else {
+            repo.restore_backup(&backup, &dst).unwrap();
+        }
+        return
+    }
+
+    if args.cmd_list {
+        let inode = repo.get_backup_inode(&backup, &args.arg_path.unwrap()).unwrap();
+        println!("{}", format_inode_one_line(&inode));
+        if let Some(children) = inode.children {
+            for chunks in children.values() {
+                let inode = repo.get_inode(chunks).unwrap();
+                println!("- {}", format_inode_one_line(&inode));
+            }
+        }
+        return
+    }
+}
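A note on the backup addressing used above (values invented for illustration): a <backup> argument packs the repository path and the backup name into one string, separated by "::", which is why cli/mod.rs splits the same argument twice:

    // Illustrative only, not part of the patch.
    let backup_arg = "/tmp/repo::daily";
    let repo_path = backup_arg.splitn(2, "::").nth(0).unwrap(); // "/tmp/repo"
    let name      = backup_arg.splitn(2, "::").nth(1).unwrap(); // "daily"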
diff --git a/src/main.rs b/src/main.rs
index b506e27..0f892ca 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -16,199 +16,22 @@ pub mod bundle;
 pub mod index;
 mod chunker;
 mod repository;
-mod algotest;
-
-use docopt::Docopt;
-use chrono::prelude::*;
-
-use chunker::ChunkerType;
-use repository::{Repository, Config, Inode};
-use util::{ChecksumType, Compression, HashMethod, to_file_size, to_duration};
-
-
-static USAGE: &'static str = "
-Usage:
-    zvault init [--bundle-size SIZE] [--chunker METHOD] [--chunk-size SIZE] [--compression COMPRESSION] <repo>
-    zvault backup [--full] <backup> <path>
-    zvault restore <backup> [<src>] <dst>
-    zvault check [--full] <repo>
-    zvault backups <repo>
-    zvault info <backup>
-    zvault list <backup> <path>
-    zvault stats <repo>
-    zvault bundles <repo>
-    zvault algotest <path>
-    zvault stat <path>
-
-Options:
-    --full                     Whether to verify the repository by loading all bundles
-    --bundle-size SIZE         The target size of a full bundle in MiB [default: 25]
-    --chunker METHOD           The chunking algorithm to use [default: fastcdc]
-    --chunk-size SIZE          The target average chunk size in KiB [default: 8]
-    --compression COMPRESSION  The compression to use [default: brotli/3]
-";
-
-
-#[derive(RustcDecodable, Debug)]
-struct Args {
-    cmd_init: bool,
-    cmd_backup: bool,
-    cmd_restore: bool,
-    cmd_check: bool,
-
-    cmd_backups: bool,
-    cmd_info: bool,
-    cmd_list: bool,
-
-    cmd_stats: bool,
-    cmd_bundles: bool,
-
-    cmd_algotest: bool,
-    cmd_stat: bool,
-
-    arg_repo: Option<String>,
-    arg_path: Option<String>,
-    arg_src: Option<String>,
-    arg_dst: Option<String>,
-    arg_backup: Option<String>,
-
-    flag_full: bool,
-    flag_bundle_size: usize,
-    flag_chunker: String,
-    flag_chunk_size: usize,
-    flag_compression: String
-}
+mod cli;
+// TODO: Separate remote folder
+// TODO: Copy backup files to remote folder
+// TODO: Keep meta bundles also locally
+// TODO: Store bundle type in bundle
+// TODO: Remove backups (based on age like attic)
+// TODO: Backup files tree structure
+// TODO: Recompress & combine bundles
+// TODO: Check backup integrity
+// TODO: Encryption
+// TODO: list --tree
+// TODO: Partial backups
+// TODO: Load and compare remote bundles to bundle map
+// TODO: Nice errors / checks for CLI

 fn main() {
-    let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit());
-    //println!("{:?}", args);
-
-    if args.cmd_algotest {
-        algotest::run(&args.arg_path.unwrap());
-        return
-    }
-
-    if args.cmd_init {
-        let chunker = ChunkerType::from(&args.flag_chunker, args.flag_chunk_size*1024, 0).expect("No such chunk algorithm");
-        let compression = if args.flag_compression == "none" {
-            None
-        } else {
-            Some(Compression::from_string(&args.flag_compression).expect("Failed to parse compression"))
-        };
-        Repository::create(&args.arg_repo.unwrap(), Config {
-            bundle_size: args.flag_bundle_size*1024*1024,
-            checksum: ChecksumType::Blake2_256,
-            chunker: chunker,
-            compression: compression,
-            hash: HashMethod::Blake2
-        }).unwrap();
-        return
-    }
-
-    if args.cmd_stat {
-        println!("{:?}", Inode::get_from(&args.arg_path.unwrap()).unwrap());
-        return
-    }
-
-
-    let mut repo;
-    if let Some(path) = args.arg_repo {
-        repo = Repository::open(path).unwrap();
-    } else if let Some(ref backup) = args.arg_backup {
-        let path = backup.splitn(2, "::").nth(0).unwrap();
-        repo = Repository::open(path).unwrap();
-    } else {
-        panic!("Repository is needed");
-    }
-
-    if args.cmd_check {
-        repo.check(args.flag_full).unwrap();
-        return
-    }
-
-    if args.cmd_stats {
-        let info = repo.info();
-        println!("Bundles: {}", info.bundle_count);
-        println!("Total size: {}", to_file_size(info.encoded_data_size));
-        println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
-        println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
-        println!("Chunk count: {}", info.chunk_count);
-        println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
-        let index_usage = info.index_entries as f32 / info.index_capacity as f32;
-        println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
-        return
-    }
-
-    if args.cmd_backups {
-        for backup in repo.list_backups().unwrap() {
-            println!("{}", backup);
-        }
-        return
-    }
-
-    if args.cmd_bundles {
-        for bundle in repo.list_bundles() {
-            println!("Bundle {}", bundle.id);
-            println!(" - Chunks: {}", bundle.chunk_count);
-            println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
-            println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
-            let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
-            let compression = if let Some(ref c) = bundle.compression {
-                c.to_string()
-            } else {
-                "none".to_string()
-            };
-            println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
-            println!();
-        }
-        return
-    }
-
-    let backup_name = args.arg_backup.unwrap().splitn(2, "::").nth(1).unwrap().to_string();
-
-    if args.cmd_backup {
-        let backup = repo.create_full_backup(&args.arg_path.unwrap()).unwrap();
-        repo.save_backup(&backup, &backup_name).unwrap();
-        return
-    }
-
-    let backup = repo.get_backup(&backup_name).unwrap();
-
-    if args.cmd_info {
-        println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
-        println!("Duration: {}", to_duration(backup.duration));
-        println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
-        println!("Total backup size: {}", to_file_size(backup.total_data_size));
-        println!("Modified data size: {}", to_file_size(backup.changed_data_size));
-        let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
-        println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
-        let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
-        println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
-        println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
-        return
-    }
-
-    if args.cmd_restore {
-        let dst = args.arg_dst.unwrap();
-        if let Some(src) = args.arg_src {
-            let inode = repo.get_backup_inode(&backup, src).unwrap();
-            repo.restore_inode_tree(inode, &dst).unwrap();
-        } else {
-            repo.restore_backup(&backup, &dst).unwrap();
-        }
-        return
-    }
-
-    if args.cmd_list {
-        let inode = repo.get_backup_inode(&backup, &args.arg_path.unwrap()).unwrap();
-        println!("{}", inode.format_one_line());
-        if let Some(children) = inode.children {
-            for chunks in children.values() {
-                let inode = repo.get_inode(&chunks).unwrap();
-                println!("- {}", inode.format_one_line());
-            }
-        }
-        return
-    }
+    cli::run();
 }
diff --git a/src/repository/metadata.rs b/src/repository/metadata.rs
index 3c9720b..e868e08 100644
--- a/src/repository/metadata.rs
+++ b/src/repository/metadata.rs
@@ -143,14 +143,6 @@ impl Inode {
         // https://crates.io/crates/filetime
         Ok(file)
     }
-
-    pub fn format_one_line(&self) -> String {
-        match self.file_type {
-            FileType::Directory => format!("{:25}\t{} entries", format!("{}/", self.name), self.children.as_ref().unwrap().len()),
-            FileType::File => format!("{:25}\t{}", self.name, to_file_size(self.size)),
-            FileType::Symlink => format!("{:25}\t -> {}", self.name, self.symlink_target.as_ref().unwrap()),
-        }
-    }
 }


@@ -169,7 +161,7 @@ impl Repository {
             inode.contents = Some(FileContents::ChunkedDirect(chunks));
         } else {
             let chunks_data = try!(msgpack::encode(&chunks));
-            chunks = try!(self.put_data(Mode::Meta, &chunks_data));
+            chunks = try!(self.put_data(Mode::Content, &chunks_data));
             inode.contents = Some(FileContents::ChunkedIndirect(chunks));
         }
     }
diff --git a/src/repository/mod.rs b/src/repository/mod.rs
index f21716a..fbeee89 100644
--- a/src/repository/mod.rs
+++ b/src/repository/mod.rs
@@ -18,7 +18,7 @@ use super::chunker::Chunker;

 pub use self::error::RepositoryError;
 pub use self::config::Config;
-pub use self::metadata::Inode;
+pub use self::metadata::{Inode, FileType};
 pub use self::basic_io::Chunk;
 pub use self::backup::Backup;
 use self::bundle_map::BundleMap;
diff --git a/src/util/cli.rs b/src/util/cli.rs
new file mode 100644
index 0000000..48662a9
--- /dev/null
+++ b/src/util/cli.rs
@@ -0,0 +1,51 @@
+use ::repository::{Inode, FileType};
+
+pub fn split_repo_path(repo_path: &str) -> (&str, Option<&str>, Option<&str>) {
+    let mut parts = repo_path.splitn(3, "::");
+    let repo = parts.next().unwrap();
+    let backup = parts.next();
+    let inode = parts.next();
+    (repo, backup, inode)
+}
+
+pub fn to_file_size(size: u64) -> String {
+    let mut size = size as f32;
+    if size >= 512.0 {
+        size /= 1024.0;
+    } else {
+        return format!("{:.0} Bytes", size);
+    }
+    if size >= 512.0 {
+        size /= 1024.0;
+    } else {
+        return format!("{:.1} KiB", size);
+    }
+    if size >= 512.0 {
+        size /= 1024.0;
+    } else {
+        return format!("{:.1} MiB", size);
+    }
+    if size >= 512.0 {
+        size /= 1024.0;
+    } else {
+        return format!("{:.1} GiB", size);
+    }
+    format!("{:.1} TiB", size)
+}
+
+pub fn to_duration(dur: f32) -> String {
+    let secs = dur.floor() as u64;
+    let subsecs = dur - dur.floor();
+    let hours = secs / 3600;
+    let mins = (secs / 60) % 60;
+    let secs = (secs % 60) as f32 + subsecs;
+    format!("{}:{:02}:{:04.1}", hours, mins, secs)
+}
+
+pub fn format_inode_one_line(inode: &Inode) -> String {
+    match inode.file_type {
+        FileType::Directory => format!("{:25}\t{} entries", format!("{}/", inode.name), inode.children.as_ref().unwrap().len()),
+        FileType::File => format!("{:25}\t{}", inode.name, to_file_size(inode.size)),
+        FileType::Symlink => format!("{:25}\t -> {}", inode.name, inode.symlink_target.as_ref().unwrap()),
+    }
+}
diff --git a/src/util/mod.rs b/src/util/mod.rs
index f5673db..e560fda 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -3,6 +3,7 @@ mod compression;
 mod encryption;
 mod hash;
 mod lru_cache;
+pub mod cli;
 pub mod msgpack;

 pub use self::checksum::*;
@@ -10,38 +11,3 @@ pub use self::compression::*;
 pub use self::encryption::*;
 pub use self::hash::*;
 pub use self::lru_cache::*;
-
-
-pub fn to_file_size(size: u64) -> String {
-    let mut size = size as f32;
-    if size >= 512.0 {
-        size /= 1024.0;
-    } else {
-        return format!("{:.0} Bytes", size);
-    }
-    if size >= 512.0 {
-        size /= 1024.0;
-    } else {
-        return format!("{:.1} KiB", size);
-    }
-    if size >= 512.0 {
-        size /= 1024.0;
-    } else {
-        return format!("{:.1} MiB", size);
-    }
-    if size >= 512.0 {
-        size /= 1024.0;
-    } else {
-        return format!("{:.1} GiB", size);
-    }
-    format!("{:.1} TiB", size)
-}
-
-pub fn to_duration(dur: f32) -> String {
-    let secs = dur.floor() as u64;
-    let subsecs = dur - dur.floor();
-    let hours = secs / 3600;
-    let mins = (secs / 60) % 60;
-    let secs = (secs % 60) as f32 + subsecs;
-    format!("{}:{:02}:{:04.1}", hours, mins, secs)
-}
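The helpers moved into src/util/cli.rs are small enough to pin down by example. A sketch of their expected behaviour, derived from the definitions above (values invented; note that split_repo_path is added here but not yet called anywhere in this patch, presumably groundwork for the Arguments enum):

    // Illustrative only, not part of the patch.
    use ::util::cli::{split_repo_path, to_file_size, to_duration};

    // repo[::backup[::inode]] splits into at most three parts:
    assert_eq!(split_repo_path("/tmp/repo::daily::etc/passwd"),
               ("/tmp/repo", Some("daily"), Some("etc/passwd")));
    assert_eq!(split_repo_path("/tmp/repo"), ("/tmp/repo", None, None));

    // Units switch once a value reaches 512 of the current unit, not 1024:
    assert_eq!(to_file_size(100), "100 Bytes");
    assert_eq!(to_file_size(600), "0.6 KiB");
    assert_eq!(to_file_size(2 * 1024 * 1024), "2.0 MiB");

    // Durations are rendered as H:MM:SS.s from fractional seconds:
    assert_eq!(to_duration(3723.5), "1:02:03.5");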