mirror of https://github.com/dswd/zvault

Commit 5bca245643 ("Arguments"), parent 542ecb4ae0
@@ -2,8 +2,8 @@ use std::io::{Cursor, Read};
use std::fs::File;
use std::time;

use super::chunker::*;
use super::util::*;
use ::chunker::*;
use ::util::*;

fn speed_chunk<C: IChunker>(chunker: &mut C, data: &[u8]) {
    let mut input = Cursor::new(data);
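
speed_chunk streams an in-memory buffer through a chunker; a hedged sketch of how a caller might turn that into a throughput number (the helper below is illustrative, not part of this commit):

use std::time;

// Hypothetical driver: measure how fast a chunker scans `data`, in MiB/s.
// `IChunker` and `speed_chunk` come from this repository; the timing loop
// is an assumption for illustration.
fn chunker_throughput<C: IChunker>(chunker: &mut C, data: &[u8]) -> f64 {
    let start = time::Instant::now();
    speed_chunk(chunker, data);
    let elapsed = start.elapsed();
    let secs = elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9;
    data.len() as f64 / secs / (1024.0 * 1024.0)
}
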
@@ -0,0 +1,110 @@
use docopt::Docopt;

use ::chunker::ChunkerType;
use ::util::{ChecksumType, Compression, HashMethod};


static USAGE: &'static str = "
Usage:
    zvault init [--bundle-size SIZE] [--chunker METHOD] [--chunk-size SIZE] [--compression COMPRESSION] <repo>
    zvault backup [--full] <backup> <path>
    zvault restore <backup> [<src>] <dst>
    zvault check [--full] <repo>
    zvault backups <repo>
    zvault info <backup>
    zvault list [--tree] <backup> <path>
    zvault stats <repo>
    zvault bundles <repo>
    zvault algotest <file>

Options:
    --tree                       Print the whole (sub-)tree from the backup
    --full                       Whether to verify the repository by loading all bundles
    --bundle-size SIZE           The target size of a full bundle in MiB [default: 25]
    --chunker METHOD             The chunking algorithm to use [default: fastcdc]
    --chunk-size SIZE            The target average chunk size in KiB [default: 8]
    --compression COMPRESSION    The compression to use [default: brotli/3]
";


#[derive(RustcDecodable, Debug)]
pub struct DocoptArgs {
    pub cmd_init: bool,
    pub cmd_backup: bool,
    pub cmd_restore: bool,
    pub cmd_check: bool,

    pub cmd_backups: bool,
    pub cmd_info: bool,
    pub cmd_list: bool,

    pub cmd_stats: bool,
    pub cmd_bundles: bool,

    pub cmd_algotest: bool,
    pub cmd_stat: bool,

    pub arg_file: Option<String>,
    pub arg_repo: Option<String>,
    pub arg_path: Option<String>,
    pub arg_src: Option<String>,
    pub arg_dst: Option<String>,
    pub arg_backup: Option<String>,

    pub flag_full: bool,
    pub flag_bundle_size: usize,
    pub flag_chunker: String,
    pub flag_chunk_size: usize,
    pub flag_compression: String,
    pub flag_tree: bool
}

pub enum Arguments {
    Init {
        repo_path: String,
        bundle_size: usize,
        chunker: ChunkerType,
        chunk_size: usize,
        compression: Compression
    },
    Backup {
        repo_path: String,
        backup_name: String,
        src_path: String,
        full: bool
    },
    Restore {
        repo_path: String,
        backup_name: String,
        inode: Option<String>,
        dst_path: String
    },
    Check {
        repo_path: String,
        backup_name: Option<String>,
        inode: Option<String>,
        full: bool
    },
    List {
        repo_path: String,
        backup_name: Option<String>,
        inode: Option<String>
    },
    Info {
        repo_path: String,
        backup_name: Option<String>,
        inode: Option<String>
    },
    ListBundles {
        repo_path: String
    },
    AlgoTest {
        file: String
    }
}
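
The Arguments enum is declared in this commit but nothing converts DocoptArgs into it yet. A minimal sketch (an assumption, not the author's code) of how that lowering could look, shown for the backup command only:

// Hypothetical glue: lower the raw docopt struct into the typed enum.
// Only Arguments::Backup is handled here; other commands are omitted.
fn to_arguments(args: DocoptArgs) -> Option<Arguments> {
    if args.cmd_backup {
        // <backup> is "repo_path::backup_name"; both halves must be present.
        let spec = match args.arg_backup { Some(s) => s, None => return None };
        let mut parts = spec.splitn(2, "::");
        let repo_path = parts.next().unwrap().to_string();
        let backup_name = match parts.next() { Some(n) => n.to_string(), None => return None };
        return Some(Arguments::Backup {
            repo_path: repo_path,
            backup_name: backup_name,
            src_path: args.arg_path.unwrap_or_default(),
            full: args.flag_full
        });
    }
    None
}
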
pub fn parse() -> DocoptArgs {
    Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit())
}
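
For readers unfamiliar with the docopt crate: decode() fills the struct by naming convention, so cmd_init mirrors the init command, arg_repo the <repo> positional, and flag_bundle_size the --bundle-size option, with the [default: ...] annotations in the usage string supplying defaults. A typical call site (hypothetical, mirroring how cli/mod.rs uses it):

fn main() {
    // parse() exits the process with a usage message on bad arguments,
    // so the returned struct is always fully populated.
    let args = parse();
    if args.cmd_init {
        println!("init {} with {} MiB bundles",
                 args.arg_repo.unwrap(), args.flag_bundle_size);
    }
}
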
@@ -0,0 +1,143 @@
mod args;
mod algotest;

use chrono::prelude::*;

use ::chunker::ChunkerType;
use ::repository::{Repository, Config, Inode};
use ::util::{ChecksumType, Compression, HashMethod};
use ::util::cli::*;


pub fn run() {
    let args = args::parse();

    if args.cmd_algotest {
        let file = args.arg_file.unwrap();
        algotest::run(&file);
        return
    }

    if args.cmd_init {
        let chunker = ChunkerType::from(&args.flag_chunker, args.flag_chunk_size*1024, 0).expect("No such chunk algorithm");
        let compression = if args.flag_compression == "none" {
            None
        } else {
            Some(Compression::from_string(&args.flag_compression).expect("Failed to parse compression"))
        };
        Repository::create(&args.arg_repo.unwrap(), Config {
            bundle_size: args.flag_bundle_size*1024*1024,
            checksum: ChecksumType::Blake2_256,
            chunker: chunker,
            compression: compression,
            hash: HashMethod::Blake2
        }).unwrap();
        return
    }

    if args.cmd_stat {
        println!("{:?}", Inode::get_from(&args.arg_path.unwrap()).unwrap());
        return
    }


    let mut repo;
    if let Some(path) = args.arg_repo {
        repo = Repository::open(path).unwrap();
    } else if let Some(ref backup) = args.arg_backup {
        let path = backup.splitn(2, "::").nth(0).unwrap();
        repo = Repository::open(path).unwrap();
    } else {
        panic!("Repository is needed");
    }

    if args.cmd_check {
        repo.check(args.flag_full).unwrap();
        return
    }

    if args.cmd_stats {
        let info = repo.info();
        println!("Bundles: {}", info.bundle_count);
        println!("Total size: {}", to_file_size(info.encoded_data_size));
        println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
        println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
        println!("Chunk count: {}", info.chunk_count);
        println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
        let index_usage = info.index_entries as f32 / info.index_capacity as f32;
        println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
        return
    }

    if args.cmd_backups {
        for backup in repo.list_backups().unwrap() {
            println!("{}", backup);
        }
        return
    }

    if args.cmd_bundles {
        for bundle in repo.list_bundles() {
            println!("Bundle {}", bundle.id);
            println!(" - Chunks: {}", bundle.chunk_count);
            println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
            println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
            let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
            let compression = if let Some(ref c) = bundle.compression {
                c.to_string()
            } else {
                "none".to_string()
            };
            println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
            println!();
        }
        return
    }

    let backup_name = args.arg_backup.unwrap().splitn(2, "::").nth(1).unwrap().to_string();

    if args.cmd_backup {
        let backup = repo.create_full_backup(&args.arg_path.unwrap()).unwrap();
        repo.save_backup(&backup, &backup_name).unwrap();
        return
    }

    let backup = repo.get_backup(&backup_name).unwrap();

    if args.cmd_info {
        println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
        println!("Duration: {}", to_duration(backup.duration));
        println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
        println!("Total backup size: {}", to_file_size(backup.total_data_size));
        println!("Modified data size: {}", to_file_size(backup.changed_data_size));
        let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
        println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
        let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
        println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
        println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
        return
    }

    if args.cmd_restore {
        let dst = args.arg_dst.unwrap();
        if let Some(src) = args.arg_src {
            let inode = repo.get_backup_inode(&backup, src).unwrap();
            repo.restore_inode_tree(inode, &dst).unwrap();
        } else {
            repo.restore_backup(&backup, &dst).unwrap();
        }
        return
    }

    if args.cmd_list {
        let inode = repo.get_backup_inode(&backup, &args.arg_path.unwrap()).unwrap();
        println!("{}", format_inode_one_line(&inode));
        if let Some(children) = inode.children {
            for chunks in children.values() {
                let inode = repo.get_inode(chunks).unwrap();
                println!("- {}", format_inode_one_line(&inode));
            }
        }
        return
    }
}
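
The <backup> argument doubles as a repository locator: everything before the first "::" is the repository path and everything after it is the backup name, which is why run() can open the repository from arg_backup alone. An illustration of that parsing (the paths are made up):

// "::" splits a backup specifier into repository path and backup name.
let spec = "/mnt/repo::home-2017-03-21";
let mut parts = spec.splitn(2, "::");
assert_eq!(parts.next(), Some("/mnt/repo"));       // passed to Repository::open
assert_eq!(parts.next(), Some("home-2017-03-21")); // used as backup_name
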
src/main.rs (207 changed lines)
@@ -16,199 +16,22 @@ pub mod bundle;
pub mod index;
mod chunker;
mod repository;
mod algotest;

use docopt::Docopt;
use chrono::prelude::*;

use chunker::ChunkerType;
use repository::{Repository, Config, Inode};
use util::{ChecksumType, Compression, HashMethod, to_file_size, to_duration};


static USAGE: &'static str = "
Usage:
    zvault init [--bundle-size SIZE] [--chunker METHOD] [--chunk-size SIZE] [--compression COMPRESSION] <repo>
    zvault backup [--full] <backup> <path>
    zvault restore <backup> [<src>] <dst>
    zvault check [--full] <repo>
    zvault backups <repo>
    zvault info <backup>
    zvault list <backup> <path>
    zvault stats <repo>
    zvault bundles <repo>
    zvault algotest <path>
    zvault stat <path>

Options:
    --full                       Whether to verify the repository by loading all bundles
    --bundle-size SIZE           The target size of a full bundle in MiB [default: 25]
    --chunker METHOD             The chunking algorithm to use [default: fastcdc]
    --chunk-size SIZE            The target average chunk size in KiB [default: 8]
    --compression COMPRESSION    The compression to use [default: brotli/3]
";


#[derive(RustcDecodable, Debug)]
struct Args {
    cmd_init: bool,
    cmd_backup: bool,
    cmd_restore: bool,
    cmd_check: bool,

    cmd_backups: bool,
    cmd_info: bool,
    cmd_list: bool,

    cmd_stats: bool,
    cmd_bundles: bool,

    cmd_algotest: bool,
    cmd_stat: bool,

    arg_repo: Option<String>,
    arg_path: Option<String>,
    arg_src: Option<String>,
    arg_dst: Option<String>,
    arg_backup: Option<String>,

    flag_full: bool,
    flag_bundle_size: usize,
    flag_chunker: String,
    flag_chunk_size: usize,
    flag_compression: String
}
mod cli;

// TODO: Separate remote folder
// TODO: Copy backup files to remote folder
// TODO: Keep meta bundles also locally
// TODO: Store bundle type in bundle
// TODO: Remove backups (based on age like attic)
// TODO: Backup files tree structure
// TODO: Recompress & combine bundles
// TODO: Check backup integrity
// TODO: Encryption
// TODO: list --tree
// TODO: Partial backups
// TODO: Load and compare remote bundles to bundle map
// TODO: Nice errors / checks for CLI

fn main() {
    let args: Args = Docopt::new(USAGE).and_then(|d| d.decode()).unwrap_or_else(|e| e.exit());
    //println!("{:?}", args);

    if args.cmd_algotest {
        algotest::run(&args.arg_path.unwrap());
        return
    }

    if args.cmd_init {
        let chunker = ChunkerType::from(&args.flag_chunker, args.flag_chunk_size*1024, 0).expect("No such chunk algorithm");
        let compression = if args.flag_compression == "none" {
            None
        } else {
            Some(Compression::from_string(&args.flag_compression).expect("Failed to parse compression"))
        };
        Repository::create(&args.arg_repo.unwrap(), Config {
            bundle_size: args.flag_bundle_size*1024*1024,
            checksum: ChecksumType::Blake2_256,
            chunker: chunker,
            compression: compression,
            hash: HashMethod::Blake2
        }).unwrap();
        return
    }

    if args.cmd_stat {
        println!("{:?}", Inode::get_from(&args.arg_path.unwrap()).unwrap());
        return
    }


    let mut repo;
    if let Some(path) = args.arg_repo {
        repo = Repository::open(path).unwrap();
    } else if let Some(ref backup) = args.arg_backup {
        let path = backup.splitn(2, "::").nth(0).unwrap();
        repo = Repository::open(path).unwrap();
    } else {
        panic!("Repository is needed");
    }

    if args.cmd_check {
        repo.check(args.flag_full).unwrap();
        return
    }

    if args.cmd_stats {
        let info = repo.info();
        println!("Bundles: {}", info.bundle_count);
        println!("Total size: {}", to_file_size(info.encoded_data_size));
        println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
        println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
        println!("Chunk count: {}", info.chunk_count);
        println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
        let index_usage = info.index_entries as f32 / info.index_capacity as f32;
        println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
        return
    }

    if args.cmd_backups {
        for backup in repo.list_backups().unwrap() {
            println!("{}", backup);
        }
        return
    }

    if args.cmd_bundles {
        for bundle in repo.list_bundles() {
            println!("Bundle {}", bundle.id);
            println!(" - Chunks: {}", bundle.chunk_count);
            println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
            println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
            let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
            let compression = if let Some(ref c) = bundle.compression {
                c.to_string()
            } else {
                "none".to_string()
            };
            println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
            println!();
        }
        return
    }

    let backup_name = args.arg_backup.unwrap().splitn(2, "::").nth(1).unwrap().to_string();

    if args.cmd_backup {
        let backup = repo.create_full_backup(&args.arg_path.unwrap()).unwrap();
        repo.save_backup(&backup, &backup_name).unwrap();
        return
    }

    let backup = repo.get_backup(&backup_name).unwrap();

    if args.cmd_info {
        println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
        println!("Duration: {}", to_duration(backup.duration));
        println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
        println!("Total backup size: {}", to_file_size(backup.total_data_size));
        println!("Modified data size: {}", to_file_size(backup.changed_data_size));
        let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
        println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
        let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
        println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
        println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
        return
    }

    if args.cmd_restore {
        let dst = args.arg_dst.unwrap();
        if let Some(src) = args.arg_src {
            let inode = repo.get_backup_inode(&backup, src).unwrap();
            repo.restore_inode_tree(inode, &dst).unwrap();
        } else {
            repo.restore_backup(&backup, &dst).unwrap();
        }
        return
    }

    if args.cmd_list {
        let inode = repo.get_backup_inode(&backup, &args.arg_path.unwrap()).unwrap();
        println!("{}", inode.format_one_line());
        if let Some(children) = inode.children {
            for chunks in children.values() {
                let inode = repo.get_inode(&chunks).unwrap();
                println!("- {}", inode.format_one_line());
            }
        }
        return
    }
    cli::run();
}
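
The net effect of this hunk: everything between the module declarations and fn main() moves into the new cli module, leaving main.rs as a thin shim. Its reconstructed shape (module lines other than those visible in the diff context are assumptions):

// Sketch of src/main.rs after the refactor; the exact module list
// beyond the lines shown in the diff is assumed.
pub mod util;
pub mod bundle;
pub mod index;
mod chunker;
mod repository;
mod cli;

fn main() {
    cli::run();
}
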
@@ -143,14 +143,6 @@ impl Inode {
        // https://crates.io/crates/filetime
        Ok(file)
    }

    pub fn format_one_line(&self) -> String {
        match self.file_type {
            FileType::Directory => format!("{:25}\t{} entries", format!("{}/", self.name), self.children.as_ref().unwrap().len()),
            FileType::File => format!("{:25}\t{}", self.name, to_file_size(self.size)),
            FileType::Symlink => format!("{:25}\t -> {}", self.name, self.symlink_target.as_ref().unwrap()),
        }
    }
}
@@ -169,7 +161,7 @@ impl Repository {
            inode.contents = Some(FileContents::ChunkedDirect(chunks));
        } else {
            let chunks_data = try!(msgpack::encode(&chunks));
            chunks = try!(self.put_data(Mode::Meta, &chunks_data));
            chunks = try!(self.put_data(Mode::Content, &chunks_data));
            inode.contents = Some(FileContents::ChunkedIndirect(chunks));
        }
    }
@@ -18,7 +18,7 @@ use super::chunker::Chunker;

pub use self::error::RepositoryError;
pub use self::config::Config;
pub use self::metadata::Inode;
pub use self::metadata::{Inode, FileType};
pub use self::basic_io::Chunk;
pub use self::backup::Backup;
use self::bundle_map::BundleMap;
@@ -0,0 +1,51 @@
use ::repository::{Inode, FileType};

pub fn split_repo_path(repo_path: &str) -> (&str, Option<&str>, Option<&str>) {
    let mut parts = repo_path.splitn(3, "::");
    let repo = parts.next().unwrap();
    let backup = parts.next();
    let inode = parts.next();
    (repo, backup, inode)
}
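
split_repo_path generalizes the "::" convention to an optional third component naming a path inside the backup; for example:

// Full form: repository :: backup name :: path inside the backup.
assert_eq!(split_repo_path("/mnt/repo::daily::/etc/fstab"),
           ("/mnt/repo", Some("daily"), Some("/etc/fstab")));
// Missing components simply come back as None.
assert_eq!(split_repo_path("/mnt/repo"), ("/mnt/repo", None, None));
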

pub fn to_file_size(size: u64) -> String {
    let mut size = size as f32;
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.0} Bytes", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} KiB", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} MiB", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} GiB", size);
    }
    format!("{:.1} TiB", size)
}
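
Note the 512 threshold: values are promoted to the next unit once they reach half of it, keeping the displayed number small. A few worked examples derived from the function above:

assert_eq!(to_file_size(500), "500 Bytes");        // below 512, stays in bytes
assert_eq!(to_file_size(600), "0.6 KiB");          // promoted at 512, not 1024
assert_eq!(to_file_size(2048), "2.0 KiB");
assert_eq!(to_file_size(5 * 1024 * 1024), "5.0 MiB");
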

pub fn to_duration(dur: f32) -> String {
    let secs = dur.floor() as u64;
    let subsecs = dur - dur.floor();
    let hours = secs / 3600;
    let mins = (secs / 60) % 60;
    let secs = (secs % 60) as f32 + subsecs;
    format!("{}:{:02}:{:04.1}", hours, mins, secs)
}
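
to_duration prints hours unpadded, then zero-padded minutes and seconds with one decimal on the seconds; for instance:

assert_eq!(to_duration(3723.5), "1:02:03.5"); // 1 h, 2 min, 3.5 s
assert_eq!(to_duration(59.96), "0:00:60.0");  // rounding can display 60.0 s
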

pub fn format_inode_one_line(inode: &Inode) -> String {
    match inode.file_type {
        FileType::Directory => format!("{:25}\t{} entries", format!("{}/", inode.name), inode.children.as_ref().unwrap().len()),
        FileType::File => format!("{:25}\t{}", inode.name, to_file_size(inode.size)),
        FileType::Symlink => format!("{:25}\t -> {}", inode.name, inode.symlink_target.as_ref().unwrap()),
    }
}
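
With the 25-column padding this yields one entry per line, e.g. (illustrative values):

    src/                     	3 entries
    main.rs                  	12.4 KiB
    latest                   	 -> backups/2017-03-21
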
@@ -3,6 +3,7 @@ mod compression;
mod encryption;
mod hash;
mod lru_cache;
pub mod cli;
pub mod msgpack;

pub use self::checksum::*;
@@ -10,38 +11,3 @@ pub use self::compression::*;
pub use self::encryption::*;
pub use self::hash::*;
pub use self::lru_cache::*;


pub fn to_file_size(size: u64) -> String {
    let mut size = size as f32;
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.0} Bytes", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} KiB", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} MiB", size);
    }
    if size >= 512.0 {
        size /= 1024.0;
    } else {
        return format!("{:.1} GiB", size);
    }
    format!("{:.1} TiB", size)
}

pub fn to_duration(dur: f32) -> String {
    let secs = dur.floor() as u64;
    let subsecs = dur - dur.floor();
    let hours = secs / 3600;
    let mins = (secs / 60) % 60;
    let secs = (secs % 60) as f32 + subsecs;
    format!("{}:{:02}:{:04.1}", hours, mins, secs)
}