Some code cleanup

This commit is contained in:
Dennis Schwerdel 2017-03-20 18:11:03 +01:00 committed by Dennis Schwerdel
parent 7cadaaf359
commit a8ff046c04
9 changed files with 54 additions and 93 deletions

View File

@@ -239,8 +239,8 @@ impl Bundle {
if version != HEADER_VERSION {
return Err(BundleError::WrongVersion(path.clone(), version))
}
let header: BundleInfo = try!(msgpack::decode_from_stream(&mut file)
.map_err(|e| BundleError::Decode(e, path.clone())));
let header: BundleInfo = try!(msgpack::decode_from_stream(&mut file).context(&path as &Path));
debug!("Load bundle {}", header.id);
let mut chunk_data = Vec::with_capacity(header.chunk_info_size);
chunk_data.resize(header.chunk_info_size, 0);
try!(file.read_exact(&mut chunk_data).context(&path as &Path));
@@ -254,6 +254,7 @@ impl Bundle {
#[inline]
fn load_encoded_contents(&self) -> Result<Vec<u8>, BundleError> {
debug!("Load bundle data {}", self.info.id);
let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
try!(file.seek(SeekFrom::Start(self.content_start as u64)).context(&self.path as &Path));
let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size)+1024);

View File

@@ -145,9 +145,9 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
println!();
let (public, secret) = gen_keypair();
let mut crypto = Crypto::new();
let mut crypto = Crypto::dummy();
crypto.add_secret_key(public, secret);
let encryption = (EncryptionMethod::Sodium, public[..].iter().cloned().collect::<Vec<u8>>().into());
let encryption = (EncryptionMethod::Sodium, public[..].to_vec().into());
println!("Encrypting bundles...");
let mut encrypted_bundles = Vec::with_capacity(bundles.len());

View File

@@ -28,7 +28,6 @@ pub enum Arguments {
Remove {
repo_path: String,
backup_name: String,
vacuum: bool,
inode: Option<String>
},
Prune {
@@ -38,13 +37,12 @@ pub enum Arguments {
weekly: Option<usize>,
monthly: Option<usize>,
yearly: Option<usize>,
vacuum: bool,
simulate: bool
force: bool
},
Vacuum {
repo_path: String,
ratio: f32,
simulate: bool
force: bool
},
Check {
repo_path: String,
@@ -216,7 +214,6 @@ pub fn parse() -> Arguments {
)
(@subcommand remove =>
(about: "removes a backup or a subpath")
(@arg vacuum: --vacuum "run vacuum afterwards to reclaim space")
(@arg BACKUP: +required "repository::backup[::subpath] path")
)
(@subcommand prune =>
@@ -226,14 +223,13 @@ pub fn parse() -> Arguments {
(@arg weekly: --weekly +takes_value "keep this number of weekly backups")
(@arg monthly: --monthly +takes_value "keep this number of monthly backups")
(@arg yearly: --yearly +takes_value "keep this number of yearly backups")
(@arg vacuum: --vacuum "run vacuum afterwards to reclaim space")
(@arg simulate: --simulate "only simulate the prune, do not remove any backups")
(@arg force: --force -f "actually run the prune instead of simulating it")
(@arg REPO: +required "path of the repository")
)
(@subcommand vacuum =>
(about: "saves space by combining and recompressing bundles")
(@arg ratio: --ratio -r +takes_value "ratio of unused chunks in a bundle to rewrite that bundle")
(@arg simulate: --simulate "only simulate the vacuum, do not remove any bundles")
(@arg force: --force -f "actually run the vacuum instead of simulating it")
(@arg REPO: +required "path of the repository")
)
(@subcommand check =>
@@ -342,7 +338,6 @@ pub fn parse() -> Arguments {
return Arguments::Remove {
repo_path: repository.to_string(),
backup_name: backup.unwrap().to_string(),
vacuum: args.is_present("vacuum"),
inode: inode.map(|v| v.to_string())
}
}
@@ -355,8 +350,7 @@ pub fn parse() -> Arguments {
return Arguments::Prune {
repo_path: repository.to_string(),
prefix: args.value_of("prefix").unwrap_or("").to_string(),
vacuum: args.is_present("vacuum"),
simulate: args.is_present("simulate"),
force: args.is_present("force"),
daily: args.value_of("daily").map(|v| parse_num(v, "daily backups") as usize),
weekly: args.value_of("weekly").map(|v| parse_num(v, "weekly backups") as usize),
monthly: args.value_of("monthly").map(|v| parse_num(v, "monthly backups") as usize),
@@ -371,7 +365,7 @@ pub fn parse() -> Arguments {
}
return Arguments::Vacuum {
repo_path: repository.to_string(),
simulate: args.is_present("simulate"),
force: args.is_present("force"),
ratio: parse_float(args.value_of("ratio").unwrap_or("0.5"), "ratio") as f32
}
}

View File

@@ -8,18 +8,18 @@ struct Logger;
impl log::Log for Logger {
fn enabled(&self, metadata: &LogMetadata) -> bool {
metadata.level() <= LogLevel::Info
metadata.level() <= LogLevel::Debug
}
fn log(&self, record: &LogRecord) {
if self.enabled(record.metadata()) {
let lvl = record.level();
match lvl {
LogLevel::Error => println!("{} - {}", Color::Red.bold().paint("error"), record.args()),
LogLevel::Warn => println!("{} - {}", Color::Yellow.bold().paint("warning"), record.args()),
LogLevel::Info => println!("{} - {}", Color::Green.bold().paint("info"), record.args()),
LogLevel::Debug => println!("{} - {}", Style::new().bold().paint("debug"), record.args()),
LogLevel::Trace => println!("{} - {}", "trace", record.args())
LogLevel::Error => println!("{}: {}", Color::Red.bold().paint("error"), record.args()),
LogLevel::Warn => println!("{}: {}", Color::Yellow.bold().paint("warning"), record.args()),
LogLevel::Info => println!("{}: {}", Color::Green.bold().paint("info"), record.args()),
LogLevel::Debug => println!("{}: {}", Style::new().bold().paint("debug"), record.args()),
LogLevel::Trace => println!("{}: {}", "trace", record.args())
}
}
}
@@ -27,7 +27,7 @@ impl log::Log for Logger {
pub fn init() -> Result<(), SetLoggerError> {
log::set_logger(|max_log_level| {
max_log_level.set(LogLevelFilter::Info);
max_log_level.set(LogLevelFilter::Debug);
Box::new(Logger)
})
}

View File

@@ -30,6 +30,7 @@ fn get_backup(repo: &Repository, backup_name: &str) -> Backup {
}
}
#[allow(unknown_lints,cyclomatic_complexity)]
pub fn run() {
if let Err(err) = logger::init() {
println!("Failed to initialize the logger: {}", err);
@@ -71,8 +72,8 @@ pub fn run() {
repo.restore_backup(&backup, &dst_path).unwrap();
}
},
Arguments::Remove{repo_path, backup_name, inode, vacuum} => {
let mut repo = open_repository(&repo_path);
Arguments::Remove{repo_path, backup_name, inode} => {
let repo = open_repository(&repo_path);
if let Some(_inode) = inode {
let _backup = get_backup(&repo, &backup_name);
error!("Removing backup subtrees is not implemented yet");
@@ -81,24 +82,24 @@ pub fn run() {
repo.delete_backup(&backup_name).unwrap();
info!("The backup has been deleted, run vacuum to reclaim space");
}
if vacuum {
repo.vacuum(0.5, false).unwrap();
}
},
Arguments::Prune{repo_path, prefix, daily, weekly, monthly, yearly, simulate, vacuum} => {
let mut repo = open_repository(&repo_path);
Arguments::Prune{repo_path, prefix, daily, weekly, monthly, yearly, force} => {
let repo = open_repository(&repo_path);
if daily.is_none() && weekly.is_none() && monthly.is_none() && yearly.is_none() {
error!("This would remove all those backups");
exit(1);
}
repo.prune_backups(&prefix, daily, weekly, monthly, yearly, simulate).unwrap();
if !simulate && vacuum {
repo.vacuum(0.5, false).unwrap();
repo.prune_backups(&prefix, daily, weekly, monthly, yearly, force).unwrap();
if !force {
info!("Run with --force to actually execute this command");
}
},
Arguments::Vacuum{repo_path, ratio, simulate} => {
Arguments::Vacuum{repo_path, ratio, force} => {
let mut repo = open_repository(&repo_path);
repo.vacuum(ratio, simulate).unwrap();
repo.vacuum(ratio, force).unwrap();
if !force {
info!("Run with --force to actually execute this command");
}
return
},
Arguments::Check{repo_path, backup_name, inode, full} => {
@@ -130,7 +131,7 @@ pub fn run() {
}
} else {
for (name, backup) in repo.list_backups().unwrap() {
println!("{} - {} - {} files, {} dirs, {}", name, Local.timestamp(backup.date, 0).to_rfc2822(), backup.file_count, backup.dir_count, to_file_size(backup.total_data_size));
println!("{:25} {:>32} {:5} files, {:4} dirs, {:>10}", name, Local.timestamp(backup.date, 0).to_rfc2822(), backup.file_count, backup.dir_count, to_file_size(backup.total_data_size));
}
}
},
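For reference, the widened listing line above uses Rust's format width and alignment specifiers. A minimal standalone sketch of the same specifiers, with made-up example values (the backup name, date string and size below are hypothetical, not taken from this repository):

fn main() {
    // {:25} pads the name left-aligned to 25 columns, {:>32} right-aligns the
    // date string to 32 columns, and {:>10} right-aligns the size to 10 columns.
    println!("{:25} {:>32} {:5} files, {:4} dirs, {:>10}",
             "daily/2017-03-20", "Mon, 20 Mar 2017 18:11:03 +0100", 1234, 56, "1.2 GiB");
}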

View File

@@ -28,11 +28,12 @@ mod cli;
// TODO: - Keep meta bundles also locally
// TODO: - Load and compare remote bundles to bundle map
// TODO: - Write backup files there as well
// TODO: - Avoid loading remote backups
// TODO: Remove backup subtrees
// TODO: Recompress & combine bundles
// TODO: Encrypt backup files too
// TODO: list --tree
// TODO: Partial backups
// TODO: Partial backups via reference inode
// TODO: Import repository from remote folder
// TODO: Continue on errors

View File

@@ -88,7 +88,8 @@ impl Repository {
Ok(())
}
pub fn prune_backups(&self, prefix: &str, daily: Option<usize>, weekly: Option<usize>, monthly: Option<usize>, yearly: Option<usize>, simulate: bool) -> Result<(), RepositoryError> {
pub fn prune_backups(&self, prefix: &str, daily: Option<usize>, weekly: Option<usize>, monthly: Option<usize>, yearly: Option<usize>, force: bool) -> Result<(), RepositoryError> {
let mut backups = Vec::new();
for (name, backup) in try!(self.list_backups()) {
if name.starts_with(prefix) {
@@ -98,13 +99,15 @@ impl Repository {
}
backups.sort_by_key(|backup| backup.2.date);
let mut keep = Bitmap::new(backups.len());
if let Some(max) = yearly {
fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(backups: &[(String, DateTime<Local>, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) {
let mut unique = VecDeque::with_capacity(max+1);
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
let val = backup.1.year();
if Some(val) != last {
last = Some(val);
let val = keyfn(&backup.1);
let cur = Some(val);
if cur != last {
last = cur;
unique.push_back(i);
if unique.len() > max {
unique.pop_front();
@@ -115,56 +118,17 @@ impl Repository {
keep.set(i);
}
}
if let Some(max) = yearly {
mark_needed(&backups, &mut keep, max, |d| d.year());
}
if let Some(max) = monthly {
let mut unique = VecDeque::with_capacity(max+1);
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
let val = (backup.1.year(), backup.1.month());
if Some(val) != last {
last = Some(val);
unique.push_back(i);
if unique.len() > max {
unique.pop_front();
}
}
}
for i in unique {
keep.set(i);
}
mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month()));
}
if let Some(max) = weekly {
let mut unique = VecDeque::with_capacity(max+1);
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
let val = (backup.1.isoweekdate().0, backup.1.isoweekdate().1);
if Some(val) != last {
last = Some(val);
unique.push_back(i);
if unique.len() > max {
unique.pop_front();
}
}
}
for i in unique {
keep.set(i);
}
mark_needed(&backups, &mut keep, max, |d| (d.isoweekdate().0, d.isoweekdate().1));
}
if let Some(max) = daily {
let mut unique = VecDeque::with_capacity(max+1);
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
let val = (backup.1.year(), backup.1.month(), backup.1.day());
if Some(val) != last {
last = Some(val);
unique.push_back(i);
if unique.len() > max {
unique.pop_front();
}
}
}
for i in unique {
keep.set(i);
}
mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month(), d.day()));
}
let mut remove = Vec::new();
for (i, backup) in backups.into_iter().enumerate() {
@@ -173,7 +137,7 @@ impl Repository {
}
}
info!("Removing the following backups: {:?}", remove);
if !simulate {
if force {
for name in remove {
try!(self.delete_backup(&name));
}
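For context, a minimal standalone sketch of the keep-the-last-N-distinct-periods pattern that the new mark_needed helper factors out. The function name last_n_period_starts and the integer month data are invented for illustration; the real helper operates on (String, DateTime<Local>, Backup) tuples and sets bits in a Bitmap:

use std::collections::VecDeque;

// Keep the indices of the most recent `max` items that start a new key period,
// mirroring the deduplication loop inside mark_needed.
fn last_n_period_starts<T, K: Eq, F: Fn(&T) -> K>(items: &[T], max: usize, keyfn: F) -> Vec<usize> {
    let mut unique = VecDeque::with_capacity(max + 1);
    let mut last = None;
    for (i, item) in items.iter().enumerate() {
        let cur = Some(keyfn(item));
        if cur != last {
            last = cur;
            unique.push_back(i);
            if unique.len() > max {
                unique.pop_front();
            }
        }
    }
    unique.into_iter().collect()
}

fn main() {
    // Backups grouped by month 1..6; keep the 3 most recent distinct months.
    let months = [1, 1, 2, 3, 3, 4, 5, 6];
    assert_eq!(last_n_period_starts(&months, 3, |m| *m), vec![5, 6, 7]);
}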

View File

@@ -48,7 +48,7 @@ impl Repository {
used_size: 0
});
}
for (_name, backup) in try!(self.list_backups()).into_iter() {
for (_name, backup) in try!(self.list_backups()) {
let mut todo = VecDeque::new();
todo.push_back(backup.root);
while let Some(chunks) = todo.pop_front() {
@@ -90,7 +90,7 @@ impl Repository {
}
}
pub fn vacuum(&mut self, ratio: f32, simulate: bool) -> Result<(), RepositoryError> {
pub fn vacuum(&mut self, ratio: f32, force: bool) -> Result<(), RepositoryError> {
try!(self.flush());
info!("Analyzing chunk usage");
let usage = try!(self.analyze_usage());
@@ -106,11 +106,11 @@ impl Repository {
}
}
info!("Reclaiming {} by rewriting {} bundles", to_file_size(reclaim_space as u64), rewrite_bundles.len());
if simulate {
if !force {
return Ok(())
}
for id in &rewrite_bundles {
let bundle = usage.get(id).unwrap();
let bundle = &usage[id];
let bundle_id = self.bundle_map.get(*id).unwrap().id();
for chunk in 0..bundle.chunk_count {
let data = try!(self.bundles.get_chunk(&bundle_id, chunk));

View File

@@ -107,7 +107,7 @@ pub struct Crypto {
impl Crypto {
#[inline]
pub fn new() -> Self {
pub fn dummy() -> Self {
Crypto { path: PathBuf::new(), keys: HashMap::new() }
}