Check backups and inodes

This commit is contained in:
Dennis Schwerdel 2017-03-22 11:10:13 +01:00
parent 1aca00c027
commit fa01e0bdba
7 changed files with 186 additions and 93 deletions

View File

@@ -6,6 +6,7 @@ use ::prelude::*;
use chrono::prelude::*;
use std::process::exit;
use std::collections::HashMap;
use self::args::Arguments;
@@ -63,6 +64,70 @@ fn find_reference_backup(repo: &Repository, path: &str) -> Option<Backup> {
    matching.pop()
}

fn print_backup(backup: &Backup) {
    println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
    println!("Duration: {}", to_duration(backup.duration));
    println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
    println!("Total backup size: {}", to_file_size(backup.total_data_size));
    println!("Modified data size: {}", to_file_size(backup.changed_data_size));
    let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
    println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
    let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
    println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
    println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
}

fn print_backups(backup_map: &HashMap<String, Backup>) {
    for (name, backup) in backup_map {
        println!("{:25} {:>32} {:5} files, {:4} dirs, {:>10}",
            name, Local.timestamp(backup.date, 0).to_rfc2822(), backup.file_count,
            backup.dir_count, to_file_size(backup.total_data_size));
    }
}

fn print_repoinfo(info: &RepositoryInfo) {
    println!("Bundles: {}", info.bundle_count);
    println!("Total size: {}", to_file_size(info.encoded_data_size));
    println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
    println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
    println!("Chunk count: {}", info.chunk_count);
    println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
    let index_usage = info.index_entries as f32 / info.index_capacity as f32;
    println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
}

fn print_bundle(bundle: &BundleInfo) {
    println!("Bundle {}", bundle.id);
    println!(" - Mode: {:?}", bundle.mode);
    println!(" - Hash method: {:?}", bundle.hash_method);
    println!(" - Chunks: {}", bundle.chunk_count);
    println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
    println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
    let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
    let compression = if let Some(ref c) = bundle.compression {
        c.to_string()
    } else {
        "none".to_string()
    };
    println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
}

fn print_config(config: &Config) {
    println!("Bundle size: {}", to_file_size(config.bundle_size as u64));
    println!("Chunker: {}", config.chunker.to_string());
    if let Some(ref compression) = config.compression {
        println!("Compression: {}", compression.to_string());
    } else {
        println!("Compression: none");
    }
    if let Some(ref encryption) = config.encryption {
        println!("Encryption: {}", to_hex(&encryption.1[..]));
    } else {
        println!("Encryption: none");
    }
    println!("Hash method: {}", config.hash.name());
}
#[allow(unknown_lints,cyclomatic_complexity)]
pub fn run() {
@@ -86,6 +151,7 @@ pub fn run() {
                repo.set_encryption(Some(&public));
                repo.register_key(public, secret).unwrap();
                repo.save_config().unwrap();
                print_config(&repo.config);
            }
        },
        Arguments::Backup{repo_path, backup_name, src_path, full, reference} => {
@@ -102,8 +168,19 @@ pub fn run() {
                    info!("No reference backup found, doing a full scan instead");
                }
            }
            let backup = repo.create_backup(&src_path, reference_backup.as_ref()).unwrap();
            let backup = match repo.create_backup(&src_path, reference_backup.as_ref()) {
                Ok(backup) => backup,
                Err(RepositoryError::Backup(BackupError::FailedPaths(backup, _failed_paths))) => {
                    warn!("Some files are missing from the backup");
                    backup
                },
                Err(err) => {
                    error!("Backup failed: {}", err);
                    exit(3)
                }
            };
            repo.save_backup(&backup, &backup_name).unwrap();
            print_backup(&backup);
        },
        Arguments::Restore{repo_path, backup_name, inode, dst_path} => {
            let mut repo = open_repository(&repo_path);
@@ -148,13 +225,12 @@ pub fn run() {
        Arguments::Check{repo_path, backup_name, inode, full} => {
            let mut repo = open_repository(&repo_path);
            if let Some(backup_name) = backup_name {
                let _backup = get_backup(&repo, &backup_name);
                if let Some(_inode) = inode {
                    error!("Checking backup subtrees is not implemented yet");
                    return
                let backup = get_backup(&repo, &backup_name);
                if let Some(inode) = inode {
                    let inode = repo.get_backup_inode(&backup, inode).unwrap();
                    repo.check_inode(&inode).unwrap()
                } else {
                    error!("Checking backups is not implemented yet");
                    return
                    repo.check_backup(&backup).unwrap()
                }
            } else {
                repo.check(full).unwrap()
@@ -184,11 +260,7 @@ pub fn run() {
                        exit(3)
                    }
                };
                for (name, backup) in backup_map {
                    println!("{:25} {:>32} {:5} files, {:4} dirs, {:>10}",
                        name, Local.timestamp(backup.date, 0).to_rfc2822(), backup.file_count,
                        backup.dir_count, to_file_size(backup.total_data_size));
                }
                print_backups(&backup_map);
            }
        },
        Arguments::Info{repo_path, backup_name, inode} => {
@@ -199,45 +271,16 @@ pub fn run() {
                    error!("Displaying information on single inodes is not implemented yet");
                    return
                } else {
                    println!("Date: {}", Local.timestamp(backup.date, 0).to_rfc2822());
                    println!("Duration: {}", to_duration(backup.duration));
                    println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count);
                    println!("Total backup size: {}", to_file_size(backup.total_data_size));
                    println!("Modified data size: {}", to_file_size(backup.changed_data_size));
                    let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
                    println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0);
                    let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
                    println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0);
                    println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
                    print_backup(&backup);
                }
            } else {
                let info = repo.info();
                println!("Bundles: {}", info.bundle_count);
                println!("Total size: {}", to_file_size(info.encoded_data_size));
                println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
                println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
                println!("Chunk count: {}", info.chunk_count);
                println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64));
                let index_usage = info.index_entries as f32 / info.index_capacity as f32;
                println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
                print_repoinfo(&repo.info());
            }
        },
        Arguments::ListBundles{repo_path} => {
            let repo = open_repository(&repo_path);
            for bundle in repo.list_bundles() {
                println!("Bundle {}", bundle.id);
                println!(" - Mode: {:?}", bundle.mode);
                println!(" - Hash method: {:?}", bundle.hash_method);
                println!(" - Chunks: {}", bundle.chunk_count);
                println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
                println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
                let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
                let compression = if let Some(ref c) = bundle.compression {
                    c.to_string()
                } else {
                    "none".to_string()
                };
                println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0);
                print_bundle(bundle);
                println!();
            }
        },
@@ -265,19 +308,7 @@ pub fn run() {
                repo.config.hash = hash
            }
            repo.save_config().unwrap();
            println!("Bundle size: {}", to_file_size(repo.config.bundle_size as u64));
            println!("Chunker: {}", repo.config.chunker.to_string());
            if let Some(ref compression) = repo.config.compression {
                println!("Compression: {}", compression.to_string());
            } else {
                println!("Compression: none");
            }
            if let Some(ref encryption) = repo.config.encryption {
                println!("Encryption: {}", to_hex(&encryption.1[..]));
            } else {
                println!("Encryption: none");
            }
            println!("Hash method: {}", repo.config.hash.name());
            print_config(&repo.config);
        },
        Arguments::GenKey{} => {
            let (public, secret) = gen_keypair();

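The changes in this file do two things: the repeated status output moves into the print_backup, print_backups, print_repoinfo, print_bundle and print_config helpers, and the Backup command now separates partial from fatal failures. Because BackupError::FailedPaths carries the partially completed Backup, the CLI can warn, save the backup anyway, and print its summary instead of aborting. (On the print_backup percentages: "saved" is the complement of a size ratio, so 4 GiB deduplicated out of 10 GiB changed gives a ratio of 0.4, printed as 60.0% saved.) Below is a minimal, self-contained sketch of this partial-result-in-the-error pattern; Output, WorkError and do_work are hypothetical stand-ins, not names from this codebase:

// Hypothetical stand-ins for Backup and BackupError::FailedPaths.
#[derive(Debug)]
struct Output(Vec<&'static str>);

#[derive(Debug)]
enum WorkError {
    // Partial failure: carry the usable result plus what went wrong.
    Partial(Output, Vec<&'static str>),
    Fatal(String),
}

fn do_work(fail_some: bool) -> Result<Output, WorkError> {
    let done = Output(vec!["a", "b"]);
    if fail_some {
        Err(WorkError::Partial(done, vec!["c"]))
    } else {
        Ok(done)
    }
}

fn main() {
    // Same shape as the new Arguments::Backup arm: recover the partial
    // result from the specific error variant, abort on anything else.
    let output = match do_work(true) {
        Ok(output) => output,
        Err(WorkError::Partial(output, failed)) => {
            eprintln!("warning: some items failed: {:?}", failed);
            output
        },
        Err(err) => {
            eprintln!("fatal: {:?}", err);
            std::process::exit(3)
        }
    };
    println!("saving {:?}", output);
}

Keeping the partial result inside the error variant means callers that do not explicitly opt in to partial success still get a hard failure, which is the safer default.
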
View File

@@ -31,7 +31,6 @@ mod prelude;
// TODO: Recompress & combine bundles
// TODO: list --tree
// TODO: Import repository from remote folder
// TODO: Continue on errors (return summary as error)
// TODO: More detailed errors with nicer text
// TODO: Allow using tar files for backup and restore (--tar, http://alexcrichton.com/tar-rs/tar/index.html)

View File

@@ -1,7 +1,7 @@
pub use ::util::*;
pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError};
pub use ::chunker::{ChunkerType, Chunker, ChunkerStatus, IChunker, ChunkerError};
pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, RepositoryIntegrityError, BackupFileError};
pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, RepositoryIntegrityError, BackupFileError, BackupError};
pub use ::index::{Index, Location, IndexError};
pub use serde::{Serialize, Deserialize};

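This file is the crate's prelude: adding BackupError to the re-export list is what lets the CLI match on it after a single `use ::prelude::*;`. A tiny sketch of the prelude pattern, using standard-library re-exports purely for illustration:

mod prelude {
    // Re-export commonly used items so call sites need one glob import,
    // mirroring how this crate's prelude now exposes BackupError.
    pub use std::collections::HashMap;
    pub use std::path::PathBuf;
}

use prelude::*;

fn main() {
    let mut map: HashMap<PathBuf, u32> = HashMap::new();
    map.insert(PathBuf::from("/tmp"), 1);
    println!("{}", map.len());
}
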
View File

@@ -178,6 +178,10 @@ impl Backup {
quick_error!{
    #[derive(Debug)]
    pub enum BackupError {
        FailedPaths(backup: Backup, failed: Vec<PathBuf>) {
            description("Some paths could not be backed up")
            display("Backup error: some paths could not be backed up")
        }
    }
}
@@ -307,10 +311,19 @@ impl Repository {
        backup.path = path.as_ref().to_string_lossy().to_string();
        let info_before = self.info();
        let start = Local::now();
        let mut failed_paths = vec![];
        while let Some((path, reference_inode)) = scan_stack.pop() {
            // Create an inode for this path containing all attributes and contents
            // (for files) but no children (for directories)
            let mut inode = try!(self.create_inode(&path, reference_inode.as_ref()));
            let mut inode = match self.create_inode(&path, reference_inode.as_ref()) {
                Ok(inode) => inode,
                Err(RepositoryError::Inode(err)) => {
                    warn!("Failed to back up inode {}", err);
                    failed_paths.push(path);
                    continue
                },
                Err(err) => return Err(err)
            };
            backup.total_data_size += inode.size;
            if let Some(ref ref_inode) = reference_inode {
                if !ref_inode.is_unchanged(&inode) {
@@ -326,8 +339,22 @@ impl Repository {
                save_stack.push(path.clone());
                inode.children = Some(HashMap::new());
                directories.insert(path.clone(), inode);
                for ch in try!(fs::read_dir(&path)) {
                    let child = try!(ch);
                let dirlist = match fs::read_dir(&path) {
                    Ok(dirlist) => dirlist,
                    Err(err) => {
                        warn!("Failed to read {:?}: {}", &path, err);
                        failed_paths.push(path);
                        continue
                    }
                };
                for ch in dirlist {
                    let child = match ch {
                        Ok(child) => child,
                        Err(err) => {
                            warn!("Failed to read {:?}: {}", &path, err);
                            continue
                        }
                    };
                    let name = child.file_name().to_string_lossy().to_string();
                    let ref_child = reference_inode.as_ref()
                        .and_then(|inode| inode.children.as_ref())
@@ -383,7 +410,11 @@ impl Repository {
        backup.bundle_count = info_after.bundle_count - info_before.bundle_count;
        backup.chunk_count = info_after.chunk_count - info_before.chunk_count;
        backup.avg_chunk_size = backup.deduplicated_data_size as f32 / backup.chunk_count as f32;
        if failed_paths.is_empty() {
            Ok(backup)
        } else {
            Err(BackupError::FailedPaths(backup, failed_paths).into())
        }
    }

    pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {

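With these changes create_backup degrades gracefully instead of aborting on the first unreadable path: inode creation failures and unreadable directories are logged with warn!, recorded in failed_paths, and the scan continues; only at the end does the collected list decide between Ok(backup) and the FailedPaths error. A self-contained sketch of the same skip-and-collect traversal over fs::read_dir follows; the walk function and its types are illustrative, not code from this repository:

use std::fs;
use std::path::PathBuf;

// Collect readable entries, remembering paths that failed instead of
// aborting the whole traversal (mirrors the new create_backup loop).
// Note: symlinked directories are followed naively here.
fn walk(root: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
    let mut stack = vec![root];
    let (mut seen, mut failed) = (Vec::new(), Vec::new());
    while let Some(path) = stack.pop() {
        let dirlist = match fs::read_dir(&path) {
            Ok(dirlist) => dirlist,
            Err(err) => {
                eprintln!("Failed to read {:?}: {}", &path, err);
                failed.push(path);
                continue
            }
        };
        for entry in dirlist {
            let entry = match entry {
                Ok(entry) => entry,
                Err(err) => {
                    eprintln!("Failed to read entry in {:?}: {}", &path, err);
                    continue
                }
            };
            let child = entry.path();
            if child.is_dir() {
                stack.push(child);
            } else {
                seen.push(child);
            }
        }
    }
    (seen, failed)
}

fn main() {
    let (seen, failed) = walk(PathBuf::from("."));
    println!("{} files, {} unreadable paths", seen.len(), failed.len());
}

The two match layers mirror the patch: one failure mode for read_dir on the directory itself, one for each entry yielded by its iterator.
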
View File

@@ -3,7 +3,7 @@ use ::prelude::*;
use std::io;
use std::path::PathBuf;
use super::backup::BackupFileError;
use super::backup::{BackupFileError, BackupError};
use super::bundle_map::BundleMapError;
use super::config::ConfigError;
use super::metadata::InodeError;
@@ -72,6 +72,12 @@ quick_error!{
            description("Integrity error")
            display("Repository error: integrity error\n\tcaused by: {}", err)
        }
        Backup(err: BackupError) {
            from()
            cause(err)
            description("Failed to create a backup")
            display("Repository error: failed to create backup\n\tcaused by: {}", err)
        }
        Io(err: io::Error) {
            from()

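The from() annotation on the new Backup variant is what makes BackupError::FailedPaths(backup, failed_paths).into() compile in create_backup, and what lets try! lift a BackupError into a RepositoryError automatically. Roughly speaking, quick_error! expands from() into a From impl; a hand-written equivalent with minimal stand-in types (not the real definitions) might look like this:

// Minimal stand-ins, not the real definitions from this repo.
#[derive(Debug)]
struct BackupError;

#[derive(Debug)]
enum RepositoryError {
    Backup(BackupError),
}

// Essentially what the `from()` line in quick_error! generates:
impl From<BackupError> for RepositoryError {
    fn from(err: BackupError) -> RepositoryError {
        RepositoryError::Backup(err)
    }
}

fn create() -> Result<(), BackupError> {
    Err(BackupError)
}

fn run() -> Result<(), RepositoryError> {
    // `?` (try! in the 2017-era code above) applies the From impl.
    create()?;
    Ok(())
}

fn main() {
    println!("{:?}", run().unwrap_err()); // Backup(BackupError)
}
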
View File

@@ -83,6 +83,59 @@ impl Repository {
        Ok(new)
    }

    fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap) -> Result<(), RepositoryError> {
        match inode.contents {
            Some(FileContents::ChunkedDirect(ref chunks)) => {
                try!(self.check_chunks(checked, chunks));
            },
            Some(FileContents::ChunkedIndirect(ref chunks)) => {
                if try!(self.check_chunks(checked, chunks)) {
                    let chunk_data = try!(self.get_data(&chunks));
                    let chunks = ChunkList::read_from(&chunk_data);
                    try!(self.check_chunks(checked, &chunks));
                }
            }
            _ => ()
        }
        Ok(())
    }

    fn check_subtree(&mut self, chunks: &[Chunk], checked: &mut Bitmap) -> Result<(), RepositoryError> {
        let mut todo = VecDeque::new();
        todo.push_back(ChunkList::from(chunks.to_vec()));
        while let Some(chunks) = todo.pop_front() {
            if !try!(self.check_chunks(checked, &chunks)) {
                continue
            }
            let inode = try!(self.get_inode(&chunks));
            // Mark the content chunks as used
            try!(self.check_inode_contents(&inode, checked));
            // Put children in todo
            if let Some(children) = inode.children {
                for (_name, chunks) in children {
                    todo.push_back(chunks);
                }
            }
        }
        Ok(())
    }

    pub fn check_backup(&mut self, backup: &Backup) -> Result<(), RepositoryError> {
        let mut checked = Bitmap::new(self.index.capacity());
        self.check_subtree(&backup.root, &mut checked)
    }

    pub fn check_inode(&mut self, inode: &Inode) -> Result<(), RepositoryError> {
        let mut checked = Bitmap::new(self.index.capacity());
        try!(self.check_inode_contents(inode, &mut checked));
        if let Some(ref children) = inode.children {
            for chunks in children.values() {
                try!(self.check_subtree(chunks, &mut checked))
            }
        }
        Ok(())
    }

    fn check_backups(&mut self) -> Result<(), RepositoryError> {
        let mut checked = Bitmap::new(self.index.capacity());
        let backup_map = match self.get_backups() {
@@ -94,34 +147,7 @@ impl Repository {
            Err(err) => return Err(err)
        };
        for (_name, backup) in backup_map {
            let mut todo = VecDeque::new();
            todo.push_back(backup.root);
            while let Some(chunks) = todo.pop_front() {
                if !try!(self.check_chunks(&mut checked, &chunks)) {
                    continue
                }
                let inode = try!(self.get_inode(&chunks));
                // Mark the content chunks as used
                match inode.contents {
                    Some(FileContents::ChunkedDirect(chunks)) => {
                        try!(self.check_chunks(&mut checked, &chunks));
                    },
                    Some(FileContents::ChunkedIndirect(chunks)) => {
                        if try!(self.check_chunks(&mut checked, &chunks)) {
                            let chunk_data = try!(self.get_data(&chunks));
                            let chunks = ChunkList::read_from(&chunk_data);
                            try!(self.check_chunks(&mut checked, &chunks));
                        }
                    }
                    _ => ()
                }
                // Put children in todo
                if let Some(children) = inode.children {
                    for (_name, chunks) in children {
                        todo.push_back(chunks);
                    }
                }
            }
            try!(self.check_subtree(&backup.root, &mut checked));
        }
        Ok(())
    }

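check_subtree factors out the traversal that check_backups previously inlined: a VecDeque of chunk lists acts as a breadth-first work queue, and the Bitmap of already-checked index positions ensures chunks shared between backups are verified only once; the early continue when check_chunks reports nothing new also prunes whole shared subtrees. Here is the same shape reduced to plain standard-library types, purely as an illustration, with a HashSet standing in for the Bitmap and integer ids for the chunk lists:

use std::collections::{HashMap, HashSet, VecDeque};

// Nodes keyed by id, each listing child ids (stand-ins for inodes
// addressed by chunk lists).
fn check_subtree(root: u32, tree: &HashMap<u32, Vec<u32>>, checked: &mut HashSet<u32>) {
    let mut todo = VecDeque::new();
    todo.push_back(root);
    while let Some(id) = todo.pop_front() {
        // Skip nodes already verified via another path, like the
        // `if !try!(self.check_chunks(checked, &chunks)) { continue }` above.
        if !checked.insert(id) {
            continue
        }
        println!("checking node {}", id);
        if let Some(children) = tree.get(&id) {
            for &child in children {
                todo.push_back(child);
            }
        }
    }
}

fn main() {
    let mut tree = HashMap::new();
    tree.insert(1, vec![2, 3]);
    tree.insert(2, vec![4]);
    tree.insert(3, vec![4]); // node 4 is shared; it is checked only once
    let mut checked = HashSet::new();
    check_subtree(1, &tree, &mut checked);
}

Using an explicit queue instead of recursion also keeps deeply nested directory trees from exhausting the call stack.
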
View File

@@ -19,7 +19,7 @@ use std::sync::{Arc, Mutex};
pub use self::error::RepositoryError;
pub use self::config::Config;
pub use self::metadata::{Inode, FileType};
pub use self::backup::{Backup, BackupFileError};
pub use self::backup::{Backup, BackupFileError, BackupError};
pub use self::integrity::RepositoryIntegrityError;
pub use self::info::RepositoryInfo;
use self::bundle_map::BundleMap;