mirror of https://github.com/dswd/zvault

First stage of refactoring complete

This commit is contained in:
parent 31c6650374
commit 46e94bc7a6
@@ -39,55 +39,83 @@ pub enum DiffType {
}


impl BackupRepository {
    pub fn get_all_backups(&self) -> Result<HashMap<String, BackupFile>, RepositoryError> {
        Ok(try!(BackupFile::get_all_from(
            &self.crypto,
            self.layout.backups_path()
        )))
pub trait RepositoryBackupIO {

    fn get_all_backups(&self) -> Result<HashMap<String, BackupFile>, RepositoryError>;
    fn get_backups<P: AsRef<Path>>(&self, path: P
    ) -> Result<HashMap<String, BackupFile>, RepositoryError>;
    fn has_backup(&self, name: &str) -> bool;
    fn get_backup(&self, name: &str) -> Result<BackupFile, RepositoryError>;
    fn save_backup(&mut self, backup: &BackupFile, name: &str, lock: &BackupMode
    ) -> Result<(), RepositoryError>;
    fn delete_backup(&mut self, name: &str, lock: &BackupMode) -> Result<(), RepositoryError>;
    fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize,
        yearly: usize, force: bool, lock: &BackupMode) -> Result<(), RepositoryError>;
    fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode, path: P,
        lock: &OnlineMode) -> Result<(), RepositoryError>;
    fn create_backup_recurse<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>,
        options: &BackupOptions, backup: &mut BackupFile, failed_paths: &mut Vec<PathBuf>,
        lock: &BackupMode) -> Result<Inode, RepositoryError>;
    fn create_backup<P: AsRef<Path>>(&mut self, path: P, name: &str,
        reference: Option<&BackupFile>, options: &BackupOptions, lock: &BackupMode
    ) -> Result<BackupFile, RepositoryError>;
    fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut BackupFile, path: P,
        lock: &BackupMode) -> Result<(), RepositoryError>;
    fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P, lock: &OnlineMode
    ) -> Result<Vec<Inode>, RepositoryError>;
    fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P, lock: &OnlineMode
    ) -> Result<Inode, RepositoryError>;
    fn find_versions<P: AsRef<Path>>(&mut self, path: P, lock: &OnlineMode
    ) -> Result<Vec<(String, Inode)>, RepositoryError>;
    fn find_differences_recurse(&mut self, inode1: &Inode, inode2: &Inode, path: PathBuf,
        diffs: &mut Vec<(DiffType, PathBuf)>, lock: &OnlineMode) -> Result<(), RepositoryError>;
    fn find_differences(&mut self, inode1: &Inode, inode2: &Inode, lock: &OnlineMode
    ) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError>;
    fn count_sizes_recursive(&mut self, inode: &Inode, sizes: &mut HashMap<u64, usize>,
        min_size: u64, lock: &OnlineMode) -> Result<(), RepositoryError>;
    fn find_duplicates_recursive(&mut self, inode: &Inode, path: &Path, sizes: &HashMap<u64, usize>,
        hashes: &mut HashMap<Hash, (Vec<PathBuf>, u64)>, lock: &OnlineMode
    ) -> Result<(), RepositoryError>;
    fn find_duplicates(&mut self, inode: &Inode, min_size: u64, lock: &OnlineMode
    ) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError>;
}
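
A minimal usage sketch of the new trait, assuming the BackupRepository facade and its mode wrappers defined in mod.rs further down; the backup name and path are placeholders:

// Sketch only: the facade acquires the BackupMode/OnlineMode locks internally
// and forwards them to the RepositoryBackupIO methods declared above.
fn example(repo: &mut BackupRepository) -> Result<(), RepositoryError> {
    if repo.has_backup("daily/monday") {
        try!(repo.delete_backup("daily/monday"));
    }
    for (backup_name, inode) in try!(repo.find_versions("etc/hosts")) {
        tr_info!("{} contains {}", backup_name, inode.name);
    }
    Ok(())
}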
|
||||
|
||||
|
||||
impl RepositoryBackupIO for Repository {
|
||||
fn get_all_backups(&self) -> Result<HashMap<String, BackupFile>, RepositoryError> {
|
||||
Ok(try!(BackupFile::get_all_from(&self.get_crypto(),self.get_layout().backups_path())))
|
||||
}
|
||||
|
||||
pub fn get_backups<P: AsRef<Path>>(
|
||||
&self,
|
||||
path: P,
|
||||
) -> Result<HashMap<String, BackupFile>, RepositoryError> {
|
||||
Ok(try!(BackupFile::get_all_from(
|
||||
&self.crypto,
|
||||
self.layout.backups_path().join(path)
|
||||
)))
|
||||
fn get_backups<P: AsRef<Path>>(&self, path: P) -> Result<HashMap<String, BackupFile>, RepositoryError> {
|
||||
Ok(try!(BackupFile::get_all_from(&self.get_crypto(), self.get_layout().backups_path().join(path))))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn has_backup(&self, name: &str) -> bool {
|
||||
self.layout.backup_path(name).exists()
|
||||
fn has_backup(&self, name: &str) -> bool {
|
||||
self.get_layout().backup_path(name).exists()
|
||||
}
|
||||
|
||||
pub fn get_backup(&self, name: &str) -> Result<BackupFile, RepositoryError> {
|
||||
Ok(try!(BackupFile::read_from(
|
||||
&self.crypto,
|
||||
self.layout.backup_path(name)
|
||||
)))
|
||||
fn get_backup(&self, name: &str) -> Result<BackupFile, RepositoryError> {
|
||||
Ok(try!(BackupFile::read_from(&self.get_crypto(), self.get_layout().backup_path(name))))
|
||||
}
|
||||
|
||||
pub fn save_backup(&mut self, backup: &BackupFile, name: &str) -> Result<(), RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
let path = self.layout.backup_path(name);
|
||||
fn save_backup(&mut self, backup: &BackupFile, name: &str, lock: &BackupMode) -> Result<(), RepositoryError> {
|
||||
let path = self.get_layout().backup_path(name);
|
||||
try!(fs::create_dir_all(path.parent().unwrap()));
|
||||
try!(backup.save_to(
|
||||
&self.crypto,
|
||||
&self.get_crypto(),
|
||||
self.get_config().encryption.clone(),
|
||||
path
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
let mut path = self.layout.backup_path(name);
|
||||
fn delete_backup(&mut self, name: &str, lock: &BackupMode) -> Result<(), RepositoryError> {
|
||||
let mut path = self.get_layout().backup_path(name);
|
||||
try!(fs::remove_file(&path));
|
||||
loop {
|
||||
path = path.parent().unwrap().to_owned();
|
||||
if path == self.layout.backups_path() || fs::remove_dir(&path).is_err() {
|
||||
if path == self.get_layout().backups_path() || fs::remove_dir(&path).is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -95,16 +123,9 @@ impl BackupRepository {
|
|||
}
|
||||
|
||||
|
||||
pub fn prune_backups(
|
||||
&mut self,
|
||||
prefix: &str,
|
||||
daily: usize,
|
||||
weekly: usize,
|
||||
monthly: usize,
|
||||
yearly: usize,
|
||||
force: bool,
|
||||
) -> Result<(), RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize,
|
||||
yearly: usize, force: bool, lock: &BackupMode) -> Result<(), RepositoryError>
|
||||
{
|
||||
let mut backups = Vec::new();
|
||||
let backup_map = match self.get_all_backups() {
|
||||
Ok(backup_map) => backup_map,
|
||||
|
@ -175,19 +196,13 @@ impl BackupRepository {
|
|||
}
|
||||
if force {
|
||||
for name in remove {
|
||||
try!(self.delete_backup(&name));
|
||||
try!(self.delete_backup(&name, lock));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn restore_inode_tree<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
backup: &BackupFile,
|
||||
inode: Inode,
|
||||
path: P,
|
||||
) -> Result<(), RepositoryError> {
|
||||
let _lock = try!(self.repo.lock(false));
|
||||
fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode, path: P, lock: &OnlineMode) -> Result<(), RepositoryError> {
|
||||
let mut queue = VecDeque::new();
|
||||
queue.push_back((path.as_ref().to_owned(), inode));
|
||||
let cache = users::UsersCache::new();
|
||||
|
@ -204,7 +219,7 @@ impl BackupRepository {
|
|||
inode.group = group.gid();
|
||||
}
|
||||
}
|
||||
try!(self.save_inode_at(&inode, &path));
|
||||
try!(self.save_inode_at(&inode, &path, lock));
|
||||
}
|
||||
if inode.file_type == FileType::Directory {
|
||||
let path = if is_root {
|
||||
|
@ -213,7 +228,7 @@ impl BackupRepository {
|
|||
path.join(inode.name)
|
||||
};
|
||||
for chunks in inode.children.unwrap().values() {
|
||||
let inode = try!(self.get_inode(chunks));
|
||||
let inode = try!(self.get_inode(chunks, lock));
|
||||
queue.push_back((path.clone(), inode));
|
||||
}
|
||||
}
|
||||
|
@ -222,16 +237,12 @@ impl BackupRepository {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_backup_recurse<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
path: P,
|
||||
reference: Option<&Inode>,
|
||||
options: &BackupOptions,
|
||||
backup: &mut BackupFile,
|
||||
failed_paths: &mut Vec<PathBuf>,
|
||||
fn create_backup_recurse<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>,
|
||||
options: &BackupOptions, backup: &mut BackupFile, failed_paths: &mut Vec<PathBuf>,
|
||||
lock: &BackupMode
|
||||
) -> Result<Inode, RepositoryError> {
|
||||
let path = path.as_ref();
|
||||
let mut inode = try!(self.create_inode(path, reference));
|
||||
let mut inode = try!(self.create_inode(path, reference, lock));
|
||||
if !backup.user_names.contains_key(&inode.user) {
|
||||
if let Some(user) = users::get_user_by_uid(inode.user) {
|
||||
backup.user_names.insert(
|
||||
|
@ -278,13 +289,14 @@ impl BackupRepository {
|
|||
.as_ref()
|
||||
.and_then(|inode| inode.children.as_ref())
|
||||
.and_then(|map| map.get(&name))
|
||||
.and_then(|chunks| self.get_inode(chunks).ok());
|
||||
.and_then(|chunks| self.get_inode(chunks, lock.as_online()).ok());
|
||||
let child_inode = match self.create_backup_recurse(
|
||||
&child_path,
|
||||
ref_child.as_ref(),
|
||||
options,
|
||||
backup,
|
||||
failed_paths
|
||||
failed_paths,
|
||||
lock
|
||||
) {
|
||||
Ok(inode) => inode,
|
||||
Err(RepositoryError::Inode(_)) |
|
||||
|
@ -296,7 +308,7 @@ impl BackupRepository {
|
|||
}
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
let chunks = try!(self.put_inode(&child_inode));
|
||||
let chunks = try!(self.put_inode(&child_inode, lock));
|
||||
inode.cum_size += child_inode.cum_size;
|
||||
for &(_, len) in chunks.iter() {
|
||||
meta_size += u64::from(len);
|
||||
|
@ -325,19 +337,10 @@ impl BackupRepository {
|
|||
Ok(inode)
|
||||
}
|
||||
|
||||
pub fn create_backup_recursively<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
path: P,
|
||||
reference: Option<&BackupFile>,
|
||||
options: &BackupOptions,
|
||||
fn create_backup<P: AsRef<Path>>(&mut self, path: P, name: &str,
|
||||
reference: Option<&BackupFile>, options: &BackupOptions, lock: &BackupMode
|
||||
) -> Result<BackupFile, RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
let _lock = try!(self.repo.lock(false));
|
||||
if self.repo.is_dirty() {
|
||||
return Err(RepositoryError::Dirty);
|
||||
}
|
||||
try!(self.repo.set_dirty());
|
||||
let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
|
||||
let reference_inode = reference.and_then(|b| self.get_inode(&b.root, lock.as_online()).ok());
|
||||
let mut backup = BackupFile::default();
|
||||
backup.config = self.get_config().clone();
|
||||
backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
|
||||
|
@ -350,10 +353,11 @@ impl BackupRepository {
|
|||
reference_inode.as_ref(),
|
||||
options,
|
||||
&mut backup,
|
||||
&mut failed_paths
|
||||
&mut failed_paths,
|
||||
lock
|
||||
));
|
||||
backup.root = try!(self.put_inode(&root_inode));
|
||||
try!(self.repo.flush());
|
||||
backup.root = try!(self.put_inode(&root_inode, lock));
|
||||
try!(self.flush(lock));
|
||||
let elapsed = Local::now().signed_duration_since(start);
|
||||
backup.timestamp = start.timestamp();
|
||||
backup.total_data_size = root_inode.cum_size;
|
||||
|
@ -369,7 +373,7 @@ impl BackupRepository {
|
|||
backup.bundle_count = info_after.bundle_count - info_before.bundle_count;
|
||||
backup.chunk_count = info_after.chunk_count - info_before.chunk_count;
|
||||
backup.avg_chunk_size = backup.deduplicated_data_size as f32 / backup.chunk_count as f32;
|
||||
self.repo.set_clean();
|
||||
try!(self.save_backup(&backup, name, lock));
|
||||
if failed_paths.is_empty() {
|
||||
Ok(backup)
|
||||
} else {
|
||||
|
@ -377,14 +381,10 @@ impl BackupRepository {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn remove_backup_path<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
backup: &mut BackupFile,
|
||||
path: P,
|
||||
fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut BackupFile, path: P,
|
||||
lock: &BackupMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
let _lock = try!(self.repo.lock(false));
|
||||
let mut inodes = try!(self.get_backup_path(backup, path));
|
||||
let mut inodes = try!(self.get_backup_path(backup, path, lock.as_online()));
|
||||
let to_remove = inodes.pop().unwrap();
|
||||
let mut remove_from = match inodes.pop() {
|
||||
Some(inode) => inode,
|
||||
|
@ -393,28 +393,26 @@ impl BackupRepository {
|
|||
remove_from.children.as_mut().unwrap().remove(
|
||||
&to_remove.name
|
||||
);
|
||||
let mut last_inode_chunks = try!(self.put_inode(&remove_from));
|
||||
let mut last_inode_chunks = try!(self.put_inode(&remove_from, lock));
|
||||
let mut last_inode_name = remove_from.name;
|
||||
while let Some(mut inode) = inodes.pop() {
|
||||
inode.children.as_mut().unwrap().insert(
|
||||
last_inode_name,
|
||||
last_inode_chunks
|
||||
);
|
||||
last_inode_chunks = try!(self.put_inode(&inode));
|
||||
last_inode_chunks = try!(self.put_inode(&inode, lock));
|
||||
last_inode_name = inode.name;
|
||||
}
|
||||
backup.root = last_inode_chunks;
|
||||
backup.modified = true;
|
||||
//TODO: save
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_backup_path<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
backup: &BackupFile,
|
||||
path: P,
|
||||
fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P, lock: &OnlineMode
|
||||
) -> Result<Vec<Inode>, RepositoryError> {
|
||||
let mut inodes = vec![];
|
||||
let mut inode = try!(self.get_inode(&backup.root));
|
||||
let mut inode = try!(self.get_inode(&backup.root, lock));
|
||||
for c in path.as_ref().components() {
|
||||
if let path::Component::Normal(name) = c {
|
||||
let name = name.to_string_lossy();
|
||||
|
@ -428,7 +426,7 @@ impl BackupRepository {
|
|||
)
|
||||
{
|
||||
inodes.push(inode);
|
||||
inode = try!(self.get_inode(&chunks));
|
||||
inode = try!(self.get_inode(&chunks, lock));
|
||||
} else {
|
||||
return Err(RepositoryError::NoSuchFileInBackup(
|
||||
backup.clone(),
|
||||
|
@ -442,24 +440,19 @@ impl BackupRepository {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_backup_inode<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
backup: &BackupFile,
|
||||
path: P,
|
||||
fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P, lock: &OnlineMode
|
||||
) -> Result<Inode, RepositoryError> {
|
||||
self.get_backup_path(backup, path).map(|mut inodes| {
|
||||
self.get_backup_path(backup, path, lock).map(|mut inodes| {
|
||||
inodes.pop().unwrap()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn find_versions<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
path: P,
|
||||
fn find_versions<P: AsRef<Path>>(&mut self, path: P, lock: &OnlineMode
|
||||
) -> Result<Vec<(String, Inode)>, RepositoryError> {
|
||||
let path = path.as_ref();
|
||||
let mut versions = HashMap::new();
|
||||
for (name, backup) in try!(self.get_all_backups()) {
|
||||
match self.get_backup_inode(&backup, path) {
|
||||
match self.get_backup_inode(&backup, path, lock) {
|
||||
Ok(inode) => {
|
||||
versions.insert(
|
||||
(inode.file_type, inode.timestamp, inode.size),
|
||||
|
@ -476,12 +469,8 @@ impl BackupRepository {
|
|||
}
|
||||
|
||||
#[allow(needless_pass_by_value)]
|
||||
fn find_differences_recurse(
|
||||
&mut self,
|
||||
inode1: &Inode,
|
||||
inode2: &Inode,
|
||||
path: PathBuf,
|
||||
diffs: &mut Vec<(DiffType, PathBuf)>,
|
||||
fn find_differences_recurse(&mut self, inode1: &Inode, inode2: &Inode, path: PathBuf,
|
||||
diffs: &mut Vec<(DiffType, PathBuf)>, lock: &OnlineMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
if !inode1.is_same_meta(inode2) || inode1.data != inode2.data {
|
||||
diffs.push((DiffType::Mod, path.clone()));
|
||||
|
@ -504,13 +493,14 @@ impl BackupRepository {
|
|||
for (name, chunks2) in children2 {
|
||||
if let Some(chunks1) = children1.get(name) {
|
||||
if chunks1 != chunks2 {
|
||||
let inode1 = try!(self.get_inode(chunks1));
|
||||
let inode2 = try!(self.get_inode(chunks2));
|
||||
let inode1 = try!(self.get_inode(chunks1, lock));
|
||||
let inode2 = try!(self.get_inode(chunks2, lock));
|
||||
try!(self.find_differences_recurse(
|
||||
&inode1,
|
||||
&inode2,
|
||||
path.join(name),
|
||||
diffs
|
||||
diffs,
|
||||
lock
|
||||
));
|
||||
}
|
||||
} else {
|
||||
|
@ -527,10 +517,7 @@ impl BackupRepository {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
pub fn find_differences(
|
||||
&mut self,
|
||||
inode1: &Inode,
|
||||
inode2: &Inode,
|
||||
fn find_differences(&mut self, inode1: &Inode, inode2: &Inode, lock: &OnlineMode
|
||||
) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
|
||||
let mut diffs = vec![];
|
||||
let path = PathBuf::from("/");
|
||||
|
@ -538,25 +525,30 @@ impl BackupRepository {
|
|||
inode1,
|
||||
inode2,
|
||||
path,
|
||||
&mut diffs
|
||||
&mut diffs,
|
||||
lock
|
||||
));
|
||||
Ok(diffs)
|
||||
}
|
||||
|
||||
fn count_sizes_recursive(&mut self, inode: &Inode, sizes: &mut HashMap<u64, usize>, min_size: u64) -> Result<(), RepositoryError> {
|
||||
fn count_sizes_recursive(&mut self, inode: &Inode, sizes: &mut HashMap<u64, usize>,
|
||||
min_size: u64, lock: &OnlineMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
if inode.size >= min_size {
|
||||
*sizes.entry(inode.size).or_insert(0) += 1;
|
||||
}
|
||||
if let Some(ref children) = inode.children {
|
||||
for chunks in children.values() {
|
||||
let ch = try!(self.get_inode(chunks));
|
||||
try!(self.count_sizes_recursive(&ch, sizes, min_size));
|
||||
let ch = try!(self.get_inode(chunks, lock));
|
||||
try!(self.count_sizes_recursive(&ch, sizes, min_size, lock));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_duplicates_recursive(&mut self, inode: &Inode, path: &Path, sizes: &HashMap<u64, usize>, hashes: &mut HashMap<Hash, (Vec<PathBuf>, u64)>) -> Result<(), RepositoryError> {
|
||||
fn find_duplicates_recursive(&mut self, inode: &Inode, path: &Path, sizes: &HashMap<u64, usize>,
|
||||
hashes: &mut HashMap<Hash, (Vec<PathBuf>, u64)>, lock: &OnlineMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
let path = path.join(&inode.name);
|
||||
if sizes.get(&inode.size).cloned().unwrap_or(0) > 1 {
|
||||
if let Some(ref data) = inode.data {
|
||||
|
@ -567,21 +559,22 @@ impl BackupRepository {
|
|||
}
|
||||
if let Some(ref children) = inode.children {
|
||||
for chunks in children.values() {
|
||||
let ch = try!(self.get_inode(chunks));
|
||||
try!(self.find_duplicates_recursive(&ch, &path, sizes, hashes));
|
||||
let ch = try!(self.get_inode(chunks, lock));
|
||||
try!(self.find_duplicates_recursive(&ch, &path, sizes, hashes, lock));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn find_duplicates(&mut self, inode: &Inode, min_size: u64) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError> {
|
||||
fn find_duplicates(&mut self, inode: &Inode, min_size: u64, lock: &OnlineMode
|
||||
) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError> {
|
||||
let mut sizes = HashMap::new();
|
||||
try!(self.count_sizes_recursive(inode, &mut sizes, min_size));
|
||||
try!(self.count_sizes_recursive(inode, &mut sizes, min_size, lock));
|
||||
let mut hashes = HashMap::new();
|
||||
if let Some(ref children) = inode.children {
|
||||
for chunks in children.values() {
|
||||
let ch = try!(self.get_inode(chunks));
|
||||
try!(self.find_duplicates_recursive(&ch, Path::new(""), &sizes, &mut hashes));
|
||||
let ch = try!(self.get_inode(chunks, lock));
|
||||
try!(self.find_duplicates_recursive(&ch, Path::new(""), &sizes, &mut hashes, lock));
|
||||
}
|
||||
}
|
||||
let dups = hashes.into_iter().map(|(_,v)| v).filter(|&(ref v, _)| v.len() > 1).collect();
|
||||
|
|
|
@@ -21,83 +21,127 @@ quick_error!{
    }
}

impl BackupRepository {
    fn check_inode_contents(
        &mut self,
        inode: &Inode,
        checked: &mut Bitmap,

pub trait RepositoryIntegrityIO {
    fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap, lock: &OnlineMode
    ) -> Result<(), RepositoryError>;

    fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap,
        lock: &OnlineMode
    ) -> Result<(), RepositoryError>;

    fn check_and_repair_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap,
        lock: &BackupMode
    ) -> Result<Option<ChunkList>, RepositoryError>;

    fn evacuate_broken_backup(&self, name: &str, lock: &BackupMode) -> Result<(), RepositoryError>;

    fn check_backup_inode(&mut self, name: &str, backup: &mut BackupFile, path: &Path,
        lock: &OnlineMode
    ) -> Result<(), RepositoryError>;

    fn check_and_repair_backup_inode(&mut self, name: &str, backup: &mut BackupFile, path: &Path,
        lock: &BackupMode,
    ) -> Result<(), RepositoryError>;

    fn check_backup(&mut self, name: &str, backup: &mut BackupFile, lock: &OnlineMode
    ) -> Result<(), RepositoryError>;

    fn check_and_repair_backup(&mut self, name: &str, backup: &mut BackupFile, lock: &BackupMode
    ) -> Result<(), RepositoryError>;

    fn check_backups(&mut self, lock: &OnlineMode) -> Result<(), RepositoryError>;

    fn check_and_repair_backups(&mut self, lock: &BackupMode) -> Result<(), RepositoryError>;
}

impl RepositoryIntegrityIO for Repository {
|
||||
fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap, lock: &OnlineMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
match inode.data {
|
||||
None |
|
||||
Some(FileData::Inline(_)) => (),
|
||||
Some(FileData::ChunkedDirect(ref chunks)) => {
|
||||
try!(self.repo.mark_chunks(checked, chunks, true));
|
||||
try!(self.mark_chunks(checked, chunks, true));
|
||||
}
|
||||
Some(FileData::ChunkedIndirect(ref chunks)) => {
|
||||
if try!(self.repo.mark_chunks(checked, chunks, false)) {
|
||||
let chunk_data = try!(self.get_data(chunks));
|
||||
if try!(self.mark_chunks(checked, chunks, false)) {
|
||||
let chunk_data = try!(self.get_data(chunks, lock));
|
||||
let chunks2 = ChunkList::read_from(&chunk_data);
|
||||
try!(self.repo.mark_chunks(checked, &chunks2, true));
|
||||
try!(self.repo.mark_chunks(checked, chunks, true));
|
||||
try!(self.mark_chunks(checked, &chunks2, true));
|
||||
try!(self.mark_chunks(checked, chunks, true));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_subtree(
|
||||
&mut self,
|
||||
path: PathBuf,
|
||||
chunks: &[Chunk],
|
||||
checked: &mut Bitmap,
|
||||
repair: bool,
|
||||
fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap,
|
||||
lock: &OnlineMode,
|
||||
) -> Result<(), RepositoryError> {
|
||||
let mut modified = false;
|
||||
match self.mark_chunks(checked, chunks, false) {
|
||||
Ok(false) => return Ok(()),
|
||||
Ok(true) => (),
|
||||
Err(err) => return Err(InodeIntegrityError::BrokenInode(path, Box::new(err)).into()),
|
||||
}
|
||||
let mut inode = try!(self.get_inode(chunks, lock));
|
||||
// Mark the content chunks as used
|
||||
if let Err(err) = self.check_inode_contents(&inode, checked, lock) {
|
||||
return Err(InodeIntegrityError::MissingInodeData(path, Box::new(err)).into());
|
||||
}
|
||||
// Put children in to do
|
||||
if let Some(ref mut children) = inode.children {
|
||||
for (name, chunks) in children.iter_mut() {
|
||||
try!(self.check_subtree(path.join(name), chunks, checked, lock));
|
||||
}
|
||||
}
|
||||
try!(self.mark_chunks(checked, chunks, true));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_and_repair_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap,
|
||||
lock: &BackupMode,
|
||||
) -> Result<Option<ChunkList>, RepositoryError> {
|
||||
let mut modified = false;
|
||||
match self.repo.mark_chunks(checked, chunks, false) {
|
||||
match self.mark_chunks(checked, chunks, false) {
|
||||
Ok(false) => return Ok(None),
|
||||
Ok(true) => (),
|
||||
Err(err) => return Err(InodeIntegrityError::BrokenInode(path, Box::new(err)).into()),
|
||||
}
|
||||
let mut inode = try!(self.get_inode(chunks));
|
||||
let mut inode = try!(self.get_inode(chunks, lock.as_online()));
|
||||
// Mark the content chunks as used
|
||||
if let Err(err) = self.check_inode_contents(&inode, checked) {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
|
||||
path,
|
||||
err
|
||||
);
|
||||
tr_info!("Removing inode data");
|
||||
inode.data = Some(FileData::Inline(vec![].into()));
|
||||
inode.size = 0;
|
||||
modified = true;
|
||||
} else {
|
||||
return Err(InodeIntegrityError::MissingInodeData(path, Box::new(err)).into());
|
||||
}
|
||||
if let Err(err) = self.check_inode_contents(&inode, checked, lock.as_online()) {
|
||||
tr_warn!(
|
||||
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
|
||||
path,
|
||||
err
|
||||
);
|
||||
tr_info!("Removing inode data");
|
||||
inode.data = Some(FileData::Inline(vec![].into()));
|
||||
inode.size = 0;
|
||||
modified = true;
|
||||
}
|
||||
// Put children in to do
|
||||
if let Some(ref mut children) = inode.children {
|
||||
let mut removed = vec![];
|
||||
for (name, chunks) in children.iter_mut() {
|
||||
match self.check_subtree(path.join(name), chunks, checked, repair) {
|
||||
match self.check_and_repair_subtree(path.join(name), chunks, checked, lock) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(c)) => {
|
||||
*chunks = c;
|
||||
modified = true;
|
||||
}
|
||||
Err(err) => {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
|
||||
path.join(name),
|
||||
err
|
||||
);
|
||||
tr_info!("Removing broken inode from backup");
|
||||
removed.push(name.to_string());
|
||||
modified = true;
|
||||
} else {
|
||||
return Err(err);
|
||||
}
|
||||
tr_warn!(
|
||||
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
|
||||
path.join(name),
|
||||
err
|
||||
);
|
||||
tr_info!("Removing broken inode from backup");
|
||||
removed.push(name.to_string());
|
||||
modified = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -106,19 +150,20 @@ impl BackupRepository {
|
|||
}
|
||||
}
|
||||
if modified {
|
||||
Ok(Some(try!(self.put_inode(&inode))))
|
||||
Ok(Some(try!(self.put_inode(&inode, lock))))
|
||||
} else {
|
||||
try!(self.repo.mark_chunks(checked, chunks, true));
|
||||
try!(self.mark_chunks(checked, chunks, true));
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn evacuate_broken_backup(&self, name: &str) -> Result<(), RepositoryError> {
|
||||
|
||||
fn evacuate_broken_backup(&self, name: &str, lock: &BackupMode) -> Result<(), RepositoryError> {
|
||||
tr_warn!(
|
||||
"The backup {} was corrupted and needed to be modified.",
|
||||
name
|
||||
);
|
||||
let src = self.layout.backup_path(name);
|
||||
let src = self.get_layout().backup_path(name);
|
||||
let mut dst = src.with_extension("backup.broken");
|
||||
let mut num = 1;
|
||||
while dst.exists() {
|
||||
|
@ -133,108 +178,64 @@ impl BackupRepository {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn check_backup(
|
||||
&mut self,
|
||||
name: &str,
|
||||
backup: &mut BackupFile,
|
||||
repair: bool,
|
||||
fn check_backup_inode(&mut self, name: &str, backup: &mut BackupFile, path: &Path,
|
||||
lock: &OnlineMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
let _lock = if repair {
|
||||
try!(self.repo.write_mode());
|
||||
Some(self.repo.lock(false))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
tr_info!("Checking backup...");
|
||||
let mut checked = self.repo.get_chunk_marker();
|
||||
match self.check_subtree(
|
||||
Path::new("").to_path_buf(),
|
||||
&backup.root,
|
||||
&mut checked,
|
||||
repair
|
||||
) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(chunks)) => {
|
||||
try!(self.repo.flush());
|
||||
backup.root = chunks;
|
||||
backup.modified = true;
|
||||
try!(self.evacuate_broken_backup(name));
|
||||
try!(self.save_backup(backup, name));
|
||||
}
|
||||
Err(err) => {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"The root of the backup {} has been corrupted\n\tcaused by: {}",
|
||||
name,
|
||||
err
|
||||
);
|
||||
try!(self.evacuate_broken_backup(name));
|
||||
} else {
|
||||
return Err(err);
|
||||
}
|
||||
tr_info!("Checking inode...");
|
||||
let mut checked = self.get_chunk_marker();
|
||||
let mut inodes = try!(self.get_backup_path(backup, path, lock));
|
||||
let mut inode = inodes.pop().unwrap();
|
||||
let mut modified = false;
|
||||
if let Err(err) = self.check_inode_contents(&inode, &mut checked, lock) {
|
||||
return Err(
|
||||
InodeIntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into()
|
||||
);
|
||||
}
|
||||
if let Some(ref mut children) = inode.children {
|
||||
for (name, chunks) in children.iter_mut() {
|
||||
try!(self.check_subtree(path.join(name), chunks, &mut checked, lock));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn check_backup_inode(
|
||||
&mut self,
|
||||
name: &str,
|
||||
backup: &mut BackupFile,
|
||||
path: &Path,
|
||||
repair: bool,
|
||||
fn check_and_repair_backup_inode(&mut self, name: &str, backup: &mut BackupFile, path: &Path,
|
||||
lock: &BackupMode
|
||||
) -> Result<(), RepositoryError> {
|
||||
let _lock = if repair {
|
||||
try!(self.repo.write_mode());
|
||||
Some(self.repo.lock(false))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
tr_info!("Checking inode...");
|
||||
let mut checked = self.repo.get_chunk_marker();
|
||||
let mut inodes = try!(self.get_backup_path(backup, path));
|
||||
let mut checked = self.get_chunk_marker();
|
||||
let mut inodes = try!(self.get_backup_path(backup, path, lock.as_online()));
|
||||
let mut inode = inodes.pop().unwrap();
|
||||
let mut modified = false;
|
||||
if let Err(err) = self.check_inode_contents(&inode, &mut checked) {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
|
||||
path,
|
||||
err
|
||||
);
|
||||
tr_info!("Removing inode data");
|
||||
inode.data = Some(FileData::Inline(vec![].into()));
|
||||
inode.size = 0;
|
||||
modified = true;
|
||||
} else {
|
||||
return Err(
|
||||
InodeIntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into()
|
||||
);
|
||||
}
|
||||
if let Err(err) = self.check_inode_contents(&inode, &mut checked, lock.as_online()) {
|
||||
tr_warn!(
|
||||
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
|
||||
path,
|
||||
err
|
||||
);
|
||||
tr_info!("Removing inode data");
|
||||
inode.data = Some(FileData::Inline(vec![].into()));
|
||||
inode.size = 0;
|
||||
modified = true;
|
||||
}
|
||||
if let Some(ref mut children) = inode.children {
|
||||
let mut removed = vec![];
|
||||
for (name, chunks) in children.iter_mut() {
|
||||
match self.check_subtree(path.join(name), chunks, &mut checked, repair) {
|
||||
match self.check_and_repair_subtree(path.join(name), chunks, &mut checked, lock) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(c)) => {
|
||||
*chunks = c;
|
||||
modified = true;
|
||||
}
|
||||
Err(err) => {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
|
||||
path.join(name),
|
||||
err
|
||||
);
|
||||
tr_info!("Removing broken inode from backup");
|
||||
removed.push(name.to_string());
|
||||
modified = true;
|
||||
} else {
|
||||
return Err(err);
|
||||
}
|
||||
tr_warn!(
|
||||
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
|
||||
path.join(name),
|
||||
err
|
||||
);
|
||||
tr_info!("Removing broken inode from backup");
|
||||
removed.push(name.to_string());
|
||||
modified = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -242,35 +243,66 @@ impl BackupRepository {
|
|||
children.remove(&name);
|
||||
}
|
||||
}
|
||||
let mut chunks = try!(self.put_inode(&inode));
|
||||
while let Some(mut parent) = inodes.pop() {
|
||||
parent.children.as_mut().unwrap().insert(inode.name, chunks);
|
||||
inode = parent;
|
||||
chunks = try!(self.put_inode(&inode));
|
||||
}
|
||||
if modified {
|
||||
try!(self.repo.flush());
|
||||
let mut chunks = try!(self.put_inode(&inode, lock));
|
||||
while let Some(mut parent) = inodes.pop() {
|
||||
parent.children.as_mut().unwrap().insert(inode.name, chunks);
|
||||
inode = parent;
|
||||
chunks = try!(self.put_inode(&inode, lock));
|
||||
}
|
||||
try!(self.flush(lock));
|
||||
backup.root = chunks;
|
||||
backup.modified = true;
|
||||
try!(self.evacuate_broken_backup(name));
|
||||
try!(self.save_backup(backup, name));
|
||||
try!(self.evacuate_broken_backup(name, lock));
|
||||
try!(self.save_backup(backup, name, lock));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn check_backups(&mut self, repair: bool) -> Result<(), RepositoryError> {
|
||||
let _lock = if repair {
|
||||
try!(self.repo.write_mode());
|
||||
Some(self.repo.lock(false))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
#[inline]
|
||||
fn check_backup(&mut self, name: &str, backup: &mut BackupFile, lock: &OnlineMode,
|
||||
) -> Result<(), RepositoryError> {
|
||||
tr_info!("Checking backup...");
|
||||
let mut checked = self.get_chunk_marker();
|
||||
try!(self.check_subtree(Path::new("").to_path_buf(), &backup.root, &mut checked, lock));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn check_and_repair_backup(&mut self, name: &str, backup: &mut BackupFile, lock: &BackupMode,
|
||||
) -> Result<(), RepositoryError> {
|
||||
tr_info!("Checking backup...");
|
||||
let mut checked = self.get_chunk_marker();
|
||||
match self.check_and_repair_subtree(Path::new("").to_path_buf(),
|
||||
&backup.root, &mut checked, lock
|
||||
) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(chunks)) => {
|
||||
try!(self.flush(lock));
|
||||
backup.root = chunks;
|
||||
backup.modified = true;
|
||||
try!(self.evacuate_broken_backup(name, lock));
|
||||
try!(self.save_backup(backup, name, lock));
|
||||
}
|
||||
Err(err) => {
|
||||
tr_warn!(
|
||||
"The root of the backup {} has been corrupted\n\tcaused by: {}",
|
||||
name,
|
||||
err
|
||||
);
|
||||
try!(self.evacuate_broken_backup(name, lock));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_backups(&mut self, lock: &OnlineMode) -> Result<(), RepositoryError> {
|
||||
tr_info!("Checking backups...");
|
||||
let mut checked = self.repo.get_chunk_marker();
|
||||
let mut checked = self.get_chunk_marker();
|
||||
let backup_map = match self.get_all_backups() {
|
||||
Ok(backup_map) => backup_map,
|
||||
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
|
||||
_failed))) => {
|
||||
_failed))) => {
|
||||
tr_warn!("Some backups could not be read, ignoring them");
|
||||
backup_map
|
||||
}
|
||||
|
@ -278,46 +310,55 @@ impl BackupRepository {
|
|||
};
|
||||
for (name, mut backup) in
|
||||
ProgressIter::new(tr!("checking backups"), backup_map.len(), backup_map.into_iter())
|
||||
{
|
||||
let path = format!("{}::", name);
|
||||
match self.check_subtree(
|
||||
Path::new(&path).to_path_buf(),
|
||||
&backup.root,
|
||||
&mut checked,
|
||||
repair
|
||||
) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(chunks)) => {
|
||||
try!(self.repo.flush());
|
||||
backup.root = chunks;
|
||||
backup.modified = true;
|
||||
try!(self.evacuate_broken_backup(&name));
|
||||
try!(self.save_backup(&backup, &name));
|
||||
}
|
||||
Err(err) => {
|
||||
if repair {
|
||||
tr_warn!(
|
||||
"The root of the backup {} has been corrupted\n\tcaused by: {}",
|
||||
name,
|
||||
err
|
||||
);
|
||||
try!(self.evacuate_broken_backup(&name));
|
||||
} else {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let path = format!("{}::", name);
|
||||
try!(self.check_subtree(Path::new(&path).to_path_buf(), &backup.root,
|
||||
&mut checked, lock));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn check_bundles(&mut self, full: bool, repair: bool) -> Result<(), RepositoryError> {
|
||||
self.repo.check_bundles(full, repair)
|
||||
}
|
||||
|
||||
pub fn check_repository(&mut self, repair: bool) -> Result<(), RepositoryError> {
|
||||
self.repo.check_repository(repair)
|
||||
fn check_and_repair_backups(&mut self, lock: &BackupMode) -> Result<(), RepositoryError> {
|
||||
tr_info!("Checking backups...");
|
||||
let mut checked = self.get_chunk_marker();
|
||||
let backup_map = match self.get_all_backups() {
|
||||
Ok(backup_map) => backup_map,
|
||||
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
|
||||
_failed))) => {
|
||||
tr_warn!("Some backups could not be read, ignoring them");
|
||||
backup_map
|
||||
}
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
for (name, mut backup) in
|
||||
ProgressIter::new(tr!("checking backups"), backup_map.len(), backup_map.into_iter())
|
||||
{
|
||||
let path = format!("{}::", name);
|
||||
match self.check_and_repair_subtree(
|
||||
Path::new(&path).to_path_buf(),
|
||||
&backup.root,
|
||||
&mut checked,
|
||||
lock
|
||||
) {
|
||||
Ok(None) => (),
|
||||
Ok(Some(chunks)) => {
|
||||
try!(self.flush(lock));
|
||||
backup.root = chunks;
|
||||
backup.modified = true;
|
||||
try!(self.evacuate_broken_backup(&name, lock));
|
||||
try!(self.save_backup(&backup, &name, lock));
|
||||
}
|
||||
Err(err) => {
|
||||
tr_warn!(
|
||||
"The root of the backup {} has been corrupted\n\tcaused by: {}",
|
||||
name,
|
||||
err
|
||||
);
|
||||
try!(self.evacuate_broken_backup(&name, lock));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
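
How the read-only and repairing integrity passes are meant to be selected, sketched against the check_repo and check_and_repair_repo wrappers shown later in mod.rs; the flag combination is illustrative:

// Sketch only: a plain check runs under an online lock, a repair run needs vacuum mode.
fn verify(repo: &mut BackupRepository, repair: bool) -> Result<(), RepositoryError> {
    if repair {
        try!(repo.check_and_repair_repo(true, true, false));
    } else {
        try!(repo.check_repo(true, true, false));
    }
    Ok(())
}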
|
@@ -0,0 +1,45 @@
use ::repository::ChunkRepositoryLayout;

use std::path::PathBuf;

pub trait BackupRepositoryLayout {
    fn config_path(&self) -> PathBuf;
    fn keys_path(&self) -> PathBuf;
    fn excludes_path(&self) -> PathBuf;
    fn backups_path(&self) -> PathBuf;
    fn backup_path(&self, name: &str) -> PathBuf;
    fn remote_exists(&self) -> bool;
    fn remote_readme_path(&self) -> PathBuf;
}

impl<P: AsRef<ChunkRepositoryLayout>> BackupRepositoryLayout for P {
    fn config_path(&self) -> PathBuf {
        self.as_ref().base_path().join("config.yaml")
    }

    fn keys_path(&self) -> PathBuf {
        self.as_ref().base_path().join("keys")
    }

    fn excludes_path(&self) -> PathBuf {
        self.as_ref().base_path().join("excludes")
    }

    fn backups_path(&self) -> PathBuf {
        self.as_ref().base_path().join("remote/backups")
    }

    fn backup_path(&self, name: &str) -> PathBuf {
        self.backups_path().join(format!("{}.backup", name))
    }

    fn remote_exists(&self) -> bool {
        self.as_ref().remote_bundles_path().exists() && self.backups_path().exists() &&
            self.as_ref().remote_locks_path().exists()
    }

    fn remote_readme_path(&self) -> PathBuf {
        self.as_ref().base_path().join("remote/README.md")
    }

}
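
Because of the blanket impl above, any Arc<ChunkRepositoryLayout> picks up these helpers; a small illustration (the printed paths depend on the layout's base_path, and the backup name is a placeholder):

// Sketch only: base_path(), remote_bundles_path() and remote_locks_path()
// come from the ChunkRepositoryLayout trait in ::repository.
fn show_paths(layout: &Arc<ChunkRepositoryLayout>) {
    println!("config:  {:?}", layout.config_path());
    println!("backups: {:?}", layout.backups_path());
    println!("backup:  {:?}", layout.backup_path("daily/monday"));
    println!("remote exists: {}", layout.remote_exists());
}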
@@ -7,12 +7,16 @@ use std::io::{Read, Write};
use super::*;


impl BackupRepository {
    pub fn create_inode<P: AsRef<Path>>(
        &mut self,
        path: P,
        reference: Option<&Inode>,
    ) -> Result<Inode, RepositoryError> {
pub trait RepositoryMetadataIO {
    fn create_inode<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>, lock: &BackupMode) -> Result<Inode, RepositoryError>;
    fn put_inode(&mut self, inode: &Inode, lock: &BackupMode) -> Result<ChunkList, RepositoryError>;
    fn get_inode(&mut self, chunks: &[Chunk], lock: &OnlineMode) -> Result<Inode, RepositoryError>;
    fn save_inode_at<P: AsRef<Path>>(&mut self, inode: &Inode, path: P, lock: &OnlineMode) -> Result<(), RepositoryError>;
    fn get_inode_children(&mut self, inode: &Inode, lock: &OnlineMode) -> Result<Vec<Inode>, RepositoryError>;
}

impl RepositoryMetadataIO for Repository {
    fn create_inode<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>, lock: &BackupMode) -> Result<Inode, RepositoryError> {
        let mut inode = try!(Inode::get_from(path.as_ref()));
        if inode.file_type == FileType::File && inode.size > 0 {
            if let Some(reference) = reference {
@ -27,13 +31,13 @@ impl BackupRepository {
|
|||
try!(file.read_to_end(&mut data));
|
||||
inode.data = Some(FileData::Inline(data.into()));
|
||||
} else {
|
||||
let mut chunks = try!(self.repo.put_stream(BundleMode::Data, &mut file));
|
||||
let mut chunks = try!(self.put_stream(BundleMode::Data, &mut file, lock));
|
||||
if chunks.len() < 10 {
|
||||
inode.data = Some(FileData::ChunkedDirect(chunks));
|
||||
} else {
|
||||
let mut chunk_data = Vec::with_capacity(chunks.encoded_size());
|
||||
chunks.write_to(&mut chunk_data).unwrap();
|
||||
chunks = try!(self.repo.put_data(BundleMode::Meta, &chunk_data));
|
||||
chunks = try!(self.put_data(BundleMode::Meta, &chunk_data, lock));
|
||||
inode.data = Some(FileData::ChunkedIndirect(chunks));
|
||||
}
|
||||
}
|
||||
|
@ -42,20 +46,27 @@ impl BackupRepository {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
pub fn put_inode(&mut self, inode: &Inode) -> Result<ChunkList, RepositoryError> {
|
||||
self.repo.put_data(BundleMode::Meta, &try!(inode.encode()))
|
||||
fn put_inode(&mut self, inode: &Inode, lock: &BackupMode) -> Result<ChunkList, RepositoryError> {
|
||||
self.put_data(BundleMode::Meta, &try!(inode.encode()), lock)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_inode(&mut self, chunks: &[Chunk]) -> Result<Inode, RepositoryError> {
|
||||
Ok(try!(Inode::decode(&try!(self.get_data(chunks)))))
|
||||
fn get_inode(&mut self, chunks: &[Chunk], lock: &OnlineMode) -> Result<Inode, RepositoryError> {
|
||||
Ok(try!(Inode::decode(&try!(self.get_data(chunks, lock)))))
|
||||
}
|
||||
|
||||
pub fn save_inode_at<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
inode: &Inode,
|
||||
path: P,
|
||||
) -> Result<(), RepositoryError> {
|
||||
#[inline]
|
||||
fn get_inode_children(&mut self, inode: &Inode, lock: &OnlineMode) -> Result<Vec<Inode>, RepositoryError> {
|
||||
let mut res = vec![];
|
||||
if let Some(ref children) = inode.children {
|
||||
for chunks in children.values() {
|
||||
res.push(try!(self.get_inode(chunks, lock)))
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
fn save_inode_at<P: AsRef<Path>>(&mut self, inode: &Inode, path: P, lock: &OnlineMode) -> Result<(), RepositoryError> {
|
||||
if let Some(mut file) = try!(inode.create_at(path.as_ref())) {
|
||||
if let Some(ref contents) = inode.data {
|
||||
match *contents {
|
||||
|
@ -63,12 +74,12 @@ impl BackupRepository {
|
|||
try!(file.write_all(data));
|
||||
}
|
||||
FileData::ChunkedDirect(ref chunks) => {
|
||||
try!(self.repo.get_stream(chunks, &mut file));
|
||||
try!(self.get_stream(chunks, &mut file, lock));
|
||||
}
|
||||
FileData::ChunkedIndirect(ref chunks) => {
|
||||
let chunk_data = try!(self.get_data(chunks));
|
||||
let chunk_data = try!(self.get_data(chunks, lock));
|
||||
let chunks = ChunkList::read_from(&chunk_data);
|
||||
try!(self.repo.get_stream(&chunks, &mut file));
|
||||
try!(self.get_stream(&chunks, &mut file, lock));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
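
The metadata trait gives the basic store/load cycle for inodes; a hedged sketch of that round trip, with the lock values assumed to come from the repository's mode wrappers rather than from user code:

// Sketch only: BackupMode::as_online() is used the same way elsewhere in this commit.
fn roundtrip(repo: &mut Repository, path: &Path, lock: &BackupMode) -> Result<(), RepositoryError> {
    let inode = try!(repo.create_inode(path, None, lock));
    let chunks = try!(repo.put_inode(&inode, lock));
    let loaded = try!(repo.get_inode(&chunks, lock.as_online()));
    debug_assert_eq!(inode.name, loaded.name);
    Ok(())
}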
|
|
@@ -6,15 +6,20 @@ mod backup;
mod integrity;
mod vacuum;
mod metadata;
mod layout;

pub use self::backup::{BackupOptions, BackupError, DiffType};
pub use self::backup::{BackupOptions, BackupError, DiffType, RepositoryBackupIO};
pub use self::backup_file::{BackupFile, BackupFileError};
pub use self::inode::{Inode, FileData, FileType, InodeError};
pub use self::integrity::InodeIntegrityError;
pub use self::integrity::{InodeIntegrityError, RepositoryIntegrityIO};
pub use self::layout::BackupRepositoryLayout;
pub use self::metadata::RepositoryMetadataIO;
pub use self::vacuum::RepositoryVacuumIO;
pub use self::tarfile::RepositoryTarfileIO;

use ::prelude::*;

use std::path::Path;
use std::path::{Path, PathBuf};
use std::collections::HashMap;
use std::sync::Arc;
use std::fs::{self, File};
@ -24,16 +29,76 @@ use std::io::Write;
|
|||
const DEFAULT_EXCLUDES: &[u8] = include_bytes!("../../docs/excludes.default");
|
||||
|
||||
|
||||
|
||||
pub struct BackupRepository {
|
||||
layout: Arc<RepositoryLayout>,
|
||||
crypto: Arc<Crypto>,
|
||||
repo: Repository
|
||||
pub struct CheckOptions {
|
||||
all_backups: bool,
|
||||
single_backup: Option<String>,
|
||||
subpath: Option<PathBuf>,
|
||||
index: bool,
|
||||
bundles: bool,
|
||||
bundle_data: bool,
|
||||
repair: bool
|
||||
}
|
||||
|
||||
impl CheckOptions {
|
||||
pub fn new() -> CheckOptions {
|
||||
CheckOptions {
|
||||
all_backups: false,
|
||||
single_backup: None,
|
||||
subpath: None,
|
||||
index: false,
|
||||
bundles: false,
|
||||
bundle_data: false,
|
||||
repair: false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn all_backups(&mut self) -> &mut Self {
|
||||
self.all_backups = true;
|
||||
self.single_backup = None;
|
||||
self.subpath = None;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn single_backup(&mut self, backup: &str) -> &mut Self {
|
||||
self.all_backups = false;
|
||||
self.single_backup = Some(backup.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn subpath(&mut self, subpath: &Path) -> &mut Self {
|
||||
self.subpath = Some(subpath.to_path_buf());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn index(&mut self, index: bool) -> &mut Self {
|
||||
self.index = index;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn bundles(&mut self, bundles: bool) -> &mut Self {
|
||||
self.bundles = bundles;
|
||||
self.bundle_data &= bundles;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn bundle_data(&mut self, bundle_data: bool) -> &mut Self {
|
||||
self.bundle_data = bundle_data;
|
||||
self.bundles |= bundle_data;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn repair(&mut self, repair: bool) -> &mut Self {
|
||||
self.repair = repair;
|
||||
self
|
||||
}
|
||||
}
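
The builder is meant to be chained; note how bundles() and bundle_data() keep each other consistent. A usage sketch:

// Sketch only: request a check of all backups plus bundle headers, without repair.
fn full_check_options() -> CheckOptions {
    let mut options = CheckOptions::new();
    options.all_backups().bundles(true).repair(false);
    options
}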
|
||||
|
||||
|
||||
pub struct BackupRepository(Repository);
|
||||
|
||||
impl BackupRepository {
|
||||
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(path: P, config: &Config, remote: R) -> Result<Self, RepositoryError> {
|
||||
let layout = Arc::new(RepositoryLayout::new(path.as_ref()));
|
||||
let layout: Arc<ChunkRepositoryLayout> = Arc::new(path.as_ref().to_owned());
|
||||
try!(fs::create_dir(layout.base_path()));
|
||||
try!(File::create(layout.excludes_path()).and_then(|mut f| {
|
||||
f.write_all(DEFAULT_EXCLUDES)
|
||||
|
@ -41,36 +106,28 @@ impl BackupRepository {
|
|||
try!(fs::create_dir_all(layout.backups_path()));
|
||||
try!(fs::create_dir(layout.keys_path()));
|
||||
let crypto = Arc::new(try!(Crypto::open(layout.keys_path())));
|
||||
Ok(BackupRepository {
|
||||
crypto: crypto.clone(),
|
||||
layout: layout.clone(),
|
||||
repo: try!(Repository::create(layout, config, crypto, remote))
|
||||
})
|
||||
Ok(BackupRepository(try!(Repository::create(layout, config, crypto, remote))))
|
||||
}
|
||||
|
||||
#[allow(unknown_lints, useless_let_if_seq)]
|
||||
pub fn open<P: AsRef<Path>>(path: P, online: bool) -> Result<Self, RepositoryError> {
|
||||
let layout = Arc::new(RepositoryLayout::new(path.as_ref()));
|
||||
let layout: Arc<ChunkRepositoryLayout> = Arc::new(path.as_ref().to_owned());
|
||||
let crypto = Arc::new(try!(Crypto::open(layout.keys_path())));
|
||||
Ok(BackupRepository {
|
||||
crypto: crypto.clone(),
|
||||
layout: layout.clone(),
|
||||
repo: try!(Repository::open(layout, crypto, online))
|
||||
})
|
||||
Ok(BackupRepository(try!(Repository::open(layout, crypto, online))))
|
||||
}
|
||||
|
||||
pub fn import<P: AsRef<Path>, R: AsRef<Path>>(path: P, remote: R, key_files: Vec<String>) -> Result<Self, RepositoryError> {
|
||||
let config = Config::default();
|
||||
let mut repo = try!(Self::create(&path, &config, remote));
|
||||
for file in key_files {
|
||||
try!(repo.crypto.register_keyfile(file));
|
||||
try!(repo.0.get_crypto().register_keyfile(file));
|
||||
}
|
||||
repo = try!(Self::open(&path, true));
|
||||
let mut backups: Vec<(String, BackupFile)> = try!(repo.get_all_backups()).into_iter().collect();
|
||||
let mut backups: Vec<(String, BackupFile)> = try!(repo.0.get_all_backups()).into_iter().collect();
|
||||
backups.sort_by_key(|&(_, ref b)| b.timestamp);
|
||||
if let Some((name, backup)) = backups.pop() {
|
||||
tr_info!("Taking configuration from the last backup '{}'", name);
|
||||
repo.repo.set_config(backup.config);
|
||||
repo.0.set_config(backup.config);
|
||||
try!(repo.save_config())
|
||||
} else {
|
||||
tr_warn!(
|
||||
|
@ -80,69 +137,204 @@ impl BackupRepository {
|
|||
Ok(repo)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn has_backup(&self, name: &str) -> bool {
|
||||
self.0.has_backup(name)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_backup(&self, name: &str) -> Result<BackupFile, RepositoryError> {
|
||||
self.0.get_backup(name)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> {
|
||||
try!(self.repo.write_mode());
|
||||
try!(self.crypto.register_secret_key(public, secret));
|
||||
try!(self.0.get_crypto().register_secret_key(public, secret));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
#[inline]
|
||||
pub fn save_config(&mut self) -> Result<(), RepositoryError> {
|
||||
self.repo.save_config()
|
||||
self.0.localwrite_mode(|r, l| r.save_config(l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn set_encryption(&mut self, public: Option<&PublicKey>) {
|
||||
self.repo.set_encryption(public)
|
||||
self.0.set_encryption(public)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_config(&self) -> &Config {
|
||||
self.repo.get_config()
|
||||
self.0.get_config()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn set_config(&mut self, config: Config) {
|
||||
self.repo.set_config(config);
|
||||
self.0.set_config(config);
|
||||
}
|
||||
|
||||
pub fn get_layout(&self) -> &RepositoryLayout {
|
||||
&self.layout
|
||||
#[inline]
|
||||
pub fn get_layout(&self) -> Arc<ChunkRepositoryLayout> {
|
||||
self.0.get_layout()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn info(&self) -> RepositoryInfo {
|
||||
self.repo.info()
|
||||
self.0.info()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn check_index(&mut self, repair: bool) -> Result<(), RepositoryError> {
|
||||
self.repo.check_index(repair)
|
||||
pub fn check_repo(&mut self, index: bool, bundles: bool, bundle_data: bool) -> Result<IntegrityReport, RepositoryError> {
|
||||
self.0.online_mode(|r, l| Ok(r.check(index, bundles, bundle_data, l)))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn set_clean(&mut self) {
|
||||
self.repo.set_clean()
|
||||
pub fn check_and_repair_repo(&mut self, index: bool, bundles: bool, bundle_data: bool) -> Result<IntegrityReport, RepositoryError> {
|
||||
self.0.vacuum_mode(|r, l| r.check_and_repair(index, bundles, bundle_data, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn statistics(&self) -> RepositoryStatistics {
|
||||
self.repo.statistics()
|
||||
self.0.statistics()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn list_bundles(&self) -> Vec<&BundleInfo> {
|
||||
self.repo.list_bundles()
|
||||
self.0.list_bundles()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_bundle(&self, bundle: &BundleId) -> Option<&StoredBundle> {
|
||||
self.repo.get_bundle(bundle)
|
||||
self.0.get_bundle(bundle)
|
||||
}
|
||||
|
||||
pub fn get_chunk(&mut self, hash: Hash) -> Result<Option<Vec<u8>>, RepositoryError> {
|
||||
self.repo.get_chunk(hash)
|
||||
#[inline]
|
||||
pub fn get_all_backups(&self) -> Result<HashMap<String, BackupFile>, RepositoryError> {
|
||||
self.0.get_all_backups()
|
||||
}
|
||||
|
||||
pub fn get_data(&mut self, chunks: &[Chunk]) -> Result<Vec<u8>, RepositoryError> {
|
||||
self.repo.get_data(chunks)
|
||||
#[inline]
|
||||
pub fn get_backups<P: AsRef<Path>>(&self, path: P) -> Result<HashMap<String, BackupFile>, RepositoryError> {
|
||||
self.0.get_backups(path)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
|
||||
self.0.backup_mode(|r, l| r.delete_backup(name, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize,
|
||||
yearly: usize, force: bool) -> Result<(), RepositoryError>
|
||||
{
|
||||
self.0.backup_mode(|r, l| r.prune_backups(prefix, daily, weekly, monthly, yearly, force, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_root_inode(&mut self, backup: &BackupFile) -> Result<Inode, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.get_inode(&backup.root, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_inode_children(&mut self, inode: &Inode) -> Result<Vec<Inode>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.get_inode_children(inode, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode, path: P) -> Result<(), RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.restore_inode_tree(backup, inode, path, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn create_backup<P: AsRef<Path>>(&mut self, path: P, name: &str, reference: Option<&BackupFile>,
|
||||
options: &BackupOptions) -> Result<BackupFile, RepositoryError>
|
||||
{
|
||||
self.0.backup_mode(|r, l| r.create_backup(path, name, reference, options,l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut BackupFile, path: P
|
||||
) -> Result<(), RepositoryError> {
|
||||
self.0.backup_mode(|r, l| r.remove_backup_path(backup, path, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P) -> Result<Vec<Inode>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.get_backup_path(backup, path, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &BackupFile, path: P) -> Result<Inode, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.get_backup_inode(backup, path, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn find_differences(&mut self, inode1: &Inode, inode2: &Inode
|
||||
) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.find_differences(inode1, inode2, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn find_versions<P: AsRef<Path>>(&mut self, path: P
|
||||
) -> Result<Vec<(String, Inode)>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.find_versions(path, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn find_duplicates(&mut self, inode: &Inode, min_size: u64
|
||||
) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.find_duplicates(inode, min_size, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn analyze_usage(&mut self) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.analyze_usage(l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn vacuum(&mut self, ratio: f32, combine: bool, force: bool) -> Result<(), RepositoryError> {
|
||||
self.0.vacuum_mode(|r, l| r.vacuum(ratio, combine, force, l))
|
||||
}
|
||||
|
||||
pub fn mount_repository<P: AsRef<Path>>(&mut self, path: Option<&str>,
|
||||
mountpoint: P) -> Result<(), RepositoryError> {
|
||||
self.0.online_mode(|r, l| {
|
||||
let fs = try!(FuseFilesystem::from_repository(r, l, path));
|
||||
fs.mount(mountpoint)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn mount_backup<P: AsRef<Path>>(&mut self, backup: BackupFile,
|
||||
mountpoint: P) -> Result<(), RepositoryError> {
|
||||
self.0.online_mode(|r, l| {
|
||||
let fs = try!(FuseFilesystem::from_backup(r, l, backup));
|
||||
fs.mount(mountpoint)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn mount_inode<P: AsRef<Path>>(&mut self, backup: BackupFile, inode: Inode,
|
||||
mountpoint: P) -> Result<(), RepositoryError> {
|
||||
self.0.online_mode(|r, l| {
|
||||
let fs = try!(FuseFilesystem::from_inode(r, l, backup, inode));
|
||||
fs.mount(mountpoint)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn check(&mut self, options: CheckOptions) -> Result<(), RepositoryError> {
|
||||
unimplemented!()
|
||||
//TODO: implement
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P) -> Result<BackupFile, RepositoryError> {
|
||||
self.0.backup_mode(|r, l| r.import_tarfile(tarfile, l))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode, tarfile: P
|
||||
) -> Result<(), RepositoryError> {
|
||||
self.0.online_mode(|r, l| r.export_tarfile(backup, inode, tarfile, l))
|
||||
}
|
||||
|
||||
}
|
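
Putting the new facade together, a hedged end-to-end sketch that only uses the wrapper methods defined above; the repository path, backup name and the BackupOptions value are placeholders:

// Sketch only: open a repository, create a backup, then list the root entries.
fn backup_home(options: &BackupOptions) -> Result<(), RepositoryError> {
    let mut repo = try!(BackupRepository::open("/srv/zvault/repo", true));
    let backup = try!(repo.create_backup("/home", "home/2017-06-01", None, options));
    let root = try!(repo.get_root_inode(&backup));
    let children = try!(repo.get_inode_children(&root));
    tr_info!("Backup root has {} entries", children.len());
    Ok(())
}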
|
@ -150,21 +150,24 @@ impl FuseInode {

pub struct FuseFilesystem<'a> {
    next_id: u64,
    repository: &'a mut BackupRepository,
    inodes: HashMap<u64, FuseInodeRef>
    lock: &'a OnlineMode,
    repository: &'a mut Repository,
    inodes: HashMap<u64, FuseInodeRef>,
}

impl<'a> FuseFilesystem<'a> {
    pub fn new(repository: &'a mut BackupRepository) -> Result<Self, RepositoryError> {
    pub fn new(repository: &'a mut Repository, lock: &'a OnlineMode) -> Result<Self, RepositoryError> {
        Ok(FuseFilesystem {
            next_id: 1,
            lock,
            repository,
            inodes: HashMap::new()
        })
    }

    pub fn from_repository(
        repository: &'a mut BackupRepository,
        repository: &'a mut Repository,
        lock: &'a OnlineMode,
        path: Option<&str>,
    ) -> Result<Self, RepositoryError> {
        let mut backups = vec![];

@ -173,10 +176,10 @@ impl<'a> FuseFilesystem<'a> {
            None => try!(repository.get_all_backups()),
        };
        for (name, backup) in backup_map {
            let inode = try!(repository.get_inode(&backup.root));
            let inode = try!(repository.get_inode(&backup.root, lock));
            backups.push((name, backup, inode));
        }
        let mut fs = try!(FuseFilesystem::new(repository));
        let mut fs = try!(FuseFilesystem::new(repository, lock));
        let root = fs.add_virtual_directory("".to_string(), None);
        for (name, backup, mut inode) in backups {
            let mut parent = root.clone();

@ -196,21 +199,23 @@ impl<'a> FuseFilesystem<'a> {
    }

    pub fn from_backup(
        repository: &'a mut BackupRepository,
        repository: &'a mut Repository,
        lock: &'a OnlineMode,
        backup: BackupFile,
    ) -> Result<Self, RepositoryError> {
        let inode = try!(repository.get_inode(&backup.root));
        let mut fs = try!(FuseFilesystem::new(repository));
        let inode = try!(repository.get_inode(&backup.root, lock));
        let mut fs = try!(FuseFilesystem::new(repository, lock));
        fs.add_inode(inode, None, backup.user_names, backup.group_names);
        Ok(fs)
    }

    pub fn from_inode(
        repository: &'a mut BackupRepository,
        repository: &'a mut Repository,
        lock: &'a OnlineMode,
        backup: BackupFile,
        inode: Inode,
    ) -> Result<Self, RepositoryError> {
        let mut fs = try!(FuseFilesystem::new(repository));
        let mut fs = try!(FuseFilesystem::new(repository, lock));
        fs.add_inode(inode, None, backup.user_names, backup.group_names);
        Ok(fs)
    }

@ -290,7 +295,7 @@ impl<'a> FuseFilesystem<'a> {
        if let Some(chunks) = parent_mut.inode.children.as_ref().and_then(|c| c.get(name)) {
            child = Rc::new(RefCell::new(FuseInode {
                num: self.next_id,
                inode: try!(self.repository.get_inode(chunks)),
                inode: try!(self.repository.get_inode(chunks, self.lock)),
                parent: Some(parent.clone()),
                children: HashMap::new(),
                chunks: None,

@ -316,7 +321,7 @@ impl<'a> FuseFilesystem<'a> {
        if !parent_mut.children.contains_key(name) {
            let child = Rc::new(RefCell::new(FuseInode {
                num: self.next_id,
                inode: try!(self.repository.get_inode(chunks)),
                inode: try!(self.repository.get_inode(chunks, self.lock)),
                parent: Some(parent.clone()),
                children: HashMap::new(),
                chunks: None,

@ -344,7 +349,7 @@ impl<'a> FuseFilesystem<'a> {
                chunks = Some(c.clone());
            }
            Some(FileData::ChunkedIndirect(ref c)) => {
                let chunk_data = try!(self.repository.get_data(c));
                let chunk_data = try!(self.repository.get_data(c, self.lock));
                chunks = Some(ChunkList::read_from(&chunk_data));
            }
        }

@ -556,7 +561,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
                offset -= i64::from(len);
                continue;
            }
            let chunk = match fuse_try!(self.repository.get_chunk(hash), reply) {
            let chunk = match fuse_try!(self.repository.get_chunk(hash, self.lock), reply) {
                Some(chunk) => chunk,
                None => return reply.error(libc::EIO),
            };

@ -134,10 +134,27 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
}


impl BackupRepository {
    fn import_tar_entry<R: Read>(
        &mut self,
        entry: &mut tar::Entry<R>,
pub trait RepositoryTarfileIO {
    fn import_tar_entry<R: Read>(&mut self, entry: &mut tar::Entry<R>, lock: &BackupMode
    ) -> Result<Inode, RepositoryError>;
    fn import_tarfile_as_inode<R: Read>(&mut self, backup: &mut BackupFile, input: R,
        failed_paths: &mut Vec<PathBuf>, lock: &BackupMode
    ) -> Result<(Inode, ChunkList), RepositoryError>;
    fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P, lock: &BackupMode
    ) -> Result<BackupFile, RepositoryError>;
    fn export_xattrs<W: Write>(&mut self, inode: &Inode, tarfile: &mut tar::Builder<W>,
        lock: &OnlineMode
    ) -> Result<(), RepositoryError>;
    fn export_tarfile_recurse<W: Write>(&mut self, backup: &BackupFile, path: &Path,
        inode: Inode, tarfile: &mut tar::Builder<W>, skip_root: bool, lock: &OnlineMode
    ) -> Result<(), RepositoryError>;
    fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode,
        tarfile: P, lock: &OnlineMode
    ) -> Result<(), RepositoryError>;
}
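
With the tar import/export behind this trait, callers reach it through the same lock-token wrappers as the other backup operations. A rough usage sketch against the signatures above (`repo` is the high-level wrapper from the start of this diff; file paths and error handling are made-up example values):

// Sketch only; illustrative paths, simplified error handling.
fn tar_roundtrip(repo: &mut BackupRepository) -> Result<(), RepositoryError> {
    let backup = try!(repo.import_tarfile("/tmp/data.tar"));      // backup_mode: writes new bundles
    let root = try!(repo.get_backup_inode(&backup, "/"));         // online_mode: read-only access
    try!(repo.export_tarfile(&backup, root, "/tmp/export.tar"));  // online_mode: streams the data back out
    Ok(())
}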

impl RepositoryTarfileIO for Repository {
    fn import_tar_entry<R: Read>(&mut self, entry: &mut tar::Entry<R>, lock: &BackupMode
    ) -> Result<Inode, RepositoryError> {
        let mut inode = try!(inode_from_entry(entry));
        if inode.size < 100 {

@ -145,24 +162,21 @@ impl BackupRepository {
            try!(entry.read_to_end(&mut data));
            inode.data = Some(FileData::Inline(data.into()));
        } else {
            let mut chunks = try!(self.repo.put_stream(BundleMode::Data, entry));
            let mut chunks = try!(self.put_stream(BundleMode::Data, entry, lock));
            if chunks.len() < 10 {
                inode.data = Some(FileData::ChunkedDirect(chunks));
            } else {
                let mut chunk_data = Vec::with_capacity(chunks.encoded_size());
                chunks.write_to(&mut chunk_data).unwrap();
                chunks = try!(self.repo.put_data(BundleMode::Meta, &chunk_data));
                chunks = try!(self.put_data(BundleMode::Meta, &chunk_data, lock));
                inode.data = Some(FileData::ChunkedIndirect(chunks));
            }
        }
        Ok(inode)
    }
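
The importer above picks one of three representations: entries smaller than 100 bytes are stored inline in the inode, chunk lists shorter than 10 entries are referenced directly, and longer lists are serialized and stored as data themselves, leaving only an indirect reference in the inode. A standalone sketch of that decision with simplified stand-in types (thresholds copied from the code above):

// Simplified stand-ins for zvault's FileData variants, to illustrate the decision only.
enum Stored {
    Inline(Vec<u8>),            // file bytes kept in the inode itself
    ChunkedDirect(Vec<u64>),    // chunk ids kept in the inode
    ChunkedIndirect(Vec<u64>),  // chunk ids of the serialized chunk list
}

fn choose_storage(size: u64, data: Vec<u8>, chunk_ids: Vec<u64>) -> Stored {
    if size < 100 {
        Stored::Inline(data)                 // same threshold as `inode.size < 100`
    } else if chunk_ids.len() < 10 {
        Stored::ChunkedDirect(chunk_ids)     // same threshold as `chunks.len() < 10`
    } else {
        // The real code serializes the chunk list, stores it as meta data,
        // and keeps only the resulting (shorter) chunk list in the inode.
        Stored::ChunkedIndirect(chunk_ids)
    }
}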

    fn import_tarfile_as_inode<R: Read>(
        &mut self,
        backup: &mut BackupFile,
        input: R,
        failed_paths: &mut Vec<PathBuf>,
    fn import_tarfile_as_inode<R: Read>(&mut self, backup: &mut BackupFile, input: R,
        failed_paths: &mut Vec<PathBuf>, lock: &BackupMode
    ) -> Result<(Inode, ChunkList), RepositoryError> {
        let mut tarfile = tar::Archive::new(input);
        // Step 1: create inodes for all entries

@ -170,7 +184,7 @@ impl BackupRepository {
        for entry in try!(tarfile.entries()) {
            let mut entry = try!(entry);
            let path = try!(entry.path()).to_path_buf();
            match self.import_tar_entry(&mut entry) {
            match self.import_tar_entry(&mut entry, lock) {
                Ok(mut inode) => {
                    inode.cum_size = inode.size;
                    if inode.file_type == FileType::Directory {

@ -219,7 +233,7 @@ impl BackupRepository {
        }
        for path in childless {
            let (inode, _) = inodes.remove(&path).unwrap();
            let chunks = try!(self.put_inode(&inode));
            let chunks = try!(self.put_inode(&inode, lock));
            if let Some(parent_path) = path.parent() {
                if let Some(&mut (ref mut parent_inode, ref mut children)) =
                    inodes.get_mut(parent_path)

@ -265,23 +279,15 @@ impl BackupRepository {
            children.insert(inode.name, chunks);
        }
        root_inode.children = Some(children);
        let chunks = try!(self.put_inode(&root_inode));
        let chunks = try!(self.put_inode(&root_inode, lock));
        Ok((root_inode, chunks))
    }
    }

    pub fn import_tarfile<P: AsRef<Path>>(
        &mut self,
        tarfile: P,
    fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P, lock: &BackupMode
    ) -> Result<BackupFile, RepositoryError> {
        try!(self.repo.write_mode());
        let _lock = try!(self.repo.lock(false));
        if self.repo.is_dirty() {
            return Err(RepositoryError::Dirty);
        }
        try!(self.repo.set_dirty());
        let mut backup = BackupFile::default();
        backup.config = self.repo.get_config().clone();
        backup.config = self.get_config().clone();
        backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
        backup.path = tarfile.as_ref().to_string_lossy().to_string();
        let info_before = self.info();

@ -292,17 +298,19 @@ impl BackupRepository {
            try!(self.import_tarfile_as_inode(
                &mut backup,
                io::stdin(),
                &mut failed_paths
                &mut failed_paths,
                lock
            ))
        } else {
            try!(self.import_tarfile_as_inode(
                &mut backup,
                try!(File::open(tarfile)),
                &mut failed_paths
                &mut failed_paths,
                lock
            ))
        };
        backup.root = chunks;
        try!(self.repo.flush());
        try!(self.flush(lock));
        let elapsed = Local::now().signed_duration_since(start);
        backup.timestamp = start.timestamp();
        backup.total_data_size = root_inode.cum_size;

@ -315,7 +323,6 @@ impl BackupRepository {
        backup.bundle_count = info_after.bundle_count - info_before.bundle_count;
        backup.chunk_count = info_after.chunk_count - info_before.chunk_count;
        backup.avg_chunk_size = backup.deduplicated_data_size as f32 / backup.chunk_count as f32;
        self.repo.set_clean();
        if failed_paths.is_empty() {
            Ok(backup)
        } else {

@ -323,10 +330,8 @@ impl BackupRepository {
        }
    }

    fn export_xattrs<W: Write>(
        &mut self,
        inode: &Inode,
        tarfile: &mut tar::Builder<W>,
    fn export_xattrs<W: Write>(&mut self, inode: &Inode, tarfile: &mut tar::Builder<W>,
        lock: &OnlineMode
    ) -> Result<(), RepositoryError> {
        let mut pax = PaxBuilder::new();
        for (key, value) in &inode.xattrs {

@ -339,13 +344,8 @@ impl BackupRepository {
        Ok(())
    }

    fn export_tarfile_recurse<W: Write>(
        &mut self,
        backup: &BackupFile,
        path: &Path,
        inode: Inode,
        tarfile: &mut tar::Builder<W>,
        skip_root: bool,
    fn export_tarfile_recurse<W: Write>(&mut self, backup: &BackupFile, path: &Path, inode: Inode,
        tarfile: &mut tar::Builder<W>, skip_root: bool, lock: &OnlineMode
    ) -> Result<(), RepositoryError> {
        let path = if skip_root {
            path.to_path_buf()

@ -354,7 +354,7 @@ impl BackupRepository {
        };
        if inode.file_type != FileType::Directory || !skip_root {
            if !inode.xattrs.is_empty() {
                try!(self.export_xattrs(&inode, tarfile));
                try!(self.export_xattrs(&inode, tarfile, lock));
            }
            let mut header = tar::Header::new_gnu();
            header.set_size(inode.size);

@ -397,34 +397,32 @@ impl BackupRepository {
                None => try!(tarfile.append(&header, Cursor::new(&[]))),
                Some(FileData::Inline(data)) => try!(tarfile.append(&header, Cursor::new(data))),
                Some(FileData::ChunkedDirect(chunks)) => {
                    try!(tarfile.append(&header, self.repo.get_reader(chunks)))
                    try!(tarfile.append(&header, self.get_reader(chunks, lock)))
                }
                Some(FileData::ChunkedIndirect(chunks)) => {
                    let chunks = ChunkList::read_from(&try!(self.get_data(&chunks)));
                    try!(tarfile.append(&header, self.repo.get_reader(chunks)))
                    let chunks = ChunkList::read_from(&try!(self.get_data(&chunks, lock)));
                    try!(tarfile.append(&header, self.get_reader(chunks, lock)))
                }
            }
        }
        if let Some(children) = inode.children {
            for chunks in children.values() {
                let inode = try!(self.get_inode(chunks));
                let inode = try!(self.get_inode(chunks, lock));
                try!(self.export_tarfile_recurse(
                    backup,
                    &path,
                    inode,
                    tarfile,
                    false
                    false,
                    lock
                ));
            }
        }
        Ok(())
    }

    pub fn export_tarfile<P: AsRef<Path>>(
        &mut self,
        backup: &BackupFile,
        inode: Inode,
        tarfile: P,
    fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &BackupFile, inode: Inode, tarfile: P,
        lock: &OnlineMode
    ) -> Result<(), RepositoryError> {
        let tarfile = tarfile.as_ref();
        if tarfile == Path::new("-") {

@ -434,7 +432,8 @@ impl BackupRepository {
                Path::new(""),
                inode,
                &mut tarfile,
                true
                true,
                lock
            ));
            try!(tarfile.finish());
        } else {

@ -444,7 +443,8 @@ impl BackupRepository {
                Path::new(""),
                inode,
                &mut tarfile,
                true
                true,
                lock
            ));
            try!(tarfile.finish());
        }

@ -5,15 +5,23 @@ use super::*;
use std::collections::{VecDeque, HashSet};


impl BackupRepository {
    fn mark_used(
        &self,
        bundles: &mut HashMap<u32, BundleAnalysis>,
        chunks: &[Chunk],
pub trait RepositoryVacuumIO {
    fn mark_used(&self, bundles: &mut HashMap<u32, BundleAnalysis>, chunks: &[Chunk],
        lock: &OnlineMode
    ) -> Result<bool, RepositoryError>;
    fn analyze_usage(&mut self, lock: &OnlineMode
    ) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError>;
    fn vacuum(&mut self, ratio: f32, combine: bool, force: bool, lock: &VacuumMode
    ) -> Result<(), RepositoryError>;
}
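
Vacuuming now requires the strongest lock (`VacuumMode`) and, as the implementation below shows, downgrades it per step: `lock.as_backup()` for flushing and `lock.as_online()` for usage analysis. Through the high-level wrapper a call might look roughly like this (the ratio is an example value; judging by the code below, without `force` the run only reports what could be reclaimed):

// Sketch only; `repo` is the high-level wrapper shown earlier in this diff.
fn reclaim(repo: &mut BackupRepository) -> Result<(), RepositoryError> {
    // ratio 0.5: target bundles whose used share is low, combine: also merge
    // undersized bundles, force: actually rewrite instead of just reporting.
    try!(repo.vacuum(0.5, true, true));
    Ok(())
}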

impl RepositoryVacuumIO for Repository {
    fn mark_used(&self, bundles: &mut HashMap<u32, BundleAnalysis>, chunks: &[Chunk],
        lock: &OnlineMode
    ) -> Result<bool, RepositoryError> {
        let mut new = false;
        for &(hash, len) in chunks {
            if let Some(pos) = self.repo.get_chunk_location(hash) {
            if let Some(pos) = self.get_chunk_location(hash) {
                let bundle = pos.bundle;
                if let Some(bundle) = bundles.get_mut(&bundle) {
                    if !bundle.chunk_usage.get(pos.chunk as usize) {

@ -31,13 +39,10 @@ impl BackupRepository {
        Ok(new)
    }

    pub fn analyze_usage(&mut self) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
        if self.repo.is_dirty() {
            return Err(RepositoryError::Dirty);
        }
        try!(self.repo.set_dirty());
    fn analyze_usage(&mut self, lock: &OnlineMode
    ) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
        let mut usage = HashMap::new();
        for (id, bundle) in try!(self.repo.get_bundle_map()) {
        for (id, bundle) in try!(self.get_bundle_map()) {
            usage.insert(
                id,
                BundleAnalysis {

@ -53,22 +58,22 @@ impl BackupRepository {
            todo.push_back(backup.root);
        }
        while let Some(chunks) = todo.pop_back() {
            if !try!(self.mark_used(&mut usage, &chunks)) {
            if !try!(self.mark_used(&mut usage, &chunks, lock)) {
                continue;
            }
            let inode = try!(self.get_inode(&chunks));
            let inode = try!(self.get_inode(&chunks, lock));
            // Mark the content chunks as used
            match inode.data {
                None |
                Some(FileData::Inline(_)) => (),
                Some(FileData::ChunkedDirect(chunks)) => {
                    try!(self.mark_used(&mut usage, &chunks));
                    try!(self.mark_used(&mut usage, &chunks, lock));
                }
                Some(FileData::ChunkedIndirect(chunks)) => {
                    if try!(self.mark_used(&mut usage, &chunks)) {
                        let chunk_data = try!(self.get_data(&chunks));
                    if try!(self.mark_used(&mut usage, &chunks, lock)) {
                        let chunk_data = try!(self.get_data(&chunks, lock));
                        let chunks = ChunkList::read_from(&chunk_data);
                        try!(self.mark_used(&mut usage, &chunks));
                        try!(self.mark_used(&mut usage, &chunks, lock));
                    }
                }
            }

@ -79,23 +84,14 @@ impl BackupRepository {
                }
            }
        }
        self.repo.set_clean();
        Ok(usage)
    }

    pub fn vacuum(
        &mut self,
        ratio: f32,
        combine: bool,
        force: bool,
    fn vacuum(&mut self, ratio: f32, combine: bool, force: bool, lock: &VacuumMode
    ) -> Result<(), RepositoryError> {
        try!(self.repo.flush());
        tr_info!("Locking repository");
        try!(self.repo.write_mode());
        let _lock = try!(self.repo.lock(true));
        // analyze_usage will set the dirty flag
        try!(self.flush(lock.as_backup()));
        tr_info!("Analyzing chunk usage");
        let usage = try!(self.analyze_usage());
        let usage = try!(self.analyze_usage(lock.as_online()));
        let mut data_total = 0;
        let mut data_used = 0;
        for bundle in usage.values() {

@ -122,7 +118,7 @@ impl BackupRepository {
        let mut small_meta = vec![];
        let mut small_data = vec![];
        for (id, bundle) in &usage {
            if bundle.info.encoded_size * 4 < self.repo.get_config().bundle_size {
            if bundle.info.encoded_size * 4 < self.get_config().bundle_size {
                match bundle.info.mode {
                    BundleMode::Meta => small_meta.push(*id),
                    BundleMode::Data => small_data.push(*id),

@ -147,12 +143,10 @@ impl BackupRepository {
            to_file_size(rewrite_data as u64)
        );
        if !force {
            self.repo.set_clean();
            return Ok(());
        }
        let rewrite_bundles: Vec<_> = rewrite_bundles.into_iter().collect();
        try!(self.repo.rewrite_bundles(&rewrite_bundles, &usage));
        self.repo.set_clean();
        try!(self.rewrite_bundles(&rewrite_bundles, &usage, lock));
        Ok(())
    }
}
103  src/cli/mod.rs
@ -144,7 +144,7 @@ fn get_inode(repo: &mut BackupRepository, backup: &BackupFile, inode: Option<&St
        )
    } else {
        checked!(
            repo.get_inode(&backup.root),
            repo.get_root_inode(&backup),
            "load root inode",
            ErrorCode::LoadInode
        )

@ -655,7 +655,7 @@ pub fn run() -> Result<(), ErrorCode> {
            let result = if tar {
                repo.import_tarfile(&src_path)
            } else {
                repo.create_backup_recursively(&src_path, reference_backup.as_ref(), &options)
                repo.create_backup(&src_path, &backup_name, reference_backup.as_ref(), &options)
            };
            let backup = match result {
                Ok(backup) => {

@ -671,11 +671,6 @@ pub fn run() -> Result<(), ErrorCode> {
                    return Err(ErrorCode::BackupRun);
                }
            };
            checked!(
                repo.save_backup(&backup, &backup_name),
                "save backup file",
                ErrorCode::SaveBackup
            );
            print_backup(&backup);
        }
        Arguments::Restore {

@ -719,11 +714,7 @@ pub fn run() -> Result<(), ErrorCode> {
                return Err(ErrorCode::BackupAlreadyExists);
            }
            let backup = try!(get_backup(&repo, &backup_name_src));
            checked!(
                repo.save_backup(&backup, &backup_name_dst),
                "save backup file",
                ErrorCode::SaveBackup
            );
            //TODO: implement
        }
        Arguments::Remove {
            repo_path,

@ -739,11 +730,6 @@ pub fn run() -> Result<(), ErrorCode> {
                    "remove backup subpath",
                    ErrorCode::RemoveRun
                );
                checked!(
                    repo.save_backup(&backup, &backup_name),
                    "save backup file",
                    ErrorCode::SaveBackup
                );
                tr_info!("The backup subpath has been deleted, run vacuum to reclaim space");
            } else if repo.get_layout().backups_path().join(&backup_name).is_dir() {
                let backups = checked!(

@ -830,44 +816,21 @@ pub fn run() -> Result<(), ErrorCode> {
            repair
        } => {
            let mut repo = try!(open_repository(&repo_path, true));
            let mut options = CheckOptions::new();
            options.index(index).bundle_data(bundle_data).bundles(bundles).repair(repair);
            if let Some(backup_name) = backup_name {
                options.single_backup(&backup_name);
                if let Some(inode) = inode {
                    options.subpath(Path::new(&inode));
                }
            } else {
                options.all_backups();
            }
            checked!(
                repo.check_repository(repair),
                repo.check(options),
                "check repository",
                ErrorCode::CheckRun
            );
            if bundles {
                checked!(
                    repo.check_bundles(bundle_data, repair),
                    "check bundles",
                    ErrorCode::CheckRun
                );
            }
            if index {
                checked!(repo.check_index(repair), "check index", ErrorCode::CheckRun);
            }
            if let Some(backup_name) = backup_name {
                let mut backup = try!(get_backup(&repo, &backup_name));
                if let Some(path) = inode {
                    checked!(
                        repo.check_backup_inode(&backup_name, &mut backup, Path::new(&path), repair),
                        "check inode",
                        ErrorCode::CheckRun
                    )
                } else {
                    checked!(
                        repo.check_backup(&backup_name, &mut backup, repair),
                        "check backup",
                        ErrorCode::CheckRun
                    )
                }
            } else {
                checked!(
                    repo.check_backups(repair),
                    "check repository",
                    ErrorCode::CheckRun
                )
            }
            repo.set_clean();
            tr_info!("Integrity verified")
        }
        Arguments::List {

@ -890,15 +853,8 @@ pub fn run() -> Result<(), ErrorCode> {
                    ErrorCode::LoadInode
                );
                println!("{}", format_inode_one_line(&inode));
                if let Some(children) = inode.children {
                    for chunks in children.values() {
                        let inode = checked!(
                            repo.get_inode(chunks),
                            "load child inode",
                            ErrorCode::LoadInode
                        );
                        println!("- {}", format_inode_one_line(&inode));
                    }
                for ch in checked!(repo.get_inode_children(&inode), "load inodes", ErrorCode::LoadInode) {
                    println!("- {}", format_inode_one_line(&ch));
                }
                return Ok(());
            }

@ -969,10 +925,15 @@ pub fn run() -> Result<(), ErrorCode> {
            mount_point
        } => {
            let mut repo = try!(open_repository(&repo_path, true));
            let fs = if let Some(backup_name) = backup_name {
            tr_info!("Mounting the filesystem...");
            tr_info!(
                "Please unmount the filesystem via 'fusermount -u {}' when done.",
                mount_point
            );
            if let Some(backup_name) = backup_name {
                if repo.get_layout().backups_path().join(&backup_name).is_dir() {
                    checked!(
                        FuseFilesystem::from_repository(&mut repo, Some(&backup_name)),
                        repo.mount_repository(Some(&backup_name), mount_point),
                        "create fuse filesystem",
                        ErrorCode::FuseMount
                    )

@ -985,13 +946,13 @@ pub fn run() -> Result<(), ErrorCode> {
                        ErrorCode::LoadInode
                    );
                    checked!(
                        FuseFilesystem::from_inode(&mut repo, backup, inode),
                        repo.mount_inode(backup, inode, mount_point),
                        "create fuse filesystem",
                        ErrorCode::FuseMount
                    )
                } else {
                    checked!(
                        FuseFilesystem::from_backup(&mut repo, backup),
                        repo.mount_backup(backup, mount_point),
                        "create fuse filesystem",
                        ErrorCode::FuseMount
                    )

@ -999,21 +960,11 @@ pub fn run() -> Result<(), ErrorCode> {
                }
            } else {
                checked!(
                    FuseFilesystem::from_repository(&mut repo, None),
                    repo.mount_repository(None, mount_point),
                    "create fuse filesystem",
                    ErrorCode::FuseMount
                )
            };
            tr_info!("Mounting the filesystem...");
            tr_info!(
                "Please unmount the filesystem via 'fusermount -u {}' when done.",
                mount_point
            );
            checked!(
                fs.mount(&mount_point),
                "mount filesystem",
                ErrorCode::FuseMount
            );
            }
        }
        Arguments::Analyze { repo_path } => {
            let mut repo = try!(open_repository(&repo_path, true));

@ -3,13 +3,14 @@ pub use repository::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInf
                               BundleDb, BundleWriterError, StoredBundle, BundleStatistics};
pub use repository::chunking::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use repository::{Repository, Config, RepositoryError, RepositoryInfo,
                     IntegrityError, BundleAnalysis, RepositoryLayout, Location,
                     IntegrityError, BundleAnalysis, Location,
                     RepositoryStatistics, ChunkRepositoryLayout};
pub use repository::*;
pub use repository::index::{Index, IndexError, IndexStatistics};
pub use backups::mount::FuseFilesystem;
pub use backups::{BackupFile, BackupFileError, Inode, FileType, FileData, InodeError, BackupError,
                  BackupOptions, DiffType, InodeIntegrityError};
                  BackupOptions, DiffType, InodeIntegrityError, BackupRepositoryLayout,
                  RepositoryBackupIO, RepositoryMetadataIO, CheckOptions};
pub use translation::CowStr;
pub use backups::BackupRepository;

@ -63,14 +63,18 @@ pub struct ChunkMarker<'a> {
    repo: &'a Repository
}

impl<'a> ChunkMarker<'a> {
    pub fn mark_chunks(&mut self, chunks: &[Chunk], set_marked: bool) -> Result<bool, RepositoryError> {
impl Repository {
    pub fn get_chunk_marker(&self) -> Bitmap {
        Bitmap::new(self.index.capacity())
    }

    pub fn mark_chunks(&mut self, bitmap: &mut Bitmap, chunks: &[Chunk], set_marked: bool) -> Result<bool, RepositoryError> {
        let mut new = false;
        for &(hash, _len) in chunks {
            if let Some(pos) = self.repo.index.pos(&hash) {
                new |= !self.marked.get(pos);
            if let Some(pos) = self.index.pos(&hash) {
                new |= !bitmap.get(pos);
                if set_marked {
                    self.marked.set(pos);
                    bitmap.set(pos);
                }
            } else {
                return Err(IntegrityError::MissingChunk(hash).into());

@ -78,16 +82,6 @@ impl<'a> ChunkMarker<'a> {
            }
        }
        Ok(new)
    }
}


impl Repository {
    pub fn get_chunk_marker(&self) -> ChunkMarker {
        ChunkMarker {
            marked: Bitmap::new( self.index.capacity()),
            repo: self
        }
    }
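
After this change the marking state is a plain `Bitmap` owned by the caller instead of a `ChunkMarker` that borrows the repository, so marking no longer holds a long-lived shared borrow. A rough usage sketch against the new signatures (assumed to run inside a function returning `Result<_, RepositoryError>`; `repo` and `chunks` come from the surrounding code):

// Sketch only; `repo: &mut Repository`, `chunks: &[Chunk]`.
let mut marked = repo.get_chunk_marker();                 // Bitmap sized to the index capacity
if try!(repo.mark_chunks(&mut marked, chunks, true)) {
    // at least one of these chunks had not been marked before this call
}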

    pub fn check_bundle_map(&mut self) -> ModuleIntegrityReport {
        tr_info!("Checking bundle map...");

@ -212,7 +206,7 @@ impl Repository {
        let mut errors = vec![];
        let mut bundles = vec![];
        for (id, err) in self.bundles.check(full, lock) {
            bundles.push(id);
            bundles.push(id.clone());
            errors.push(IntegrityError::BundleIntegrity(id, err));
        }
        (ModuleIntegrityReport { errors_fixed: vec![], errors_unfixed: errors }, bundles)

@ -27,73 +27,27 @@ pub trait ChunkRepositoryLayout {
}


#[derive(Clone)]
pub struct RepositoryLayout(PathBuf);

impl RepositoryLayout {
    pub fn new<P: AsRef<Path>>(path: P) -> Self {
        RepositoryLayout(path.as_ref().to_path_buf())
    }

    #[inline]
    pub fn config_path(&self) -> PathBuf {
        self.0.join("config.yaml")
    }

    #[inline]
    pub fn keys_path(&self) -> PathBuf {
        self.0.join("keys")
    }

    #[inline]
    pub fn excludes_path(&self) -> PathBuf {
        self.0.join("excludes")
    }

    #[inline]
    pub fn backups_path(&self) -> PathBuf {
        self.0.join("remote/backups")
    }

    #[inline]
    pub fn backup_path(&self, name: &str) -> PathBuf {
        self.backups_path().join(format!("{}.backup", name))
    }

    #[inline]
    pub fn remote_exists(&self) -> bool {
        self.remote_bundles_path().exists() && self.backups_path().exists() &&
            self.remote_locks_path().exists()
    }

    #[inline]
    pub fn remote_readme_path(&self) -> PathBuf {
        self.0.join("remote/README.md")
    }

    fn bundle_path(&self, bundle: &BundleId, mut folder: PathBuf, mut count: usize) -> PathBuf {
        let file = bundle.to_string().to_owned() + ".bundle";
        {
            let mut rest = &file as &str;
            while count >= 100 {
                if rest.len() < 10 {
                    break;
                }
                folder = folder.join(&rest[0..2]);
                rest = &rest[2..];
                count /= 250;
fn bundle_path(bundle: &BundleId, mut folder: PathBuf, mut count: usize) -> PathBuf {
    let file = bundle.to_string().to_owned() + ".bundle";
    {
        let mut rest = &file as &str;
        while count >= 100 {
            if rest.len() < 10 {
                break;
            }
            folder = folder.join(&rest[0..2]);
            rest = &rest[2..];
            count /= 250;
        }
        folder.join(Path::new(&file))
    }

    folder.join(Path::new(&file))
}
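
`bundle_path` is now a free function rather than a method on the layout. It shards bundle files into two-character subdirectories once a directory would hold roughly 100 entries, dividing the remaining count by 250 per level. A standalone, runnable sketch of the same fan-out (plain strings instead of `BundleId`; thresholds copied from the code above):

use std::path::{Path, PathBuf};

// Standalone re-implementation of the sharding scheme, for illustration only.
// `id_hex` stands in for BundleId::to_string().
fn sharded_bundle_path(id_hex: &str, mut folder: PathBuf, mut count: usize) -> PathBuf {
    let file = id_hex.to_owned() + ".bundle";
    {
        let mut rest: &str = &file;
        while count >= 100 {
            if rest.len() < 10 {
                break;
            }
            folder = folder.join(&rest[0..2]);  // one two-character subdirectory per level
            rest = &rest[2..];
            count /= 250;                        // each level is assumed to hold ~250 entries
        }
    }
    folder.join(Path::new(&file))
}

fn main() {
    // With ~30 000 bundles the path gains two levels of sharding:
    // remote/bundles/de/ad/deadbeefcafebabe.bundle
    let p = sharded_bundle_path("deadbeefcafebabe", PathBuf::from("remote/bundles"), 30_000);
    println!("{}", p.display());
}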


impl ChunkRepositoryLayout for RepositoryLayout {
impl ChunkRepositoryLayout for PathBuf {
    #[inline]
    fn base_path(&self) -> &Path {
        &self.0
        &self
    }

    #[inline]

@ -103,52 +57,52 @@ impl ChunkRepositoryLayout for RepositoryLayout {

    #[inline]
    fn index_path(&self) -> PathBuf {
        self.0.join("index")
        self.join("index")
    }

    #[inline]
    fn bundle_map_path(&self) -> PathBuf {
        self.0.join("bundles.map")
        self.join("bundles.map")
    }

    #[inline]
    fn local_locks_path(&self) -> PathBuf {
        self.0.join("locks")
        self.join("locks")
    }

    #[inline]
    fn remote_path(&self) -> PathBuf {
        self.0.join("remote")
        self.join("remote")
    }

    #[inline]
    fn remote_locks_path(&self) -> PathBuf {
        self.0.join("remote/locks")
        self.join("remote/locks")
    }

    #[inline]
    fn remote_bundles_path(&self) -> PathBuf {
        self.0.join("remote/bundles")
        self.join("remote/bundles")
    }

    #[inline]
    fn local_bundles_path(&self) -> PathBuf {
        self.0.join("bundles/cached")
        self.join("bundles/cached")
    }

    #[inline]
    fn remote_bundle_path(&self, _bundle: &BundleId, count: usize) -> PathBuf {
        self.bundle_path(&BundleId::random(), self.remote_bundles_path(), count)
        bundle_path(&BundleId::random(), self.remote_bundles_path(), count)
    }

    #[inline]
    fn local_bundle_path(&self, bundle: &BundleId, count: usize) -> PathBuf {
        self.bundle_path(bundle, self.local_bundles_path(), count)
        bundle_path(bundle, self.local_bundles_path(), count)
    }

    #[inline]
    fn temp_bundles_path(&self) -> PathBuf {
        self.0.join("bundles/temp")
        self.join("bundles/temp")
    }

    #[inline]

@ -158,29 +112,29 @@ impl ChunkRepositoryLayout for RepositoryLayout {

    #[inline]
    fn local_bundle_cache_path(&self) -> PathBuf {
        self.0.join("bundles/local.cache")
        self.join("bundles/local.cache")
    }

    #[inline]
    fn remote_bundle_cache_path(&self) -> PathBuf {
        self.0.join("bundles/remote.cache")
        self.join("bundles/remote.cache")
    }

    #[inline]
    fn dirtyfile_path(&self) -> PathBuf {
        self.0.join("dirty")
        self.join("dirty")
    }


    #[inline]
    fn config_path(&self) -> PathBuf {
        self.0.join("config.yaml")
        self.join("config.yaml")
    }

    #[inline]
    fn remote_readme_path(&self) -> PathBuf {
        self.0.join("remote/README.md")
        self.join("remote/README.md")
    }

}
@ -22,9 +22,9 @@ use std::io::Write;

pub use self::error::RepositoryError;
pub use self::config::Config;
pub use self::layout::{RepositoryLayout, ChunkRepositoryLayout};
pub use self::layout::ChunkRepositoryLayout;
use self::bundle_map::BundleMap;
pub use self::integrity::IntegrityError;
pub use self::integrity::{IntegrityError, ModuleIntegrityReport, IntegrityReport};
pub use self::info::{BundleAnalysis, RepositoryInfo, RepositoryStatistics};

const REPOSITORY_README: &[u8] = include_bytes!("../../docs/repository_readme.md");

@ -319,6 +319,14 @@ impl Repository {
    pub fn set_config(&mut self, config: Config) {
        self.config = config;
    }

    pub fn get_crypto(&self) -> Arc<Crypto> {
        self.crypto.clone()
    }

    pub fn get_layout(&self) -> Arc<ChunkRepositoryLayout> {
        self.layout.clone()
    }
}
