mirror of https://github.com/dswd/zvault

commit 0d94a75613 (parent 05290f3198)
Code cleanup
@@ -76,6 +76,26 @@ fn print_backup(backup: &Backup) {
     println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64));
 }
 
+fn print_inode(inode: &Inode) {
+    println!("Name: {}", inode.name);
+    println!("Type: {}", inode.file_type);
+    println!("Size: {}", to_file_size(inode.size));
+    println!("Permissions: {:3o}", inode.mode);
+    println!("User: {}", inode.user);
+    println!("Group: {}", inode.group);
+    println!("Access time: {}", Local.timestamp(inode.access_time, 0).to_rfc2822());
+    println!("Modification time: {}", Local.timestamp(inode.modify_time, 0).to_rfc2822());
+    if let Some(ref target) = inode.symlink_target {
+        println!("Symlink target: {}", target);
+    }
+    if let Some(ref children) = inode.children {
+        println!("Children:");
+        for name in children.keys() {
+            println!(" - {}", name);
+        }
+    }
+}
+
 fn print_backups(backup_map: &HashMap<String, Backup>) {
     for (name, backup) in backup_map {
         println!("{:25} {:>32} {:5} files, {:4} dirs, {:>10}",
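Aside (illustrative, not part of the commit): the new print_inode relies on Rust's octal format specifier for the mode bits. A minimal standalone sketch of how "{:3o}" behaves, and how "{:03o}" would zero-pad instead:

    fn main() {
        let mode: u32 = 0o644;
        // "{:3o}" prints the value in octal, right-aligned and space-padded to width 3.
        assert_eq!(format!("{:3o}", mode), "644");
        // A narrower mode such as 0o40 prints as " 40"; "{:03o}" would give "040".
        assert_eq!(format!("{:3o}", 0o40), " 40");
        assert_eq!(format!("{:03o}", 0o40), "040");
        println!("Permissions: {:3o}", mode);
    }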
@@ -168,7 +188,7 @@ pub fn run() {
                 info!("No reference backup found, doing a full scan instead");
             }
         }
-        let backup = match repo.create_backup(&src_path, reference_backup.as_ref()) {
+        let backup = match repo.create_backup_recursively(&src_path, reference_backup.as_ref()) {
             Ok(backup) => backup,
             Err(RepositoryError::Backup(BackupError::FailedPaths(backup, _failed_paths))) => {
                 warn!("Some files are missing form the backup");
@@ -265,11 +285,12 @@ pub fn run() {
             }
         },
         Arguments::Info{repo_path, backup_name, inode} => {
-            let repo = open_repository(&repo_path);
+            let mut repo = open_repository(&repo_path);
             if let Some(backup_name) = backup_name {
                 let backup = get_backup(&repo, &backup_name);
-                if let Some(_inode) = inode {
-                    error!("Displaying information on single inodes is not implemented yet");
+                if let Some(inode) = inode {
+                    let inode = checked(repo.get_backup_inode(&backup, inode), "load subpath inode");
+                    print_inode(&inode);
                 } else {
                     print_backup(&backup);
                 }
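Aside (illustrative, not part of the commit): the new branch passes the Result through a checked(...) helper before printing. The sketch below shows one plausible shape for such a helper, under the assumption that it unwraps the value or reports which step failed and exits; the name, signature, and exit code are hypothetical and the actual zvault helper may differ.

    use std::fmt::Display;
    use std::process::exit;

    // Hypothetical helper: unwrap a Result or print what failed and exit.
    fn checked<T, E: Display>(result: Result<T, E>, step: &str) -> T {
        match result {
            Ok(value) => value,
            Err(err) => {
                eprintln!("Failed to {}: {}", step, err);
                exit(3)
            }
        }
    }

    fn main() {
        let parsed = checked("42".parse::<u32>(), "parse the number");
        println!("parsed: {}", parsed);
    }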

src/index.rs (10 lines changed)

@@ -460,23 +460,25 @@ impl Index {
     }
 
     #[inline]
-    pub fn walk<F>(&self, mut f: F) where F: FnMut(&Hash, &Location) {
+    pub fn walk<F, E>(&self, mut f: F) -> Result<(), E> where F: FnMut(&Hash, &Location) -> Result<(), E> {
         for pos in 0..self.capacity {
             let entry = &self.data[pos];
             if entry.is_used() {
-                f(&entry.key, &entry.data);
+                try!(f(&entry.key, &entry.data));
             }
         }
+        Ok(())
     }
 
     #[inline]
-    pub fn walk_mut<F>(&mut self, mut f: F) where F: FnMut(&Hash, &mut Location) {
+    pub fn walk_mut<F, E>(&mut self, mut f: F) -> Result<(), E> where F: FnMut(&Hash, &mut Location) -> Result<(), E> {
         for pos in 0..self.capacity {
             let entry = &mut self.data[pos];
             if entry.is_used() {
-                f(&entry.key, &mut entry.data);
+                try!(f(&entry.key, &mut entry.data));
             }
         }
+        Ok(())
     }
 
     #[inline]
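Aside (illustrative, not part of the commit): making walk and walk_mut generic over an error type lets callers propagate failures out of the visiting closure instead of collecting state on the side. A standalone sketch of the same fallible-visitor pattern, using simplified stand-in types rather than the real Index, Hash, and Location:

    // Simplified stand-in for an index that visits every used entry.
    struct TinyIndex {
        entries: Vec<Option<(u64, String)>>,
    }

    impl TinyIndex {
        // Visit each used entry; the closure may fail and the error is propagated.
        fn walk<F, E>(&self, mut f: F) -> Result<(), E>
            where F: FnMut(&u64, &String) -> Result<(), E>
        {
            for entry in &self.entries {
                if let Some((key, data)) = entry {
                    f(key, data)?;
                }
            }
            Ok(())
        }
    }

    fn main() {
        let index = TinyIndex {
            entries: vec![Some((1, "bundle-a".into())), None, Some((2, "".into()))],
        };
        // The closure returns Err for an empty location, which aborts the walk early.
        let result: Result<(), String> = index.walk(|key, data| {
            if data.is_empty() {
                Err(format!("entry {} has no location", key))
            } else {
                Ok(())
            }
        });
        println!("{:?}", result);
    }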
@@ -19,7 +19,7 @@ extern crate filetime;
 extern crate libc;
 
 pub mod util;
-pub mod bundledb;
+mod bundledb;
 pub mod index;
 mod chunker;
 mod repository;
@@ -307,34 +307,19 @@ impl Repository {
         self.restore_inode_tree(inode, path)
     }
 
-    #[allow(dead_code)]
-    pub fn create_backup<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>) -> Result<Backup, RepositoryError> {
-        let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
-        let mut scan_stack = vec![(path.as_ref().to_owned(), reference_inode)];
-        let mut save_stack = vec![];
-        let mut directories = HashMap::new();
-        let mut backup = Backup::default();
-        backup.config = self.config.clone();
-        backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
-        backup.path = path.as_ref().to_string_lossy().to_string();
-        let info_before = self.info();
-        let start = Local::now();
-        let mut failed_paths = vec![];
-        while let Some((path, reference_inode)) = scan_stack.pop() {
-            // Create an inode for this path containing all attributes and contents
-            // (for files) but no children (for directories)
-            let mut inode = match self.create_inode(&path, reference_inode.as_ref()) {
-                Ok(inode) => inode,
-                Err(RepositoryError::Inode(err)) => {
-                    warn!("Failed to backup inode {}", err);
-                    failed_paths.push(path);
-                    continue
-                },
-                Err(err) => return Err(err)
-            };
+    pub fn create_backup_recurse<P: AsRef<Path>>(
+        &mut self,
+        path: P,
+        reference: Option<&Inode>,
+        backup: &mut Backup,
+        failed_paths: &mut Vec<PathBuf>
+    ) -> Result<ChunkList, RepositoryError> {
+        let path = path.as_ref();
+        let mut inode = try!(self.create_inode(path, reference));
         let meta_size = 1000; // add 1000 for encoded metadata
         backup.total_data_size += inode.size + meta_size;
-        if let Some(ref ref_inode) = reference_inode {
+        if let Some(ref_inode) = reference {
             if !ref_inode.is_unchanged(&inode) {
                 backup.changed_data_size += inode.size + meta_size;
             }
@@ -343,72 +328,42 @@ impl Repository {
         }
         if inode.file_type == FileType::Directory {
             backup.dir_count +=1;
-            // For directories we need to put all children on the stack too, so there will be inodes created for them
-            // Also we put directories on the save stack to save them in order
-            save_stack.push(path.clone());
-            inode.children = Some(BTreeMap::new());
-            directories.insert(path.clone(), inode);
-            let dirlist = match fs::read_dir(&path) {
-                Ok(dirlist) => dirlist,
-                Err(err) => {
-                    warn!("Failed to read {:?}: {}", &path, err);
-                    failed_paths.push(path);
-                    continue
-                }
-            };
-            for ch in dirlist {
-                let child = match ch {
-                    Ok(child) => child,
-                    Err(err) => {
-                        warn!("Failed to read {:?}: {}", &path, err);
-                        continue
-                    }
-                };
+            let mut children = BTreeMap::new();
+            for ch in try!(fs::read_dir(path)) {
+                let child = try!(ch);
                 let name = child.file_name().to_string_lossy().to_string();
-                let ref_child = reference_inode.as_ref()
+                let ref_child = reference.as_ref()
                     .and_then(|inode| inode.children.as_ref())
                     .and_then(|map| map.get(&name))
                     .and_then(|chunks| self.get_inode(chunks).ok());
-                scan_stack.push((child.path(), ref_child));
+                let child_path = child.path();
+                let chunks = match self.create_backup_recurse(&child_path, ref_child.as_ref(), backup, failed_paths) {
+                    Ok(chunks) => chunks,
+                    Err(_) => {
+                        failed_paths.push(child_path);
+                        continue
                     }
+                };
+                children.insert(name, chunks);
+            }
+            inode.children = Some(children);
         } else {
             backup.file_count +=1;
-            // Non-directories are stored directly and the chunks are put into the children map of their parents
-            if let Some(parent) = path.parent() {
-                let parent = parent.to_owned();
-                if !directories.contains_key(&parent) {
-                    // This is a backup of one one file, put it in the directories map so it will be saved later
-                    assert!(scan_stack.is_empty() && save_stack.is_empty() && directories.is_empty());
-                    save_stack.push(path.clone());
-                    directories.insert(path.clone(), inode);
-                } else {
-                    let mut parent = directories.get_mut(&parent).unwrap();
-                    let chunks = try!(self.put_inode(&inode));
-                    let children = parent.children.as_mut().unwrap();
-                    children.insert(inode.name.clone(), chunks);
-                }
-            }
-        }
-        loop {
-            let path = save_stack.pop().unwrap();
-            // Now that all children have been saved the directories can be saved in order, adding their chunks to their parents as well
-            let inode = directories.remove(&path).unwrap();
-            let chunks = try!(self.put_inode(&inode));
-            if let Some(parent) = path.parent() {
-                let parent = parent.to_owned();
-                if let Some(ref mut parent) = directories.get_mut(&parent) {
-                    let children = parent.children.as_mut().unwrap();
-                    children.insert(inode.name.clone(), chunks);
-                } else if save_stack.is_empty() {
-                    backup.root = chunks;
-                    break
-                }
-            } else if save_stack.is_empty() {
-                backup.root = chunks;
-                break
-            }
         }
+        self.put_inode(&inode)
     }
 
+    #[allow(dead_code)]
+    pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>) -> Result<Backup, RepositoryError> {
+        let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
+        let mut backup = Backup::default();
+        backup.config = self.config.clone();
+        backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
+        backup.path = path.as_ref().to_string_lossy().to_string();
+        let info_before = self.info();
+        let start = Local::now();
+        let mut failed_paths = vec![];
+        backup.root = try!(self.create_backup_recurse(path, reference_inode.as_ref(), &mut backup, &mut failed_paths));
         try!(self.flush());
         let elapsed = Local::now().signed_duration_since(start);
         backup.date = start.timestamp();
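Aside (illustrative, not part of the commit): the rewrite replaces the explicit scan/save stacks with direct recursion — each directory backs up its children first, collects their chunk lists, and only then stores its own inode, while paths that fail are recorded and skipped rather than aborting the run. A standalone sketch of that shape using only std; the names and the String "summary" stand in for the repository's inode storage and chunk lists:

    use std::fs;
    use std::io;
    use std::path::{Path, PathBuf};

    // Recurse over a directory tree: children are handled before their parent,
    // and failed paths are collected instead of failing the whole run.
    fn backup_recurse(path: &Path, failed: &mut Vec<PathBuf>) -> io::Result<String> {
        let meta = fs::symlink_metadata(path)?;
        if meta.is_dir() {
            let mut children = Vec::new();
            for entry in fs::read_dir(path)? {
                let child_path = entry?.path();
                match backup_recurse(&child_path, failed) {
                    Ok(summary) => children.push(summary),
                    Err(_) => {
                        failed.push(child_path);
                        continue;
                    }
                }
            }
            Ok(format!("dir {:?} ({} children)", path, children.len()))
        } else {
            Ok(format!("file {:?} ({} bytes)", path, meta.len()))
        }
    }

    fn main() -> io::Result<()> {
        let mut failed = Vec::new();
        let root = backup_recurse(Path::new("."), &mut failed)?;
        println!("{}", root);
        println!("failed paths: {:?}", failed);
        Ok(())
    }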
@@ -32,16 +32,9 @@ quick_error!{
 
 impl Repository {
     fn check_index_chunks(&self) -> Result<(), RepositoryError> {
-        let mut pos = 0;
-        loop {
-            pos = if let Some(pos) = self.index.next_entry(pos) {
-                pos
-            } else {
-                break
-            };
-            let entry = self.index.get_entry(pos).unwrap();
+        self.index.walk(|_hash, location| {
             // Lookup bundle id from map
-            let bundle_id = try!(self.get_bundle_id(entry.data.bundle));
+            let bundle_id = try!(self.get_bundle_id(location.bundle));
             // Get bundle object from bundledb
             let bundle = if let Some(bundle) = self.bundles.get_bundle_info(&bundle_id) {
                 bundle
@@ -49,12 +42,11 @@ impl Repository {
                 return Err(RepositoryIntegrityError::MissingBundle(bundle_id.clone()).into())
             };
             // Get chunk from bundle
-            if bundle.chunk_count <= entry.data.chunk as usize {
-                return Err(RepositoryIntegrityError::NoSuchChunk(bundle_id.clone(), entry.data.chunk).into())
-            }
-            pos += 1;
+            if bundle.chunk_count <= location.chunk as usize {
+                return Err(RepositoryIntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into())
             }
             Ok(())
+        })
     }
 
     fn check_repository(&self) -> Result<(), RepositoryError> {
@@ -8,6 +8,7 @@ use std::fs::{self, File, Permissions};
 use std::os::linux::fs::MetadataExt;
 use std::os::unix::fs::{PermissionsExt, symlink};
 use std::io::{self, Read, Write};
+use std::fmt;
 
 
 quick_error!{
@@ -78,6 +79,15 @@ serde_impl!(FileType(u8) {
     Directory => 1,
     Symlink => 2
 });
+impl fmt::Display for FileType {
+    fn fmt(&self, format: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        match *self {
+            FileType::File => write!(format, "file"),
+            FileType::Directory => write!(format, "directory"),
+            FileType::Symlink => write!(format, "symlink")
+        }
+    }
+}
 
 
 #[derive(Debug, Clone)]
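Aside (illustrative, not part of the commit): implementing fmt::Display is what lets the new print_inode write println!("Type: {}", inode.file_type) directly. The same pattern on a standalone enum (Kind is a stand-in for FileType):

    use std::fmt;

    #[derive(Debug)]
    enum Kind {
        File,
        Directory,
        Symlink,
    }

    impl fmt::Display for Kind {
        fn fmt(&self, format: &mut fmt::Formatter) -> fmt::Result {
            match *self {
                Kind::File => write!(format, "file"),
                Kind::Directory => write!(format, "directory"),
                Kind::Symlink => write!(format, "symlink"),
            }
        }
    }

    fn main() {
        // "{}" uses Display, "{:?}" uses Debug.
        println!("Type: {}", Kind::Directory);
        println!("Debug: {:?}", Kind::Symlink);
    }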