// zvault/src/repository/backup.rs

use ::prelude::*;
use std::fs;
use std::path::{self, Path, PathBuf};
use std::collections::{HashMap, BTreeMap, VecDeque};
use std::os::linux::fs::MetadataExt;
use chrono::prelude::*;
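
// Errors specific to creating and modifying backups.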
quick_error!{
    #[derive(Debug)]
    #[allow(unknown_lints, large_enum_variant)]
    pub enum BackupError {
        FailedPaths(backup: Backup, failed: Vec<PathBuf>) {
            description("Some paths could not be backed up")
            display("Backup error: some paths could not be backed up")
        }
        RemoveRoot {
            description("The root of a backup can not be removed")
            display("Backup error: the root of a backup can not be removed")
        }
    }
}
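
/// Options that control how a backup is created.
///
/// `same_device` restricts the traversal to the device (filesystem) of the
/// backup root; directory entries whose `st_dev` differs from their parent's
/// are skipped.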
pub struct BackupOptions {
    pub same_device: bool
}

impl Repository {
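    /// Loads all backups stored in the repository, keyed by their name.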
    pub fn get_backups(&self) -> Result<HashMap<String, Backup>, RepositoryError> {
        Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), &self.backups_path)))
    }
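
    /// Loads a single backup by name.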
    pub fn get_backup(&self, name: &str) -> Result<Backup, RepositoryError> {
        Ok(try!(Backup::read_from(&self.crypto.lock().unwrap(), self.backups_path.join(name))))
    }
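
    /// Stores backup metadata under the given name, creating parent directories as needed.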
    pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
        let path = &self.backups_path.join(name);
        try!(fs::create_dir_all(path.parent().unwrap()));
        Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path)))
    }
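
    /// Deletes the backup file with the given name and removes any parent
    /// directories that become empty as a result.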
    pub fn delete_backup(&self, name: &str) -> Result<(), RepositoryError> {
        let mut path = self.backups_path.join(name);
        try!(fs::remove_file(&path));
        loop {
            path = path.parent().unwrap().to_owned();
            if fs::remove_dir(&path).is_err() {
                break
            }
        }
        Ok(())
    }
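
    /// Prunes backups whose names start with `prefix`, keeping one backup for each
    /// of the last `daily` days, `weekly` weeks, `monthly` months and `yearly` years.
    /// Backups that are not kept are only logged unless `force` is set, in which
    /// case they are actually deleted.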
    pub fn prune_backups(&self, prefix: &str, daily: Option<usize>, weekly: Option<usize>, monthly: Option<usize>, yearly: Option<usize>, force: bool) -> Result<(), RepositoryError> {
        let mut backups = Vec::new();
        let backup_map = match self.get_backups() {
            Ok(backup_map) => backup_map,
            Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
                warn!("Some backups could not be read, ignoring them");
                backup_map
            },
            Err(err) => return Err(err)
        };
        for (name, backup) in backup_map {
            if name.starts_with(prefix) {
                let date = Local.timestamp(backup.date, 0);
                backups.push((name, date, backup));
            }
        }
        backups.sort_by_key(|backup| backup.2.date);
        let mut keep = Bitmap::new(backups.len());
        // Marks one backup to keep for each of the `max` most recent periods,
        // where `keyfn` maps a backup date to its period (day, week, month or year).
        fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(backups: &[(String, DateTime<Local>, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) {
            let mut unique = VecDeque::with_capacity(max+1);
            let mut last = None;
            for (i, backup) in backups.iter().enumerate() {
                let val = keyfn(&backup.1);
                let cur = Some(val);
                if cur != last {
                    last = cur;
                    unique.push_back(i);
                    if unique.len() > max {
                        unique.pop_front();
                    }
                }
            }
            for i in unique {
                keep.set(i);
            }
        }
        if let Some(max) = yearly {
            mark_needed(&backups, &mut keep, max, |d| d.year());
        }
        if let Some(max) = monthly {
            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month()));
        }
        if let Some(max) = weekly {
            mark_needed(&backups, &mut keep, max, |d| (d.isoweekdate().0, d.isoweekdate().1));
        }
        if let Some(max) = daily {
            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month(), d.day()));
        }
        let mut remove = Vec::new();
        for (i, backup) in backups.into_iter().enumerate() {
            if !keep.get(i) {
                remove.push(backup.0);
            }
        }
        info!("Removing the following backups: {:?}", remove);
        if force {
            for name in remove {
                try!(self.delete_backup(&name));
            }
        }
        Ok(())
    }
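
    /// Restores the given inode and everything below it into `path`,
    /// traversing the tree breadth-first.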
    pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, inode: Inode, path: P) -> Result<(), RepositoryError> {
        let _lock = try!(self.lock(false));
        let mut queue = VecDeque::new();
        queue.push_back((path.as_ref().to_owned(), inode));
        while let Some((path, inode)) = queue.pop_front() {
            try!(self.save_inode_at(&inode, &path));
            if inode.file_type == FileType::Directory {
                let path = path.join(inode.name);
                for chunks in inode.children.unwrap().values() {
                    let inode = try!(self.get_inode(&chunks));
                    queue.push_back((path.clone(), inode));
                }
            }
        }
        Ok(())
    }
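
    /// Restores a whole backup into `path` by restoring its root inode tree.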
    #[inline]
    pub fn restore_backup<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<(), RepositoryError> {
        let _lock = try!(self.lock(false));
        let inode = try!(self.get_inode(&backup.root));
        self.restore_inode_tree(inode, path)
    }
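
    /// Recursively stores `path` in the repository, passing the matching inode of
    /// `reference` along so unchanged data can be reused, updating the statistics
    /// in `backup` and collecting unreadable paths in `failed_paths`.
    /// Returns the chunk list of the stored root inode.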
    pub fn create_backup_recurse<P: AsRef<Path>>(
        &mut self,
        path: P,
        reference: Option<&Inode>,
        options: &BackupOptions,
        backup: &mut Backup,
        failed_paths: &mut Vec<PathBuf>
    ) -> Result<ChunkList, RepositoryError> {
        let path = path.as_ref();
        let mut inode = try!(self.create_inode(path, reference));
        let meta_size = 1000; // add 1000 for encoded metadata
        backup.total_data_size += inode.size + meta_size;
        if let Some(ref_inode) = reference {
            if !ref_inode.is_unchanged(&inode) {
                backup.changed_data_size += inode.size + meta_size;
            }
        } else {
            backup.changed_data_size += inode.size + meta_size;
        }
        if inode.file_type == FileType::Directory {
            backup.dir_count += 1;
            let mut children = BTreeMap::new();
            let parent_dev = try!(path.metadata()).st_dev();
            for ch in try!(fs::read_dir(path)) {
                let child = try!(ch);
                if options.same_device {
                    // Skip entries that live on a different device (filesystem)
                    let child_dev = try!(child.metadata()).st_dev();
                    if child_dev != parent_dev {
                        continue
                    }
                }
                let name = child.file_name().to_string_lossy().to_string();
                // Look up the corresponding child inode of the reference backup, if any
                let ref_child = reference.as_ref()
                    .and_then(|inode| inode.children.as_ref())
                    .and_then(|map| map.get(&name))
                    .and_then(|chunks| self.get_inode(chunks).ok());
                let child_path = child.path();
                let chunks = match self.create_backup_recurse(&child_path, ref_child.as_ref(), options, backup, failed_paths) {
                    Ok(chunks) => chunks,
                    Err(_) => {
                        // Do not abort the whole backup, just record the failed path
                        warn!("Failed to backup {:?}", child_path);
                        failed_paths.push(child_path);
                        continue
                    }
                };
                children.insert(name, chunks);
            }
            inode.children = Some(children);
        } else {
            backup.file_count += 1;
        }
        self.put_inode(&inode)
    }
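
    /// Creates a complete backup of `path`, using `reference` as the previous
    /// backup for incremental deduplication, and fills in the backup statistics
    /// (sizes, counts, duration). Returns `BackupError::FailedPaths` if some
    /// paths could not be backed up.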
    #[allow(dead_code)]
    pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result<Backup, RepositoryError> {
        let _lock = try!(self.lock(false));
        let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
        let mut backup = Backup::default();
        backup.config = self.config.clone();
        backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
        backup.path = path.as_ref().to_string_lossy().to_string();
        let info_before = self.info();
        let start = Local::now();
        let mut failed_paths = vec![];
        backup.root = try!(self.create_backup_recurse(path, reference_inode.as_ref(), options, &mut backup, &mut failed_paths));
        try!(self.flush());
        let elapsed = Local::now().signed_duration_since(start);
        backup.date = start.timestamp();
        backup.duration = elapsed.num_milliseconds() as f32 / 1_000.0;
        let info_after = self.info();
        backup.deduplicated_data_size = info_after.raw_data_size - info_before.raw_data_size;
        backup.encoded_data_size = info_after.encoded_data_size - info_before.encoded_data_size;
        backup.bundle_count = info_after.bundle_count - info_before.bundle_count;
        backup.chunk_count = info_after.chunk_count - info_before.chunk_count;
        backup.avg_chunk_size = backup.deduplicated_data_size as f32 / backup.chunk_count as f32;
        if failed_paths.is_empty() {
            Ok(backup)
        } else {
            Err(BackupError::FailedPaths(backup, failed_paths).into())
        }
    }
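
    /// Removes `path` from `backup` by dropping it from its parent directory and
    /// rewriting all inodes up to a new backup root. Fails with
    /// `BackupError::RemoveRoot` if `path` is the backup root itself.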
    pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
        let _lock = try!(self.lock(false));
        let mut inodes = try!(self.get_backup_path(backup, path));
        let to_remove = inodes.pop().unwrap();
        let mut remove_from = match inodes.pop() {
            Some(inode) => inode,
            None => return Err(BackupError::RemoveRoot.into())
        };
        remove_from.children.as_mut().unwrap().remove(&to_remove.name);
        // Re-store the modified parent and propagate the new chunk lists up to the root
        let mut last_inode_chunks = try!(self.put_inode(&remove_from));
        let mut last_inode_name = remove_from.name;
        while let Some(mut inode) = inodes.pop() {
            inode.children.as_mut().unwrap().insert(last_inode_name, last_inode_chunks);
            last_inode_chunks = try!(self.put_inode(&inode));
            last_inode_name = inode.name;
        }
        backup.root = last_inode_chunks;
        Ok(())
    }
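
    /// Returns the chain of inodes from the backup root down to (and including)
    /// the inode at `path` inside the backup.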
    pub fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Vec<Inode>, RepositoryError> {
        let mut inodes = vec![];
        let mut inode = try!(self.get_inode(&backup.root));
        for c in path.as_ref().components() {
            if let path::Component::Normal(name) = c {
                let name = name.to_string_lossy();
                if let Some(chunks) = inode.children.as_mut().and_then(|c| c.remove(&name as &str)) {
                    inodes.push(inode);
                    inode = try!(self.get_inode(&chunks));
                } else {
                    return Err(RepositoryError::NoSuchFileInBackup(backup.clone(), path.as_ref().to_owned()));
                }
            }
        }
        inodes.push(inode);
        Ok(inodes)
    }
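
    /// Returns only the inode at `path` inside the backup.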
    #[inline]
    pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {
        self.get_backup_path(backup, path).map(|mut inodes| inodes.pop().unwrap())
    }
}