2017-03-21 10:28:11 +00:00
|
|
|
use ::prelude::*;
|
2017-03-15 20:53:05 +00:00
|
|
|
|
2017-03-21 09:52:48 +00:00
|
|
|
use std::io::{self, BufReader, BufWriter, Read, Write};
|
2017-03-15 20:53:05 +00:00
|
|
|
use std::fs::{self, File};
|
2017-03-21 09:52:48 +00:00
|
|
|
use std::path::{self, Path, PathBuf};
|
2017-03-22 20:42:43 +00:00
|
|
|
use std::collections::{HashMap, BTreeMap, VecDeque};
|
2017-03-16 11:33:10 +00:00
|
|
|
|
|
|
|
use chrono::prelude::*;
|
2017-03-15 20:53:05 +00:00
|
|
|
|
|
|
|
|
2017-03-21 09:52:48 +00:00
|
|
|
// Magic bytes at the start of every backup file: the ASCII string "zvault"
// followed by the byte 0x03 (presumably a file-type tag distinguishing backup
// files from other zvault formats — confirm against the other file headers).
static HEADER_STRING: [u8; 7] = *b"zvault\x03";
// On-disk format version written right after the magic; readers reject any
// other value with `UnsupportedVersion`.
static HEADER_VERSION: u8 = 1;
|
|
|
|
|
|
|
|
|
|
|
|
quick_error!{
    // Errors raised while loading or storing backup files on disk.
    #[derive(Debug)]
    pub enum BackupFileError {
        // Reading a backup file failed (File::open / read_exact / read_to_end).
        // NOTE: description/display previously said "write" here — swapped
        // with the Write variant; fixed to match how the variant is used.
        Read(err: io::Error, path: PathBuf) {
            cause(err)
            description("Failed to read backup")
            display("Backup file error: failed to read backup file {:?}\n\tcaused by: {}", path, err)
        }
        // Writing a backup file failed (File::create / write_all).
        Write(err: io::Error, path: PathBuf) {
            cause(err)
            description("Failed to write backup")
            display("Backup file error: failed to write backup file {:?}\n\tcaused by: {}", path, err)
        }
        // Msgpack deserialization of the backup (or its header) failed.
        Decode(err: msgpack::DecodeError, path: PathBuf) {
            cause(err)
            context(path: &'a Path, err: msgpack::DecodeError) -> (err, path.to_path_buf())
            description("Failed to decode backup")
            display("Backup file error: failed to decode backup of {:?}\n\tcaused by: {}", path, err)
        }
        // Msgpack serialization of the backup failed.
        Encode(err: msgpack::EncodeError, path: PathBuf) {
            cause(err)
            context(path: &'a Path, err: msgpack::EncodeError) -> (err, path.to_path_buf())
            description("Failed to encode backup")
            display("Backup file error: failed to encode backup of {:?}\n\tcaused by: {}", path, err)
        }
        // The file did not start with the expected magic bytes.
        WrongHeader(path: PathBuf) {
            description("Wrong header")
            display("Backup file error: wrong header on backup {:?}", path)
        }
        // The file's version byte differs from HEADER_VERSION.
        UnsupportedVersion(path: PathBuf, version: u8) {
            description("Wrong version")
            display("Backup file error: unsupported version on backup {:?}: {}", path, version)
        }
        // Decrypting the backup payload failed.
        Decryption(err: EncryptionError, path: PathBuf) {
            cause(err)
            context(path: &'a Path, err: EncryptionError) -> (err, path.to_path_buf())
            description("Decryption failed")
            display("Backup file error: decryption failed on backup {:?}\n\tcaused by: {}", path, err)
        }
        // Encrypting the backup payload failed.
        Encryption(err: EncryptionError) {
            from()
            cause(err)
            description("Encryption failed")
            display("Backup file error: encryption failed\n\tcaused by: {}", err)
        }
        // Some (but not all) backups failed to load; carries the backups that
        // did load and the paths of those that did not.
        PartialBackupsList(partial: HashMap<String, Backup>, failed: Vec<PathBuf>) {
            description("Some backups could not be loaded")
            display("Backup file error: some backups could not be loaded: {:?}", failed)
        }
    }
}
|
|
|
|
|
|
|
|
// Small header stored (msgpack-encoded, unencrypted) after the magic/version
// preamble of every backup file; it tells the reader how the remaining
// payload is encrypted (see `Backup::read_from` / `Backup::save_to`).
#[derive(Default, Debug, Clone)]
struct BackupHeader {
    // `None` means the payload that follows is stored unencrypted.
    pub encryption: Option<Encryption>
}

// Maps the struct fields to numeric msgpack keys (project-local macro).
serde_impl!(BackupHeader(u8) {
    encryption: Option<Encryption> => 0
});
|
|
|
|
|
|
|
|
|
2017-03-16 12:59:57 +00:00
|
|
|
// Metadata and statistics describing one stored backup.
#[derive(Default, Debug, Clone)]
pub struct Backup {
    // Chunk list referencing the root inode of the backup tree.
    pub root: ChunkList,
    pub total_data_size: u64, // Sum of all raw sizes of all entities
    pub changed_data_size: u64, // Sum of all raw sizes of all entities actively stored
    pub deduplicated_data_size: u64, // Sum of all raw sizes of all new bundles
    pub encoded_data_size: u64, // Sum of all encoded sizes of all new bundles
    pub bundle_count: usize,
    pub chunk_count: usize,
    pub avg_chunk_size: f32,
    // Creation time as a unix timestamp in seconds (see `start.timestamp()`
    // in `create_backup_recursively`).
    pub date: i64,
    // Wall-clock duration of the backup run in seconds.
    pub duration: f32,
    pub file_count: usize,
    pub dir_count: usize,
    // Hostname of the machine the backup was taken on (may be empty).
    pub host: String,
    // Source path that was backed up, lossily converted to a string.
    pub path: String,
    // Snapshot of the repository configuration at backup time.
    pub config: Config,
}
|
|
|
|
// Maps `Backup` fields to stable numeric msgpack keys (project-local macro).
// These ids form the on-disk encoding of backup files.
serde_impl!(Backup(u8) {
    root: Vec<Chunk> => 0,
    total_data_size: u64 => 1,
    changed_data_size: u64 => 2,
    deduplicated_data_size: u64 => 3,
    encoded_data_size: u64 => 4,
    bundle_count: usize => 5,
    chunk_count: usize => 6,
    avg_chunk_size: f32 => 7,
    date: i64 => 8,
    duration: f32 => 9,
    file_count: usize => 10,
    dir_count: usize => 11,
    host: String => 12,
    path: String => 13,
    config: Config => 14
});
|
|
|
|
|
2017-03-21 09:52:48 +00:00
|
|
|
impl Backup {
    /// Reads and decodes a backup file, decrypting the payload if the file
    /// header says it is encrypted.
    ///
    /// File layout: 7 magic bytes, 1 version byte, a msgpack `BackupHeader`,
    /// then the (possibly encrypted) msgpack-encoded `Backup`.
    pub fn read_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<Self, BackupFileError> {
        let path = path.as_ref();
        let mut file = BufReader::new(try!(File::open(path).map_err(|err| BackupFileError::Read(err, path.to_path_buf()))));
        // Fixed-size preamble: 7 magic bytes + 1 version byte.
        let mut header = [0u8; 8];
        try!(file.read_exact(&mut header).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
        if header[..HEADER_STRING.len()] != HEADER_STRING {
            return Err(BackupFileError::WrongHeader(path.to_path_buf()))
        }
        let version = header[HEADER_STRING.len()];
        if version != HEADER_VERSION {
            return Err(BackupFileError::UnsupportedVersion(path.to_path_buf(), version))
        }
        // Unencrypted msgpack header describing how the rest is encrypted.
        let header: BackupHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
        let mut data = Vec::new();
        try!(file.read_to_end(&mut data).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
        if let Some(ref encryption) = header.encryption {
            data = try!(crypto.decrypt(encryption, &data));
        }
        Ok(try!(msgpack::decode(&data).context(path)))
    }

    /// Encodes this backup — optionally encrypting it with `encryption` — and
    /// writes it to `path` in the format described on `read_from`.
    pub fn save_to<P: AsRef<Path>>(&self, crypto: &Crypto, encryption: Option<Encryption>, path: P) -> Result<(), BackupFileError> {
        let path = path.as_ref();
        let mut data = try!(msgpack::encode(self).context(path));
        if let Some(ref encryption) = encryption {
            data = try!(crypto.encrypt(encryption, &data));
        }
        let mut file = BufWriter::new(try!(File::create(path).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))));
        try!(file.write_all(&HEADER_STRING).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
        try!(file.write_all(&[HEADER_VERSION]).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
        // Record which encryption was used so `read_from` can decrypt.
        let header = BackupHeader { encryption: encryption };
        try!(msgpack::encode_to_stream(&header, &mut file).context(path));
        try!(file.write_all(&data).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
        Ok(())
    }

    /// Recursively scans the directory `path` and loads every file found as a
    /// backup, keyed by its path relative to `path`.
    ///
    /// If some files fail to load, returns `PartialBackupsList` carrying both
    /// the successfully loaded backups and the paths that failed.
    pub fn get_all_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<HashMap<String, Backup>, BackupFileError> {
        let mut backups = HashMap::new();
        let base_path = path.as_ref();
        let mut paths = vec![path.as_ref().to_path_buf()];
        let mut failed_paths = vec![];
        // Iterative depth-first walk using an explicit stack of directories.
        while let Some(path) = paths.pop() {
            for entry in try!(fs::read_dir(&path).map_err(|e| BackupFileError::Read(e, path.clone()))) {
                let entry = try!(entry.map_err(|e| BackupFileError::Read(e, path.clone())));
                let path = entry.path();
                if path.is_dir() {
                    paths.push(path);
                } else {
                    // Backup name = path relative to the scan root.
                    let relpath = path.strip_prefix(&base_path).unwrap();
                    let name = relpath.to_string_lossy().to_string();
                    if let Ok(backup) = Backup::read_from(crypto, &path) {
                        backups.insert(name, backup);
                    } else {
                        failed_paths.push(path.clone());
                    }
                }
            }
        }
        if failed_paths.is_empty() {
            Ok(backups)
        } else {
            Err(BackupFileError::PartialBackupsList(backups, failed_paths))
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
quick_error!{
    // Errors raised while creating or modifying backups (as opposed to the
    // file-level errors in `BackupFileError`).
    #[derive(Debug)]
    #[allow(unknown_lints,large_enum_variant)]
    pub enum BackupError {
        // The backup was created, but some paths could not be backed up;
        // carries the (partial) backup and the paths that failed.
        FailedPaths(backup: Backup, failed: Vec<PathBuf>) {
            description("Some paths could not be backed up")
            display("Backup error: some paths could not be backed up")
        }
        // An attempt was made to remove the root entry of a backup.
        RemoveRoot {
            description("The root of a backup can not be removed")
            display("Backup error: the root of a backup can not be removed")
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
impl Repository {
    /// Loads all backups stored under `<repo>/backups`, keyed by name.
    pub fn get_backups(&self) -> Result<HashMap<String, Backup>, RepositoryError> {
        Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.path.join("backups"))))
    }

    /// Loads a single backup by name.
    pub fn get_backup(&self, name: &str) -> Result<Backup, RepositoryError> {
        Ok(try!(Backup::read_from(&self.crypto.lock().unwrap(), self.path.join("backups").join(name))))
    }

    /// Stores `backup` under `name`, creating intermediate directories and
    /// using the repository's configured encryption.
    pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
        let path = self.path.join("backups").join(name);
        try!(fs::create_dir_all(path.parent().unwrap()));
        Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path)))
    }

    /// Deletes the backup file `name` and then removes any parent directories
    /// that became empty as a result.
    pub fn delete_backup(&self, name: &str) -> Result<(), RepositoryError> {
        let mut path = self.path.join("backups").join(name);
        try!(fs::remove_file(&path));
        loop {
            path = path.parent().unwrap().to_owned();
            // `remove_dir` fails on non-empty directories, which ends the walk.
            if fs::remove_dir(&path).is_err() {
                break
            }
        }
        Ok(())
    }

    /// Prunes old backups whose names start with `prefix`, keeping the
    /// configured number of daily/weekly/monthly/yearly backups. Unless
    /// `force` is set, only logs which backups would be removed.
    pub fn prune_backups(&self, prefix: &str, daily: Option<usize>, weekly: Option<usize>, monthly: Option<usize>, yearly: Option<usize>, force: bool) -> Result<(), RepositoryError> {
        let mut backups = Vec::new();
        // Unreadable backups are skipped with a warning rather than aborting
        // the prune run.
        let backup_map = match self.get_backups() {
            Ok(backup_map) => backup_map,
            Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
                warn!("Some backups could not be read, ignoring them");
                backup_map
            },
            Err(err) => return Err(err)
        };
        for (name, backup) in backup_map {
            if name.starts_with(prefix) {
                let date = Local.timestamp(backup.date, 0);
                backups.push((name, date, backup));
            }
        }
        // Sort oldest-first; `mark_needed` below relies on this ordering.
        backups.sort_by_key(|backup| backup.2.date);
        let mut keep = Bitmap::new(backups.len());

        // Marks the first backup of each of the `max` most recent periods
        // (periods computed by `keyfn`) as kept in the `keep` bitmap.
        fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(backups: &[(String, DateTime<Local>, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) {
            let mut unique = VecDeque::with_capacity(max+1);
            let mut last = None;
            for (i, backup) in backups.iter().enumerate() {
                let val = keyfn(&backup.1);
                let cur = Some(val);
                // A key change marks the start of a new period; remember its
                // index and drop the oldest period once we exceed `max`.
                if cur != last {
                    last = cur;
                    unique.push_back(i);
                    if unique.len() > max {
                        unique.pop_front();
                    }
                }
            }
            for i in unique {
                keep.set(i);
            }
        }
        if let Some(max) = yearly {
            mark_needed(&backups, &mut keep, max, |d| d.year());
        }
        if let Some(max) = monthly {
            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month()));
        }
        if let Some(max) = weekly {
            // ISO week year + week number (chrono 0.3-era API).
            mark_needed(&backups, &mut keep, max, |d| (d.isoweekdate().0, d.isoweekdate().1));
        }
        if let Some(max) = daily {
            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month(), d.day()));
        }
        // Everything not marked as kept is a removal candidate.
        let mut remove = Vec::new();
        for (i, backup) in backups.into_iter().enumerate() {
            if !keep.get(i) {
                remove.push(backup.0);
            }
        }
        info!("Removing the following backups: {:?}", remove);
        if force {
            for name in remove {
                try!(self.delete_backup(&name));
            }
        }
        Ok(())
    }

    /// Restores `inode` and all of its descendants to the filesystem at
    /// `path` using a breadth-first traversal.
    pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, inode: Inode, path: P) -> Result<(), RepositoryError> {
        let mut queue = VecDeque::new();
        queue.push_back((path.as_ref().to_owned(), inode));
        while let Some((path, inode)) = queue.pop_front() {
            try!(self.save_inode_at(&inode, &path));
            if inode.file_type == FileType::Directory {
                // Children are restored beneath the directory just created.
                let path = path.join(inode.name);
                for chunks in inode.children.unwrap().values() {
                    let inode = try!(self.get_inode(&chunks));
                    queue.push_back((path.clone(), inode));
                }
            }
        }
        Ok(())
    }

    /// Restores a whole backup to `path`, starting from its root inode.
    #[inline]
    pub fn restore_backup<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<(), RepositoryError> {
        let inode = try!(self.get_inode(&backup.root));
        self.restore_inode_tree(inode, path)
    }

    /// Recursively backs up `path`, updating the statistics in `backup` and
    /// recording unreadable children in `failed_paths`.
    ///
    /// `reference` is the corresponding inode from a previous backup, used
    /// for change detection. Returns the chunk list of the stored inode.
    pub fn create_backup_recurse<P: AsRef<Path>>(
        &mut self,
        path: P,
        reference: Option<&Inode>,
        backup: &mut Backup,
        failed_paths: &mut Vec<PathBuf>
    ) -> Result<ChunkList, RepositoryError> {
        let path = path.as_ref();
        let mut inode = try!(self.create_inode(path, reference));
        let meta_size = 1000; // add 1000 for encoded metadata
        backup.total_data_size += inode.size + meta_size;
        // Count data as "changed" unless an unchanged reference inode exists.
        if let Some(ref_inode) = reference {
            if !ref_inode.is_unchanged(&inode) {
                backup.changed_data_size += inode.size + meta_size;
            }
        } else {
            backup.changed_data_size += inode.size + meta_size;
        }
        if inode.file_type == FileType::Directory {
            backup.dir_count +=1;
            let mut children = BTreeMap::new();
            for ch in try!(fs::read_dir(path)) {
                let child = try!(ch);
                let name = child.file_name().to_string_lossy().to_string();
                // Look up the matching child inode in the reference backup.
                let ref_child = reference.as_ref()
                    .and_then(|inode| inode.children.as_ref())
                    .and_then(|map| map.get(&name))
                    .and_then(|chunks| self.get_inode(chunks).ok());
                let child_path = child.path();
                let chunks = match self.create_backup_recurse(&child_path, ref_child.as_ref(), backup, failed_paths) {
                    Ok(chunks) => chunks,
                    // Failed children are recorded and skipped instead of
                    // aborting the whole backup.
                    Err(_) => {
                        warn!("Failed to backup {:?}", child_path);
                        failed_paths.push(child_path);
                        continue
                    }
                };
                children.insert(name, chunks);
            }
            inode.children = Some(children);
        } else {
            backup.file_count +=1;
        }
        self.put_inode(&inode)
    }

    /// Creates a full backup of `path`, optionally using `reference` for
    /// change detection, and fills in all statistics fields.
    ///
    /// Returns `BackupError::FailedPaths` (carrying the otherwise complete
    /// backup) if some paths could not be backed up.
    #[allow(dead_code)]
    pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>) -> Result<Backup, RepositoryError> {
        let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
        let mut backup = Backup::default();
        backup.config = self.config.clone();
        backup.host = get_hostname().unwrap_or_else(|_| "".to_string());
        backup.path = path.as_ref().to_string_lossy().to_string();
        // Repository stats before/after the run are diffed to attribute the
        // newly stored data to this backup.
        let info_before = self.info();
        let start = Local::now();
        let mut failed_paths = vec![];
        backup.root = try!(self.create_backup_recurse(path, reference_inode.as_ref(), &mut backup, &mut failed_paths));
        try!(self.flush());
        let elapsed = Local::now().signed_duration_since(start);
        backup.date = start.timestamp();
        backup.duration = elapsed.num_milliseconds() as f32 / 1_000.0;
        let info_after = self.info();
        backup.deduplicated_data_size = info_after.raw_data_size - info_before.raw_data_size;
        backup.encoded_data_size = info_after.encoded_data_size - info_before.encoded_data_size;
        backup.bundle_count = info_after.bundle_count - info_before.bundle_count;
        backup.chunk_count = info_after.chunk_count - info_before.chunk_count;
        backup.avg_chunk_size = backup.deduplicated_data_size as f32 / backup.chunk_count as f32;
        if failed_paths.is_empty() {
            Ok(backup)
        } else {
            Err(BackupError::FailedPaths(backup, failed_paths).into())
        }
    }

    /// Removes `path` from `backup` by rewriting the chain of inodes from the
    /// removed entry's parent up to the root.
    ///
    /// Fails with `BackupError::RemoveRoot` if `path` addresses the backup
    /// root itself.
    pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
        let mut inodes = try!(self.get_backup_path(backup, path));
        let to_remove = inodes.pop().unwrap();
        // The next inode up is the parent the entry is removed from; if there
        // is none, the target was the root.
        let mut remove_from = match inodes.pop() {
            Some(inode) => inode,
            None => return Err(BackupError::RemoveRoot.into())
        };
        remove_from.children.as_mut().unwrap().remove(&to_remove.name);
        let mut last_inode_chunks = try!(self.put_inode(&remove_from));
        let mut last_inode_name = remove_from.name;
        // Re-store every ancestor with the updated child chunk list.
        while let Some(mut inode) = inodes.pop() {
            inode.children.as_mut().unwrap().insert(last_inode_name, last_inode_chunks);
            last_inode_chunks = try!(self.put_inode(&inode));
            last_inode_name = inode.name;
        }
        backup.root = last_inode_chunks;
        Ok(())
    }

    /// Resolves `path` inside `backup`, returning the chain of inodes from
    /// the backup root down to (and including) the addressed entry.
    pub fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Vec<Inode>, RepositoryError> {
        let mut inodes = vec![];
        let mut inode = try!(self.get_inode(&backup.root));
        for c in path.as_ref().components() {
            // Only normal components are resolved; prefixes/roots are skipped.
            if let path::Component::Normal(name) = c {
                let name = name.to_string_lossy();
                if let Some(chunks) = inode.children.as_mut().and_then(|c| c.remove(&name as &str)) {
                    inodes.push(inode);
                    inode = try!(self.get_inode(&chunks));
                } else {
                    return Err(RepositoryError::NoSuchFileInBackup(backup.clone(), path.as_ref().to_owned()));
                }
            }
        }
        inodes.push(inode);
        Ok(inodes)
    }

    /// Resolves `path` inside `backup` and returns just the addressed inode.
    #[inline]
    pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {
        self.get_backup_path(backup, path).map(|mut inodes| inodes.pop().unwrap())
    }
}
|