Refactor part 1

refactor
Dennis Schwerdel 2018-03-08 23:41:56 +01:00
parent 98a09fea2e
commit 0b5a485a26
23 changed files with 289 additions and 92 deletions

src/backups/mod.rs Normal file (197 additions)

@@ -0,0 +1,197 @@
pub mod mount;
use ::prelude::*;
use std::path::{Path, PathBuf};
use std::collections::HashMap;
pub struct BackupRepository {
repo: Repository
}
impl BackupRepository {
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(path: P, config: &Config, remote: R) -> Result<Self, RepositoryError> {
Ok(BackupRepository {
repo: try!(Repository::create(path, config, remote))
})
}
#[allow(unknown_lints, useless_let_if_seq)]
pub fn open<P: AsRef<Path>>(path: P, online: bool) -> Result<Self, RepositoryError> {
Ok(BackupRepository {
repo: try!(Repository::open(path, online))
})
}
pub fn import<P: AsRef<Path>, R: AsRef<Path>>(path: P, remote: R, key_files: Vec<String>) -> Result<Self, RepositoryError> {
Ok(BackupRepository {
repo: try!(Repository::import(path, remote, key_files))
})
}
#[inline]
pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> {
self.repo.register_key(public, secret)
}
#[inline]
pub fn save_config(&mut self) -> Result<(), RepositoryError> {
self.repo.save_config()
}
#[inline]
pub fn set_encryption(&mut self, public: Option<&PublicKey>) {
self.repo.set_encryption(public)
}
#[inline]
pub fn has_backup(&self, name: &str) -> bool {
self.repo.has_backup(name)
}
pub fn get_backup(&self, name: &str) -> Result<Backup, RepositoryError> {
self.repo.get_backup(name)
}
#[inline]
pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {
self.repo.get_backup_inode(backup, path)
}
#[inline]
pub fn get_inode(&mut self, chunks: &[Chunk]) -> Result<Inode, RepositoryError> {
self.repo.get_inode(chunks)
}
pub fn get_all_backups(&self) -> Result<HashMap<String, Backup>, RepositoryError> {
self.repo.get_all_backups()
}
pub fn get_config(&self) -> &Config {
&self.repo.config
}
pub fn set_config(&mut self, config: Config) {
self.repo.config = config;
}
pub fn get_layout(&self) -> &RepositoryLayout {
&self.repo.layout
}
pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result<Backup, RepositoryError> {
self.repo.create_backup_recursively(path, reference, options)
}
pub fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P) -> Result<Backup, RepositoryError> {
self.repo.import_tarfile(tarfile)
}
pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
self.repo.save_backup(backup, name)
}
pub fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, tarfile: P) -> Result<(), RepositoryError> {
self.repo.export_tarfile(backup, inode, tarfile)
}
pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, path: P) -> Result<(), RepositoryError> {
self.repo.restore_inode_tree(backup, inode, path)
}
pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
self.repo.remove_backup_path(backup, path)
}
pub fn get_backups<P: AsRef<Path>>(&self, path: P) -> Result<HashMap<String, Backup>, RepositoryError> {
self.repo.get_backups(path)
}
pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
self.repo.delete_backup(name)
}
pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
self.repo.prune_backups(prefix, daily, weekly, monthly, yearly, force)
}
pub fn info(&self) -> RepositoryInfo {
self.repo.info()
}
pub fn vacuum(&mut self, ratio: f32, combine: bool, force: bool) -> Result<(), RepositoryError> {
self.repo.vacuum(ratio, combine, force)
}
pub fn check_repository(&mut self, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_repository(repair)
}
#[inline]
pub fn check_bundles(&mut self, full: bool, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_bundles(full, repair)
}
#[inline]
pub fn check_index(&mut self, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_index(repair)
}
pub fn check_backup_inode(&mut self, name: &str, backup: &mut Backup, path: &Path, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_backup_inode(name, backup, path, repair)
}
#[inline]
pub fn check_backup(&mut self, name: &str, backup: &mut Backup, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_backup(name, backup, repair)
}
pub fn check_backups(&mut self, repair: bool) -> Result<(), RepositoryError> {
self.repo.check_backups(repair)
}
#[inline]
pub fn set_clean(&mut self) {
self.repo.set_clean()
}
pub fn statistics(&self) -> RepositoryStatistics {
self.repo.statistics()
}
pub fn find_duplicates(&mut self, inode: &Inode, min_size: u64) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError> {
self.repo.find_duplicates(inode, min_size)
}
pub fn analyze_usage(&mut self) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
self.repo.analyze_usage()
}
#[inline]
pub fn list_bundles(&self) -> Vec<&BundleInfo> {
self.repo.list_bundles()
}
#[inline]
pub fn get_bundle(&self, bundle: &BundleId) -> Option<&StoredBundle> {
self.repo.get_bundle(bundle)
}
pub fn find_versions<P: AsRef<Path>>(&mut self, path: P) -> Result<Vec<(String, Inode)>, RepositoryError> {
self.repo.find_versions(path)
}
#[inline]
pub fn find_differences(&mut self, inode1: &Inode, inode2: &Inode) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
self.repo.find_differences(inode1, inode2)
}
pub fn get_chunk(&mut self, hash: Hash) -> Result<Option<Vec<u8>>, RepositoryError> {
self.repo.get_chunk(hash)
}
pub fn get_data(&mut self, chunks: &[Chunk]) -> Result<Vec<u8>, RepositoryError> {
self.repo.get_data(chunks)
}
}
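
The new BackupRepository type is a facade that owns the Repository and forwards every operation to it, so the CLI and FUSE layers no longer touch Repository directly. A minimal usage sketch against the methods above (the repository path, backup name, and subpaths are placeholders):

fn restore_example() -> Result<(), RepositoryError> {
    // Open an existing repository in online mode
    let mut repo = try!(BackupRepository::open("/path/to/repo", true));
    if repo.has_backup("example") {
        let backup = try!(repo.get_backup("example"));
        // Resolve a subpath of the backup to its inode ...
        let inode = try!(repo.get_backup_inode(&backup, "some/path"));
        // ... and restore that subtree to a local directory
        try!(repo.restore_inode_tree(&backup, inode, "/tmp/restored"));
    }
    Ok(())
}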


@@ -150,12 +150,12 @@ impl FuseInode
pub struct FuseFilesystem<'a> {
next_id: u64,
repository: &'a mut Repository,
repository: &'a mut BackupRepository,
inodes: HashMap<u64, FuseInodeRef>
}
impl<'a> FuseFilesystem<'a> {
pub fn new(repository: &'a mut Repository) -> Result<Self, RepositoryError> {
pub fn new(repository: &'a mut BackupRepository) -> Result<Self, RepositoryError> {
Ok(FuseFilesystem {
next_id: 1,
repository,
@@ -164,7 +164,7 @@ impl<'a> FuseFilesystem<'a> {
}
pub fn from_repository(
repository: &'a mut Repository,
repository: &'a mut BackupRepository,
path: Option<&str>,
) -> Result<Self, RepositoryError> {
let mut backups = vec![];
@@ -196,7 +196,7 @@ impl<'a> FuseFilesystem<'a> {
}
pub fn from_backup(
repository: &'a mut Repository,
repository: &'a mut BackupRepository,
backup: Backup,
) -> Result<Self, RepositoryError> {
let inode = try!(repository.get_inode(&backup.root));
@@ -206,7 +206,7 @@ impl<'a> FuseFilesystem<'a> {
}
pub fn from_inode(
repository: &'a mut Repository,
repository: &'a mut BackupRepository,
backup: Backup,
inode: Inode,
) -> Result<Self, RepositoryError> {
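
All four constructors now take the facade instead of the raw Repository. A sketch of building the filesystem from a single backup through the new signatures (the path and backup name are placeholders):

let mut repo = try!(BackupRepository::open("/path/to/repo", true));
let backup = try!(repo.get_backup("example"));
// `fs` would then be mounted at the target directory
let fs = try!(FuseFilesystem::from_backup(&mut repo, backup));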


@@ -1,53 +0,0 @@
use std::io::{self, Write, Read};
mod fixed;
mod ae;
mod rabin;
mod fastcdc;
#[cfg(test)] mod test;
#[cfg(feature = "bench")] mod benches;
pub use self::fixed::FixedChunker;
pub use self::ae::AeChunker;
pub use self::rabin::RabinChunker;
pub use self::fastcdc::FastCdcChunker;
// https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
// Paper: "A Comprehensive Study of the Past, Present, and Future of Data Deduplication"
// Paper-URL: http://wxia.hustbackup.cn/IEEE-Survey-final.pdf
// https://borgbackup.readthedocs.io/en/stable/internals.html#chunks
// https://github.com/bup/bup/blob/master/lib/bup/bupsplit.c
quick_error!{
#[derive(Debug)]
pub enum ChunkerError {
Read(err: io::Error) {
cause(err)
description(tr!("Failed to read input"))
display("{}", tr_format!("Chunker error: failed to read input\n\tcaused by: {}", err))
}
Write(err: io::Error) {
cause(err)
description(tr!("Failed to write to output"))
display("{}", tr_format!("Chunker error: failed to write to output\n\tcaused by: {}", err))
}
Custom(reason: &'static str) {
from()
description(tr!("Custom error"))
display("{}", tr_format!("Chunker error: {}", reason))
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum ChunkerStatus {
Continue,
Finished
}
pub trait Chunker {
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError>;
}


@@ -115,15 +115,15 @@ macro_rules! checked {
};
}
fn open_repository(path: &Path, online: bool) -> Result<Repository, ErrorCode> {
fn open_repository(path: &Path, online: bool) -> Result<BackupRepository, ErrorCode> {
Ok(checked!(
Repository::open(path, online),
BackupRepository::open(path, online),
"load repository",
ErrorCode::LoadRepository
))
}
fn get_backup(repo: &Repository, backup_name: &str) -> Result<Backup, ErrorCode> {
fn get_backup(repo: &BackupRepository, backup_name: &str) -> Result<Backup, ErrorCode> {
if !repo.has_backup(backup_name) {
tr_error!("A backup with that name does not exist");
return Err(ErrorCode::NoSuchBackup);
@@ -135,7 +135,7 @@ fn get_backup(repo: &Repository, backup_name: &str) -> Result<Backup, ErrorCode>
))
}
fn get_inode(repo: &mut Repository, backup: &Backup, inode: Option<&String>) -> Result<Inode, ErrorCode> {
fn get_inode(repo: &mut BackupRepository, backup: &Backup, inode: Option<&String>) -> Result<Inode, ErrorCode> {
Ok(if let Some(inode) = inode {
checked!(
repo.get_backup_inode(backup, &inode),
@@ -152,7 +152,7 @@ fn get_inode(repo: &mut Repository, backup: &Backup, inode: Option<&String>) ->
}
fn find_reference_backup(
repo: &Repository,
repo: &BackupRepository,
path: &str,
) -> Result<Option<(String, Backup)>, ErrorCode> {
let mut matching = Vec::new();
@@ -523,7 +523,7 @@ pub fn run() -> Result<(), ErrorCode> {
return Err(ErrorCode::InvalidArgs);
}
let mut repo = checked!(
Repository::create(
BackupRepository::create(
repo_path,
&Config {
bundle_size,
@@ -554,7 +554,7 @@ pub fn run() -> Result<(), ErrorCode> {
);
println!();
}
print_config(&repo.config);
print_config(repo.get_config());
}
Arguments::Backup {
repo_path,
@@ -598,7 +598,7 @@ pub fn run() -> Result<(), ErrorCode> {
let reference_backup = reference_backup.map(|(_, backup)| backup);
if !no_default_excludes && !tar {
for line in BufReader::new(checked!(
File::open(&repo.layout.excludes_path()),
File::open(&repo.get_layout().excludes_path()),
"open default excludes file",
ErrorCode::LoadExcludes
)).lines()
@@ -745,7 +745,7 @@ pub fn run() -> Result<(), ErrorCode> {
ErrorCode::SaveBackup
);
tr_info!("The backup subpath has been deleted, run vacuum to reclaim space");
} else if repo.layout.backups_path().join(&backup_name).is_dir() {
} else if repo.get_layout().backups_path().join(&backup_name).is_dir() {
let backups = checked!(
repo.get_backups(&backup_name),
"retrieve backups",
@@ -877,7 +877,7 @@ pub fn run() -> Result<(), ErrorCode> {
} => {
let mut repo = try!(open_repository(&repo_path, false));
let backup_map = if let Some(backup_name) = backup_name {
if repo.layout.backups_path().join(&backup_name).is_dir() {
if repo.get_layout().backups_path().join(&backup_name).is_dir() {
repo.get_backups(&backup_name)
} else {
let backup = try!(get_backup(&repo, &backup_name));
@@ -970,7 +970,7 @@ pub fn run() -> Result<(), ErrorCode> {
} => {
let mut repo = try!(open_repository(&repo_path, true));
let fs = if let Some(backup_name) = backup_name {
if repo.layout.backups_path().join(&backup_name).is_dir() {
if repo.get_layout().backups_path().join(&backup_name).is_dir() {
checked!(
FuseFilesystem::from_repository(&mut repo, Some(&backup_name)),
"create fuse filesystem",
@@ -1047,7 +1047,7 @@ pub fn run() -> Result<(), ErrorCode> {
key_files
} => {
checked!(
Repository::import(repo_path, remote_path, key_files),
BackupRepository::import(repo_path, remote_path, key_files),
"import repository",
ErrorCode::ImportRun
);
@@ -1128,19 +1128,20 @@ pub fn run() -> Result<(), ErrorCode> {
} => {
let mut repo = try!(open_repository(&repo_path, false));
let mut changed = false;
let mut config = repo.get_config().clone();
if let Some(bundle_size) = bundle_size {
repo.config.bundle_size = bundle_size;
config.bundle_size = bundle_size;
changed = true;
}
if let Some(chunker) = chunker {
tr_warn!(
"Changing the chunker makes it impossible to use existing data for deduplication"
);
repo.config.chunker = chunker;
config.chunker = chunker;
changed = true;
}
if let Some(compression) = compression {
repo.config.compression = compression;
config.compression = compression;
changed = true;
}
if let Some(encryption) = encryption {
@@ -1151,14 +1152,15 @@ pub fn run() -> Result<(), ErrorCode> {
tr_warn!(
"Changing the hash makes it impossible to use existing data for deduplication"
);
repo.config.hash = hash;
config.hash = hash;
changed = true;
}
if changed {
repo.set_config(config);
checked!(repo.save_config(), "save config", ErrorCode::SaveConfig);
tr_info!("The configuration has been updated.");
} else {
print_config(&repo.config);
print_config(repo.get_config());
}
}
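
The Config subcommand illustrates the new pattern for mutating settings: clone the config out of the facade, modify it, hand it back with set_config, then persist with save_config. Condensed sketch of that flow (the bundle size value is just an example):

let mut config = repo.get_config().clone();
config.bundle_size = 25 * 1024 * 1024; // 25 MiB, illustrative
repo.set_config(config);
checked!(repo.save_config(), "save config", ErrorCode::SaveConfig);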
Arguments::GenKey { file, password } => {


@@ -43,14 +43,10 @@ extern crate mmap;
#[macro_use] mod translation;
pub mod util;
mod bundledb;
mod repository;
mod cli;
mod prelude;
mod mount;
mod chunker;
mod chunking;
mod index;
mod backups;
use std::process::exit;


@@ -1,14 +1,15 @@
pub use util::*;
pub use bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError,
pub use repository::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError,
BundleDb, BundleWriterError, StoredBundle, BundleStatistics};
pub use chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use repository::chunking::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType,
IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis,
FileData, DiffType, InodeError, RepositoryLayout, Location,
RepositoryStatistics};
pub use index::{Index, IndexError, IndexStatistics};
pub use mount::FuseFilesystem;
pub use repository::index::{Index, IndexError, IndexStatistics};
pub use backups::mount::FuseFilesystem;
pub use translation::CowStr;
pub use backups::BackupRepository;
pub use serde::{Serialize, Deserialize};
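
Since downstream code imports these names through the prelude, the module moves stay invisible at call sites; a hypothetical helper like this compiles unchanged before and after the refactor:

use ::prelude::*;

fn repository_info(repo: &BackupRepository) -> RepositoryInfo {
    // Same call as before; only the re-export paths behind the prelude moved
    repo.info()
}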


@@ -1,4 +1,4 @@
use prelude::*;
use ::prelude::*;
use super::*;
use std::path::{Path, PathBuf};


@@ -10,7 +10,7 @@ pub use self::reader::{BundleReader, BundleReaderError};
pub use self::db::*;
pub use self::uploader::BundleUploader;
use prelude::*;
use ::prelude::*;
use std::fmt;
use std::collections::HashMap;


@@ -1,7 +1,58 @@
pub use chunking::*;
use std::io::{self, Write, Read};
use std::str::FromStr;
mod fixed;
mod ae;
mod rabin;
mod fastcdc;
#[cfg(test)] mod test;
#[cfg(feature = "bench")] mod benches;
pub use self::fixed::FixedChunker;
pub use self::ae::AeChunker;
pub use self::rabin::RabinChunker;
pub use self::fastcdc::FastCdcChunker;
// https://moinakg.wordpress.com/2013/06/22/high-performance-content-defined-chunking/
// Paper: "A Comprehensive Study of the Past, Present, and Future of Data Deduplication"
// Paper-URL: http://wxia.hustbackup.cn/IEEE-Survey-final.pdf
// https://borgbackup.readthedocs.io/en/stable/internals.html#chunks
// https://github.com/bup/bup/blob/master/lib/bup/bupsplit.c
quick_error!{
#[derive(Debug)]
pub enum ChunkerError {
Read(err: io::Error) {
cause(err)
description(tr!("Failed to read input"))
display("{}", tr_format!("Chunker error: failed to read input\n\tcaused by: {}", err))
}
Write(err: io::Error) {
cause(err)
description(tr!("Failed to write to output"))
display("{}", tr_format!("Chunker error: failed to write to output\n\tcaused by: {}", err))
}
Custom(reason: &'static str) {
from()
description(tr!("Custom error"))
display("{}", tr_format!("Chunker error: {}", reason))
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum ChunkerStatus {
Continue,
Finished
}
pub trait Chunker {
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError>;
}
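
For illustration, a minimal fixed-size implementation of this trait (a simplified stand-in, not the crate's FixedChunker; ExampleChunker is hypothetical):

struct ExampleChunker {
    size: usize
}

impl Chunker for ExampleChunker {
    fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
        let mut buf = vec![0u8; self.size];
        let mut filled = 0;
        // Fill the buffer completely unless the input ends first
        while filled < self.size {
            let n = try!(r.read(&mut buf[filled..]).map_err(ChunkerError::Read));
            if n == 0 {
                break;
            }
            filled += n;
        }
        try!(w.write_all(&buf[..filled]).map_err(ChunkerError::Write));
        // A short (or empty) final chunk signals the end of the input
        if filled < self.size {
            Ok(ChunkerStatus::Finished)
        } else {
            Ok(ChunkerStatus::Continue)
        }
    }
}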
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ChunkerType {


@@ -116,7 +116,7 @@ impl Repository {
}
}
}
// Put children in todo
// Put children in to do
if let Some(children) = inode.children {
for (_name, chunks) in children {
todo.push_back(chunks);
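
For context, this hunk sits inside a breadth-first walk over the inode tree; the surrounding loop has roughly this shape (the todo queue and other details outside the hunk are assumed from context, not shown in the diff):

let mut todo = VecDeque::new();
todo.push_back(backup.root.clone());
while let Some(chunks) = todo.pop_front() {
    let inode = try!(self.get_inode(&chunks));
    // ... check the inode's own data chunks here ...
    if let Some(children) = inode.children {
        for (_name, chunks) in children {
            todo.push_back(chunks);
        }
    }
}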


@@ -149,7 +149,7 @@ impl Repository {
return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into());
}
}
// Put children in todo
// Put children in to do
if let Some(ref mut children) = inode.children {
let mut removed = vec![];
for (name, chunks) in children.iter_mut() {


@@ -10,6 +10,9 @@ mod vacuum;
mod backup_file;
mod tarfile;
mod layout;
pub mod bundledb;
pub mod index;
pub mod chunking;
use prelude::*;
@@ -53,9 +56,9 @@ impl Location {
}
}
impl ::index::Value for Location {}
impl index::Value for Location {}
impl ::index::Key for Hash {
impl index::Key for Hash {
fn hash(&self) -> u64 {
self.low
}