Code cleanup: use field init shorthand, replace println_stderr! with eprintln!, fix ratio output

Dennis Schwerdel 2018-03-03 17:25:05 +01:00
parent 2f6c3b239e
commit 1102600893
26 changed files with 102 additions and 104 deletions


@@ -3,6 +3,12 @@
 This project follows [semantic versioning](http://semver.org).
 
+### UNRELEASED
+* [added] Translation infrastructure (**requires nightly rust**)
+* [fixed] Also including the first min_size bytes in hash
+* [modified] Updated dependencies
+
 ### v0.4.0 (2017-07-21)
 * [added] Added `copy` subcommand
 * [added] Added support for xattrs in fuse mount


@@ -99,8 +99,8 @@ fn load_bundles(
             }
         };
         let bundle = StoredBundle {
-            info: info,
-            path: path
+            info,
+            path
         };
         let id = bundle.info.id.clone();
         if !bundles.contains_key(&id) {
@@ -129,8 +129,8 @@ pub struct BundleDb {
 impl BundleDb {
     fn new(layout: RepositoryLayout, crypto: Arc<Mutex<Crypto>>) -> Self {
         BundleDb {
-            layout: layout,
-            crypto: crypto,
+            layout,
+            crypto,
             uploader: None,
             local_bundles: HashMap::new(),
             remote_bundles: HashMap::new(),
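
Most hunks in this commit apply Rust's field init shorthand (stable since Rust 1.17): when the initializing variable has the same name as the struct field, `field: field` can be shortened to just `field`. A minimal sketch of the pattern, using a hypothetical struct rather than the actual zvault types:

struct Example {
    info: u32,
    path: String,
}

fn build(info: u32, path: String) -> Example {
    // Before the cleanup this read `Example { info: info, path: path }`;
    // the shorthand form below compiles to exactly the same code.
    Example { info, path }
}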


@@ -75,12 +75,12 @@ impl BundleReader {
         info: BundleInfo,
     ) -> Self {
         BundleReader {
-            info: info,
+            info,
             chunks: None,
-            version: version,
-            path: path,
-            crypto: crypto,
-            content_start: content_start,
+            version,
+            path,
+            crypto,
+            content_start,
             chunk_positions: None
         }
     }


@@ -20,7 +20,7 @@ pub struct BundleUploader {
 impl BundleUploader {
     pub fn new(capacity: usize) -> Arc<Self> {
         let self_ = Arc::new(BundleUploader {
-            capacity: capacity,
+            capacity,
             error_present: AtomicBool::new(false),
             error: Mutex::new(None),
             waiting: AtomicUsize::new(0),


@@ -72,13 +72,13 @@ impl BundleWriter {
             None => None,
         };
         Ok(BundleWriter {
-            mode: mode,
-            hash_method: hash_method,
+            mode,
+            hash_method,
             data: vec![],
-            compression: compression,
-            compression_stream: compression_stream,
-            encryption: encryption,
-            crypto: crypto,
+            compression,
+            compression_stream,
+            encryption,
+            crypto,
             raw_size: 0,
             chunk_count: 0,
             chunks: ChunkList::new()
@@ -127,7 +127,7 @@ impl BundleWriter {
             chunk_count: self.chunk_count,
             id: id.clone(),
             raw_size: self.raw_size,
-            encoded_size: encoded_size,
+            encoded_size,
             chunk_list_size: chunk_data.len(),
             timestamp: Local::now().timestamp()
         };
@@ -149,8 +149,8 @@ impl BundleWriter {
             .unwrap()
             .to_path_buf();
         Ok(StoredBundle {
-            path: path,
-            info: info
+            path,
+            info
         })
     }


@@ -20,7 +20,7 @@ impl AeChunker {
         AeChunker{
             buffer: [0; 0x1000],
             buffered: 0,
-            window_size: window_size,
+            window_size,
         }
     }
 }


@@ -66,9 +66,9 @@ impl FastCdcChunker {
             gear: create_gear(seed),
             min_size: avg_size/4,
             max_size: avg_size*8,
-            avg_size: avg_size,
-            mask_long: mask_long,
-            mask_short: mask_short,
+            avg_size,
+            mask_long,
+            mask_short,
         }
     }
 }


@@ -55,12 +55,12 @@ impl RabinChunker {
             buffer: [0; 0x1000],
             buffered: 0,
             table: create_table(alpha, window_size),
-            alpha: alpha,
-            seed: seed,
+            alpha,
+            seed,
             min_size: avg_size/4,
             max_size: avg_size*4,
-            window_size: window_size,
-            chunk_mask: chunk_mask,
+            window_size,
+            chunk_mask,
         }
     }
 }


@@ -2,16 +2,8 @@ use log;
 pub use log::SetLoggerError;
 use ansi_term::{Color, Style};
-use std::io::Write;
-
-macro_rules! println_stderr(
-    ($($arg:tt)*) => { {
-        let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
-        r.expect("failed printing to stderr");
-    } }
-);
 
 struct Logger(log::Level);
 
 impl log::Log for Logger {
@@ -25,22 +17,22 @@ impl log::Log for Logger {
         if self.enabled(record.metadata()) {
             match record.level() {
                 log::Level::Error => {
-                    println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args())
+                    eprintln!("{}: {}", Color::Red.bold().paint("error"), record.args())
                 }
                 log::Level::Warn => {
-                    println_stderr!(
+                    eprintln!(
                         "{}: {}",
                         Color::Yellow.bold().paint("warning"),
                         record.args()
                     )
                 }
                 log::Level::Info => {
-                    println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args())
+                    eprintln!("{}: {}", Color::Green.bold().paint("info"), record.args())
                 }
                 log::Level::Debug => {
-                    println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args())
+                    eprintln!("{}: {}", Style::new().bold().paint("debug"), record.args())
                 }
-                log::Level::Trace => println_stderr!("{}: {}", "trace", record.args()),
+                log::Level::Trace => eprintln!("{}: {}", "trace", record.args()),
             }
         }
     }
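
The removed `println_stderr!` macro predates Rust 1.19, which added `eprintln!` to the standard library. Both write a formatted line to stderr and panic if the write fails, so the replacement is behavior-preserving. A minimal sketch of the equivalence (the message text here is made up for illustration):

use std::io::Write;

fn main() {
    // What the old macro expanded to, roughly:
    writeln!(&mut ::std::io::stderr(), "error: {}", "something failed")
        .expect("failed printing to stderr");

    // The standard-library form used after this commit:
    eprintln!("error: {}", "something failed");
}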


@@ -105,7 +105,7 @@ macro_rules! checked {
         match $expr {
             Ok(val) => val,
             Err(err) => {
-                tr_error!("Failed to {}\n\tcaused by: {}", $msg, err);
+                tr_error!("Failed to {}\n\tcaused by: {}", tr!($msg), err);
                 return Err($code)
             }
         }
@@ -187,16 +187,16 @@ fn print_backup(backup: &Backup) {
     );
     let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
     tr_println!(
-        "Deduplicated size: {}, {:.1}% saved",
+        "Deduplicated size: {}, {:.1}%",
         to_file_size(backup.deduplicated_data_size),
-        (1.0 - dedup_ratio) * 100.0
+        (dedup_ratio - 1.0) * 100.0
     );
     let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
     tr_println!(
-        "Compressed size: {} in {} bundles, {:.1}% saved",
+        "Compressed size: {} in {} bundles, {:.1}%",
         to_file_size(backup.encoded_data_size),
         backup.bundle_count,
-        (1.0 - compress_ratio) * 100.0
+        (compress_ratio - 1.0) * 100.0
     );
     tr_println!(
         "Chunk count: {}, avg size: {}",
@@ -299,7 +299,7 @@ fn print_repoinfo(info: &RepositoryInfo) {
     tr_println!("Bundles: {}", info.bundle_count);
     tr_println!("Total size: {}", to_file_size(info.encoded_data_size));
     tr_println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
-    tr_println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
+    tr_println!("Compression ratio: {:.1}%", (info.compression_ratio - 1.0) * 100.0);
     tr_println!("Chunk count: {}", info.chunk_count);
     tr_println!(
         "Average chunk size: {}",
@@ -346,7 +346,7 @@ fn print_bundle(bundle: &StoredBundle) {
         tr_println!(
             " - Compression: {}, ratio: {:.1}%",
             compression,
-            ratio * 100.0
+            (ratio - 1.0) * 100.0
         );
     }
@@ -436,11 +436,11 @@ pub fn run() -> Result<(), ErrorCode> {
             Repository::create(
                 repo_path,
                 &Config {
-                    bundle_size: bundle_size,
-                    chunker: chunker,
-                    compression: compression,
+                    bundle_size,
+                    chunker,
+                    compression,
                     encryption: None,
-                    hash: hash
+                    hash
                 },
                 remote_path
             ),
@@ -559,8 +559,8 @@ pub fn run() -> Result<(), ErrorCode> {
                 ))
             };
             let options = BackupOptions {
-                same_device: same_device,
-                excludes: excludes
+                same_device,
+                excludes
             };
             let result = if tar {
                 repo.import_tarfile(&src_path)
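
Note the sign flip in the ratio output above: the old code printed the space saved as a positive percentage, while the new code prints the relative size change, which comes out negative when deduplication or compression shrinks the data. A small worked sketch with made-up sizes:

fn main() {
    // Hypothetical backup: 250 units of changed data deduplicate to 100 units.
    let deduplicated = 100.0_f32;
    let changed = 250.0_f32;
    let ratio = deduplicated / changed; // 0.4

    println!("{:.1}% saved", (1.0 - ratio) * 100.0); // old output: "60.0% saved"
    println!("{:.1}%", (ratio - 1.0) * 100.0);       // new output: "-60.0%"
}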


@@ -14,7 +14,6 @@ pub const MIN_USAGE: f64 = 0.35;
 pub const INITIAL_SIZE: usize = 1024;
 
-//TODO: translate
 quick_error!{
     #[derive(Debug)]
     pub enum IndexError {
@@ -230,10 +229,10 @@ impl<K: Key, V: Value> Index<K, V> {
             max_entries: (header.capacity as f64 * MAX_USAGE) as usize,
             min_entries: (header.capacity as f64 * MIN_USAGE) as usize,
             entries: header.entries as usize,
-            fd: fd,
-            mmap: mmap,
-            data: data,
-            header: header
+            fd,
+            mmap,
+            data,
+            header
         };
         debug_assert!(index.check().is_ok(), tr!("Inconsistent after creation"));
         Ok(index)
@@ -276,6 +275,7 @@ impl<K: Key, V: Value> Index<K, V> {
         self.max_entries = (capacity as f64 * MAX_USAGE) as usize;
     }
 
+    #[allow(redundant_field_names)]
     fn reinsert(&mut self, start: usize, end: usize) -> Result<(), IndexError> {
         for pos in start..end {
            let key;
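
`redundant_field_names` is the clippy lint that flags `field: field` initializers; the attribute silences it for `reinsert`, whose body (not shown in this hunk) presumably keeps the long form. Modern clippy spells the lint `clippy::redundant_field_names`; the unprefixed form matches the clippy of early 2018. A hypothetical sketch of what the lint reacts to:

struct Entry {
    key: u64,
}

#[allow(clippy::redundant_field_names)] // keep clippy quiet about the long form
fn make(key: u64) -> Entry {
    Entry { key: key } // clippy would otherwise suggest `Entry { key }`
}

fn main() {
    let _ = make(7);
}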


@@ -113,8 +113,8 @@ impl FuseInode {
             kind: convert_file_type(self.inode.file_type),
             perm: self.inode.mode as u16,
             nlink: 1,
-            uid: uid,
-            gid: gid,
+            uid,
+            gid,
             rdev: self.inode.device.map_or(
                 0,
                 |(major, minor)| (major << 8) + minor
@@ -158,7 +158,7 @@ impl<'a> FuseFilesystem<'a> {
     pub fn new(repository: &'a mut Repository) -> Result<Self, RepositoryError> {
         Ok(FuseFilesystem {
             next_id: 1,
-            repository: repository,
+            repository,
             inodes: HashMap::new()
         })
     }
@@ -222,7 +222,7 @@ impl<'a> FuseFilesystem<'a> {
     ) -> FuseInodeRef {
         self.add_inode(
             Inode {
-                name: name,
+                name,
                 file_type: FileType::Directory,
                 ..Default::default()
             },
@@ -240,7 +240,7 @@ impl<'a> FuseFilesystem<'a> {
         group_names: HashMap<u32, String>,
     ) -> FuseInodeRef {
         let inode = FuseInode {
-            inode: inode,
+            inode,
             num: self.next_id,
             parent: parent.clone(),
             chunks: None,


@@ -164,7 +164,7 @@ impl Backup {
         try!(file.write_all(&[HEADER_VERSION]).map_err(|err| {
             BackupFileError::Write(err, path.to_path_buf())
         }));
-        let header = BackupHeader { encryption: encryption };
+        let header = BackupHeader { encryption };
         try!(msgpack::encode_to_stream(&header, &mut file).context(path));
         try!(file.write_all(&data).map_err(|err| {
             BackupFileError::Write(err, path.to_path_buf())


@@ -16,7 +16,7 @@ pub struct ChunkReader<'a> {
 impl<'a> ChunkReader<'a> {
     pub fn new(repo: &'a mut Repository, chunks: ChunkList) -> Self {
         ChunkReader {
-            repo: repo,
+            repo,
             chunks: chunks.into_inner().into(),
             data: vec![],
             pos: 0


@@ -193,8 +193,8 @@ impl Config {
             None
         };
         Ok(Config {
-            compression: compression,
-            encryption: encryption,
+            compression,
+            encryption,
             bundle_size: yaml.bundle_size,
             chunker: try!(ChunkerType::from_yaml(&yaml.chunker)),
             hash: try!(HashMethod::from_yaml(&yaml.hash))


@@ -137,9 +137,9 @@ impl Repository {
         let chunk_count = bundles.iter().map(|b| b.chunk_count).sum();
         RepositoryInfo {
             bundle_count: bundles.len(),
-            chunk_count: chunk_count,
-            encoded_data_size: encoded_data_size,
-            raw_data_size: raw_data_size,
+            chunk_count,
+            encoded_data_size,
+            raw_data_size,
             compression_ratio: encoded_data_size as f32 / raw_data_size as f32,
             avg_chunk_size: raw_data_size as f32 / chunk_count as f32,
             index_size: self.index.size(),


@@ -47,8 +47,8 @@ pub struct Location {
 impl Location {
     pub fn new(bundle: u32, chunk: u32) -> Self {
         Location {
-            bundle: bundle,
-            chunk: chunk
+            bundle,
+            chunk
         }
     }
 }
@@ -158,21 +158,21 @@ impl Repository {
         };
         let dirty = layout.dirtyfile_path().exists();
         let mut repo = Repository {
-            layout: layout,
+            layout,
             dirty: true,
             chunker: config.chunker.create(),
-            config: config,
-            index: index,
-            crypto: crypto,
-            bundle_map: bundle_map,
+            config,
+            index,
+            crypto,
+            bundle_map,
             next_data_bundle: 0,
             next_meta_bundle: 0,
-            bundles: bundles,
+            bundles,
             data_bundle: None,
             meta_bundle: None,
-            lock: lock,
-            remote_locks: remote_locks,
-            local_locks: local_locks
+            lock,
+            remote_locks,
+            local_locks
         };
         if !rebuild_bundle_map {
             let mut save_bundle_map = false;


@@ -93,7 +93,7 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
         _ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()),
     };
     Inode {
-        file_type: file_type,
+        file_type,
         name: path.file_name()
             .map(|s| s.to_string_lossy().to_string())
             .unwrap_or_else(|| "/".to_string()),


@@ -58,11 +58,11 @@ impl<'a> MoFile<'a> {
             return Err(());
         }
         Ok(MoFile{
-            data: data,
-            count: count,
-            orig_pos: orig_pos,
-            trans_pos: trans_pos,
-            reorder: reorder,
+            data,
+            count,
+            orig_pos,
+            trans_pos,
+            reorder,
             i: 0
         })
     }


@@ -11,7 +11,7 @@ impl Bitmap {
         let len = (len + 7) / 8;
         let mut bytes = Vec::with_capacity(len);
         bytes.resize(len, 0);
-        Self { bytes: bytes }
+        Self { bytes }
     }
 
     /// Returns the number of bits in the bitmap
@@ -67,7 +67,7 @@ impl Bitmap {
     #[inline]
     pub fn from_bytes(bytes: Vec<u8>) -> Self {
-        Self { bytes: bytes }
+        Self { bytes }
     }
 }


@@ -56,9 +56,9 @@ impl<T> ProgressIter<T> {
         bar.message(&msg);
         bar.set_max_refresh_rate(Some(Duration::from_millis(100)));
         ProgressIter {
-            inner: inner,
-            bar: bar,
-            msg: msg
+            inner,
+            bar,
+            msg
         }
     }
 }


@@ -93,8 +93,8 @@ impl Compression {
             _ => return Err(CompressionError::UnsupportedCodec(name.to_string())),
         };
         Ok(Compression {
-            method: method,
-            level: level
+            method,
+            level
         })
     }
@@ -234,7 +234,7 @@ impl CompressionStream {
     #[inline]
     fn new(stream: *mut SquashStream) -> Self {
         CompressionStream {
-            stream: stream,
+            stream,
             buffer: [0; 16 * 1024]
         }
     }


@@ -152,7 +152,7 @@ impl Crypto {
         }
         Ok(Crypto {
             path: Some(path),
-            keys: keys
+            keys
         })
     }


@@ -45,8 +45,8 @@ impl Hash {
         let high = try!(src.read_u64::<LittleEndian>());
         let low = try!(src.read_u64::<LittleEndian>());
         Ok(Hash {
-            high: high,
-            low: low
+            high,
+            low
         })
     }
@@ -55,8 +55,8 @@ impl Hash {
         let high = try!(u64::from_str_radix(&val[..16], 16).map_err(|_| ()));
         let low = try!(u64::from_str_radix(&val[16..], 16).map_err(|_| ()));
         Ok(Self {
-            high: high,
-            low: low
+            high,
+            low
         })
     }
 }


@@ -146,7 +146,7 @@ impl LockFolder {
             hostname: get_hostname().unwrap(),
             processid: unsafe { libc::getpid() } as usize,
             date: Utc::now().timestamp(),
-            exclusive: exclusive
+            exclusive
         };
         let path = self.path.join(format!(
             "{}-{}.lock",
@@ -156,7 +156,7 @@ impl LockFolder {
         try!(lockfile.save(&path));
         let handle = LockHandle {
             lock: lockfile,
-            path: path
+            path
         };
         if self.get_lock_level().is_err() {
             try!(handle.release());


@@ -15,8 +15,8 @@ impl<K: Eq + Hash, V> LruCache<K, V> {
     pub fn new(min_size: usize, max_size: usize) -> Self {
         LruCache {
             items: HashMap::default(),
-            min_size: min_size,
-            max_size: max_size,
+            min_size,
+            max_size,
             next: 0
         }
     }