mirror of https://github.com/dswd/zvault

Some changes

This commit is contained in:
parent 2f6c3b239e
commit 1102600893
@@ -3,6 +3,12 @@
 This project follows [semantic versioning](http://semver.org).
 
+### UNRELEASED
+* [added] Translation infrastructure (**requires nightly rust**)
+* [fixed] Also including the first min_size bytes in hash
+* [modified] Updated dependencies
+
+
 ### v0.4.0 (2017-07-21)
 * [added] Added `copy` subcommand
 * [added] Added support for xattrs in fuse mount
@@ -99,8 +99,8 @@ fn load_bundles(
             }
         };
         let bundle = StoredBundle {
-            info: info,
-            path: path
+            info,
+            path
         };
         let id = bundle.info.id.clone();
        if !bundles.contains_key(&id) {
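Most hunks in this commit are the same mechanical change: Rust's field init shorthand, stable since Rust 1.17, lets a struct literal write `field` instead of `field: field` when a local variable of the same name is in scope. A minimal sketch of the pattern (this `StoredBundle` is a simplified stand-in for the real struct):

    use std::path::PathBuf;

    struct StoredBundle {
        info: String, // simplified stand-in for the real BundleInfo
        path: PathBuf,
    }

    fn make_bundle(info: String, path: PathBuf) -> StoredBundle {
        // Old style: StoredBundle { info: info, path: path }
        // Shorthand: the local variable name doubles as the field name.
        StoredBundle { info, path }
    }

    fn main() {
        let bundle = make_bundle("bundle-1".into(), "/tmp/bundle".into());
        println!("{} at {}", bundle.info, bundle.path.display());
    }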
@@ -129,8 +129,8 @@ pub struct BundleDb {
 impl BundleDb {
     fn new(layout: RepositoryLayout, crypto: Arc<Mutex<Crypto>>) -> Self {
         BundleDb {
-            layout: layout,
-            crypto: crypto,
+            layout,
+            crypto,
             uploader: None,
             local_bundles: HashMap::new(),
             remote_bundles: HashMap::new(),
@@ -75,12 +75,12 @@ impl BundleReader {
         info: BundleInfo,
     ) -> Self {
         BundleReader {
-            info: info,
+            info,
             chunks: None,
-            version: version,
-            path: path,
-            crypto: crypto,
-            content_start: content_start,
+            version,
+            path,
+            crypto,
+            content_start,
             chunk_positions: None
         }
     }
@@ -20,7 +20,7 @@ pub struct BundleUploader {
 impl BundleUploader {
     pub fn new(capacity: usize) -> Arc<Self> {
         let self_ = Arc::new(BundleUploader {
-            capacity: capacity,
+            capacity,
             error_present: AtomicBool::new(false),
             error: Mutex::new(None),
             waiting: AtomicUsize::new(0),
@@ -72,13 +72,13 @@ impl BundleWriter {
             None => None,
         };
         Ok(BundleWriter {
-            mode: mode,
-            hash_method: hash_method,
+            mode,
+            hash_method,
             data: vec![],
-            compression: compression,
-            compression_stream: compression_stream,
-            encryption: encryption,
-            crypto: crypto,
+            compression,
+            compression_stream,
+            encryption,
+            crypto,
             raw_size: 0,
             chunk_count: 0,
             chunks: ChunkList::new()
@@ -127,7 +127,7 @@ impl BundleWriter {
             chunk_count: self.chunk_count,
             id: id.clone(),
             raw_size: self.raw_size,
-            encoded_size: encoded_size,
+            encoded_size,
             chunk_list_size: chunk_data.len(),
             timestamp: Local::now().timestamp()
         };
@@ -149,8 +149,8 @@ impl BundleWriter {
             .unwrap()
             .to_path_buf();
         Ok(StoredBundle {
-            path: path,
-            info: info
+            path,
+            info
         })
     }
 
@@ -20,7 +20,7 @@ impl AeChunker {
         AeChunker{
             buffer: [0; 0x1000],
             buffered: 0,
-            window_size: window_size,
+            window_size,
         }
     }
 }
@@ -66,9 +66,9 @@ impl FastCdcChunker {
             gear: create_gear(seed),
             min_size: avg_size/4,
             max_size: avg_size*8,
-            avg_size: avg_size,
-            mask_long: mask_long,
-            mask_short: mask_short,
+            avg_size,
+            mask_long,
+            mask_short,
         }
     }
 }
@@ -55,12 +55,12 @@ impl RabinChunker {
             buffer: [0; 0x1000],
             buffered: 0,
             table: create_table(alpha, window_size),
-            alpha: alpha,
-            seed: seed,
+            alpha,
+            seed,
             min_size: avg_size/4,
             max_size: avg_size*4,
-            window_size: window_size,
-            chunk_mask: chunk_mask,
+            window_size,
+            chunk_mask,
         }
     }
 }
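For context on the two chunker constructors above: both derive their chunk-size bounds from `avg_size`, FastCDC allowing `avg_size/4` up to `avg_size*8` and the Rabin chunker `avg_size/4` up to `avg_size*4`. A quick sketch of that arithmetic for a hypothetical 16 KiB average:

    fn main() {
        let avg_size = 16 * 1024; // hypothetical 16 KiB average chunk size
        // Bounds exactly as computed in the constructors shown in this diff:
        let (fc_min, fc_max) = (avg_size / 4, avg_size * 8); // 4 KiB, 128 KiB
        let (rb_min, rb_max) = (avg_size / 4, avg_size * 4); // 4 KiB, 64 KiB
        println!("FastCDC: {}..{} bytes", fc_min, fc_max);
        println!("Rabin:   {}..{} bytes", rb_min, rb_max);
    }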
@@ -2,16 +2,8 @@ use log;
 pub use log::SetLoggerError;
 
 use ansi_term::{Color, Style};
-use std::io::Write;
-
-macro_rules! println_stderr(
-    ($($arg:tt)*) => { {
-        let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
-        r.expect("failed printing to stderr");
-    } }
-);
-
 
 struct Logger(log::Level);
 
 impl log::Log for Logger {
@@ -25,22 +17,22 @@ impl log::Log for Logger {
         if self.enabled(record.metadata()) {
             match record.level() {
                 log::Level::Error => {
-                    println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args())
+                    eprintln!("{}: {}", Color::Red.bold().paint("error"), record.args())
                 }
                 log::Level::Warn => {
-                    println_stderr!(
+                    eprintln!(
                         "{}: {}",
                         Color::Yellow.bold().paint("warning"),
                         record.args()
                     )
                 }
                 log::Level::Info => {
-                    println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args())
+                    eprintln!("{}: {}", Color::Green.bold().paint("info"), record.args())
                 }
                 log::Level::Debug => {
-                    println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args())
+                    eprintln!("{}: {}", Style::new().bold().paint("debug"), record.args())
                 }
-                log::Level::Trace => println_stderr!("{}: {}", "trace", record.args()),
+                log::Level::Trace => eprintln!("{}: {}", "trace", record.args()),
             }
         }
     }
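The logger hunks swap the hand-rolled `println_stderr!` macro for `eprintln!`, which has been in the standard library since Rust 1.19 and behaves the same way: format a line, write it to stderr, and panic if the write fails. A minimal sketch of the equivalence:

    use std::io::Write;

    fn main() {
        // What the removed macro expanded to:
        writeln!(&mut std::io::stderr(), "{}: {}", "error", "something broke")
            .expect("failed printing to stderr");
        // The std replacement:
        eprintln!("{}: {}", "error", "something broke");
    }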
@@ -105,7 +105,7 @@ macro_rules! checked {
         match $expr {
             Ok(val) => val,
             Err(err) => {
-                tr_error!("Failed to {}\n\tcaused by: {}", $msg, err);
+                tr_error!("Failed to {}\n\tcaused by: {}", tr!($msg), err);
                 return Err($code)
             }
         }
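Wrapping `$msg` in `tr!` routes the caller-supplied message through the new translation infrastructure (the changelog's "Translation infrastructure" entry) so the "Failed to …" text can be localized. This diff doesn't show how `tr!` resolves strings; purely as an illustration, a gettext-style lookup with a fallback to the original text would behave roughly like this (the `catalog` function and this `tr!` definition are hypothetical stand-ins):

    use std::collections::HashMap;

    // Hypothetical catalog; the real infrastructure parses .mo files
    // (see the MoFile changes later in this commit).
    fn catalog() -> HashMap<&'static str, &'static str> {
        let mut map = HashMap::new();
        map.insert("save the config", "die Konfiguration speichern");
        map
    }

    // Hypothetical stand-in for zvault's tr! macro: look the message up,
    // fall back to the untranslated text when no entry exists.
    macro_rules! tr {
        ($msg:expr) => {
            catalog().get($msg).copied().unwrap_or($msg)
        };
    }

    fn main() {
        println!("Failed to {}\n\tcaused by: {}", tr!("save the config"), "disk full");
    }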
@@ -187,16 +187,16 @@ fn print_backup(backup: &Backup) {
     );
     let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32;
     tr_println!(
-        "Deduplicated size: {}, {:.1}% saved",
+        "Deduplicated size: {}, {:.1}%",
         to_file_size(backup.deduplicated_data_size),
-        (1.0 - dedup_ratio) * 100.0
+        (dedup_ratio - 1.0) * 100.0
     );
     let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32;
     tr_println!(
-        "Compressed size: {} in {} bundles, {:.1}% saved",
+        "Compressed size: {} in {} bundles, {:.1}%",
         to_file_size(backup.encoded_data_size),
         backup.bundle_count,
-        (1.0 - compress_ratio) * 100.0
+        (compress_ratio - 1.0) * 100.0
     );
     tr_println!(
         "Chunk count: {}, avg size: {}",
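This hunk, together with the `print_repoinfo` and `print_bundle` hunks below, flips the percentage display from space saved, `(1.0 - ratio) * 100`, to a signed size change, `(ratio - 1.0) * 100`, which is why the word "saved" disappears from the format strings: a reduction now prints as a negative percentage. Worked through with made-up sizes:

    fn main() {
        // Hypothetical backup: 100 MiB changed, 30 MiB left after deduplication.
        let changed = 100.0_f32;
        let deduplicated = 30.0_f32;
        let dedup_ratio = deduplicated / changed; // 0.3

        let old_style = (1.0 - dedup_ratio) * 100.0; // 70.0 -> "70.0% saved"
        let new_style = (dedup_ratio - 1.0) * 100.0; // -70.0 -> "-70.0%"
        println!("old: {:.1}% saved / new: {:.1}%", old_style, new_style);
    }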
@@ -299,7 +299,7 @@ fn print_repoinfo(info: &RepositoryInfo) {
     tr_println!("Bundles: {}", info.bundle_count);
     tr_println!("Total size: {}", to_file_size(info.encoded_data_size));
     tr_println!("Uncompressed size: {}", to_file_size(info.raw_data_size));
-    tr_println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0);
+    tr_println!("Compression ratio: {:.1}%", (info.compression_ratio - 1.0) * 100.0);
     tr_println!("Chunk count: {}", info.chunk_count);
     tr_println!(
         "Average chunk size: {}",
@@ -346,7 +346,7 @@ fn print_bundle(bundle: &StoredBundle) {
         tr_println!(
             " - Compression: {}, ratio: {:.1}%",
             compression,
-            ratio * 100.0
+            (ratio - 1.0) * 100.0
         );
     }
 
@@ -436,11 +436,11 @@ pub fn run() -> Result<(), ErrorCode> {
             Repository::create(
                 repo_path,
                 &Config {
-                    bundle_size: bundle_size,
-                    chunker: chunker,
-                    compression: compression,
+                    bundle_size,
+                    chunker,
+                    compression,
                     encryption: None,
-                    hash: hash
+                    hash
                 },
                 remote_path
             ),
@@ -559,8 +559,8 @@ pub fn run() -> Result<(), ErrorCode> {
             ))
         };
         let options = BackupOptions {
-            same_device: same_device,
-            excludes: excludes
+            same_device,
+            excludes
         };
         let result = if tar {
             repo.import_tarfile(&src_path)
src/index.rs
@@ -14,7 +14,6 @@ pub const MIN_USAGE: f64 = 0.35;
 pub const INITIAL_SIZE: usize = 1024;
 
-
-
+//TODO: translate
 quick_error!{
     #[derive(Debug)]
     pub enum IndexError {
@@ -230,10 +229,10 @@ impl<K: Key, V: Value> Index<K, V> {
             max_entries: (header.capacity as f64 * MAX_USAGE) as usize,
             min_entries: (header.capacity as f64 * MIN_USAGE) as usize,
             entries: header.entries as usize,
-            fd: fd,
-            mmap: mmap,
-            data: data,
-            header: header
+            fd,
+            mmap,
+            data,
+            header
         };
         debug_assert!(index.check().is_ok(), tr!("Inconsistent after creation"));
         Ok(index)
@@ -276,6 +275,7 @@ impl<K: Key, V: Value> Index<K, V> {
         self.max_entries = (capacity as f64 * MAX_USAGE) as usize;
     }
 
+    #[allow(redundant_field_names)]
     fn reinsert(&mut self, start: usize, end: usize) -> Result<(), IndexError> {
         for pos in start..end {
             let key;
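`redundant_field_names` is the clippy lint behind the rest of this commit: it flags `field: field` initializers that could use the shorthand. The attribute exempts `reinsert`, presumably because a spelled-out initializer remains there intentionally. A sketch of what the lint reacts to (hypothetical struct and names; on current toolchains the attribute is written with the `clippy::` prefix):

    struct Entry { key: u64, data: u64 }

    #[allow(clippy::redundant_field_names)]
    fn make_entry(key: u64, data: u64) -> Entry {
        // clippy would normally suggest `Entry { key, data }` here.
        Entry { key: key, data: data }
    }

    fn main() {
        let e = make_entry(1, 2);
        println!("{} -> {}", e.key, e.data);
    }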
src/mount.rs
@@ -113,8 +113,8 @@ impl FuseInode {
             kind: convert_file_type(self.inode.file_type),
             perm: self.inode.mode as u16,
             nlink: 1,
-            uid: uid,
-            gid: gid,
+            uid,
+            gid,
             rdev: self.inode.device.map_or(
                 0,
                 |(major, minor)| (major << 8) + minor
@@ -158,7 +158,7 @@ impl<'a> FuseFilesystem<'a> {
     pub fn new(repository: &'a mut Repository) -> Result<Self, RepositoryError> {
         Ok(FuseFilesystem {
             next_id: 1,
-            repository: repository,
+            repository,
             inodes: HashMap::new()
         })
     }
@@ -222,7 +222,7 @@ impl<'a> FuseFilesystem<'a> {
     ) -> FuseInodeRef {
         self.add_inode(
             Inode {
-                name: name,
+                name,
                 file_type: FileType::Directory,
                 ..Default::default()
             },
@@ -240,7 +240,7 @@ impl<'a> FuseFilesystem<'a> {
         group_names: HashMap<u32, String>,
     ) -> FuseInodeRef {
         let inode = FuseInode {
-            inode: inode,
+            inode,
             num: self.next_id,
             parent: parent.clone(),
             chunks: None,
@@ -164,7 +164,7 @@ impl Backup {
         try!(file.write_all(&[HEADER_VERSION]).map_err(|err| {
             BackupFileError::Write(err, path.to_path_buf())
         }));
-        let header = BackupHeader { encryption: encryption };
+        let header = BackupHeader { encryption };
         try!(msgpack::encode_to_stream(&header, &mut file).context(path));
         try!(file.write_all(&data).map_err(|err| {
             BackupFileError::Write(err, path.to_path_buf())
@@ -16,7 +16,7 @@ pub struct ChunkReader<'a> {
 impl<'a> ChunkReader<'a> {
     pub fn new(repo: &'a mut Repository, chunks: ChunkList) -> Self {
         ChunkReader {
-            repo: repo,
+            repo,
             chunks: chunks.into_inner().into(),
             data: vec![],
             pos: 0
@@ -193,8 +193,8 @@ impl Config {
             None
         };
         Ok(Config {
-            compression: compression,
-            encryption: encryption,
+            compression,
+            encryption,
             bundle_size: yaml.bundle_size,
             chunker: try!(ChunkerType::from_yaml(&yaml.chunker)),
             hash: try!(HashMethod::from_yaml(&yaml.hash))
@@ -137,9 +137,9 @@ impl Repository {
         let chunk_count = bundles.iter().map(|b| b.chunk_count).sum();
         RepositoryInfo {
             bundle_count: bundles.len(),
-            chunk_count: chunk_count,
-            encoded_data_size: encoded_data_size,
-            raw_data_size: raw_data_size,
+            chunk_count,
+            encoded_data_size,
+            raw_data_size,
             compression_ratio: encoded_data_size as f32 / raw_data_size as f32,
             avg_chunk_size: raw_data_size as f32 / chunk_count as f32,
             index_size: self.index.size(),
@@ -47,8 +47,8 @@ pub struct Location {
 impl Location {
     pub fn new(bundle: u32, chunk: u32) -> Self {
         Location {
-            bundle: bundle,
-            chunk: chunk
+            bundle,
+            chunk
         }
     }
 }
@@ -158,21 +158,21 @@ impl Repository {
         };
         let dirty = layout.dirtyfile_path().exists();
         let mut repo = Repository {
-            layout: layout,
+            layout,
             dirty: true,
             chunker: config.chunker.create(),
-            config: config,
-            index: index,
-            crypto: crypto,
-            bundle_map: bundle_map,
+            config,
+            index,
+            crypto,
+            bundle_map,
             next_data_bundle: 0,
             next_meta_bundle: 0,
-            bundles: bundles,
+            bundles,
             data_bundle: None,
             meta_bundle: None,
-            lock: lock,
-            remote_locks: remote_locks,
-            local_locks: local_locks
+            lock,
+            remote_locks,
+            local_locks
         };
         if !rebuild_bundle_map {
             let mut save_bundle_map = false;
@@ -93,7 +93,7 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, RepositoryError> {
         _ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()),
     };
     Inode {
-        file_type: file_type,
+        file_type,
         name: path.file_name()
             .map(|s| s.to_string_lossy().to_string())
            .unwrap_or_else(|| "/".to_string()),
@@ -58,11 +58,11 @@ impl<'a> MoFile<'a> {
             return Err(());
         }
         Ok(MoFile{
-            data: data,
-            count: count,
-            orig_pos: orig_pos,
-            trans_pos: trans_pos,
-            reorder: reorder,
+            data,
+            count,
+            orig_pos,
+            trans_pos,
+            reorder,
             i: 0
         })
     }
@@ -11,7 +11,7 @@ impl Bitmap {
         let len = (len + 7) / 8;
         let mut bytes = Vec::with_capacity(len);
         bytes.resize(len, 0);
-        Self { bytes: bytes }
+        Self { bytes }
     }
 
     /// Returns the number of bits in the bitmap
@@ -67,7 +67,7 @@ impl Bitmap {
 
     #[inline]
     pub fn from_bytes(bytes: Vec<u8>) -> Self {
-        Self { bytes: bytes }
+        Self { bytes }
     }
 }
 
@@ -56,9 +56,9 @@ impl<T> ProgressIter<T> {
         bar.message(&msg);
         bar.set_max_refresh_rate(Some(Duration::from_millis(100)));
         ProgressIter {
-            inner: inner,
-            bar: bar,
-            msg: msg
+            inner,
+            bar,
+            msg
         }
     }
 }
@@ -93,8 +93,8 @@ impl Compression {
             _ => return Err(CompressionError::UnsupportedCodec(name.to_string())),
         };
         Ok(Compression {
-            method: method,
-            level: level
+            method,
+            level
         })
     }
 
@@ -234,7 +234,7 @@ impl CompressionStream {
     #[inline]
     fn new(stream: *mut SquashStream) -> Self {
         CompressionStream {
-            stream: stream,
+            stream,
             buffer: [0; 16 * 1024]
         }
     }
@@ -152,7 +152,7 @@ impl Crypto {
         }
         Ok(Crypto {
             path: Some(path),
-            keys: keys
+            keys
         })
     }
 
@@ -45,8 +45,8 @@ impl Hash {
         let high = try!(src.read_u64::<LittleEndian>());
         let low = try!(src.read_u64::<LittleEndian>());
         Ok(Hash {
-            high: high,
-            low: low
+            high,
+            low
         })
     }
 
@@ -55,8 +55,8 @@ impl Hash {
         let high = try!(u64::from_str_radix(&val[..16], 16).map_err(|_| ()));
         let low = try!(u64::from_str_radix(&val[16..], 16).map_err(|_| ()));
         Ok(Self {
-            high: high,
-            low: low
+            high,
+            low
         })
     }
 }
@@ -146,7 +146,7 @@ impl LockFolder {
             hostname: get_hostname().unwrap(),
             processid: unsafe { libc::getpid() } as usize,
             date: Utc::now().timestamp(),
-            exclusive: exclusive
+            exclusive
         };
         let path = self.path.join(format!(
             "{}-{}.lock",
@@ -156,7 +156,7 @@ impl LockFolder {
         try!(lockfile.save(&path));
         let handle = LockHandle {
             lock: lockfile,
-            path: path
+            path
         };
         if self.get_lock_level().is_err() {
             try!(handle.release());
@@ -15,8 +15,8 @@ impl<K: Eq + Hash, V> LruCache<K, V> {
     pub fn new(min_size: usize, max_size: usize) -> Self {
         LruCache {
             items: HashMap::default(),
-            min_size: min_size,
-            max_size: max_size,
+            min_size,
+            max_size,
             next: 0
         }
     }