mirror of https://github.com/dswd/zvault

Commit d062aaa6d4: Reformatted using rustfmt
Parent: 15ab556c18
@@ -8,6 +8,7 @@ This project follows [semantic versioning](http://semver.org).
 * [added] Added support for xattrs in fuse mount
 * [added] Added support for block/char devices
 * [added] Added support for fifo files
+* [modified] Reformatted sources using rustfmt
 * [modified] Also documenting common flags in subcommands
 * [modified] Using repository aliases (**conversion needed**)
 * [modified] Remote path must be absolute
@@ -0,0 +1,2 @@
+trailing_semicolon = false
+trailing_comma = "Never"
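The two keys above are rustfmt options; their exact semantics depend on the rustfmt version in use, so treat the following as a sketch rather than a specification. The style visible throughout this commit is consistent with them: multi-line call argument lists carry no trailing comma, and statements such as `return`, `break` and `continue` end in an explicit semicolon. A minimal illustration with hypothetical functions (not from zvault):

```rust
use std::collections::HashMap;

// Hypothetical helper, present only so the example compiles.
fn weighted(value: u32, weight: u32) -> u32 {
    value * weight
}

fn lookup(map: &HashMap<String, u32>, key: &str, weight: u32) -> u32 {
    if !map.contains_key(key) {
        // Statement form with an explicit trailing semicolon,
        // matching the `return ...;` rewrites in this diff.
        return 0;
    }
    // Multi-line call without a trailing comma after the last
    // argument, matching `trailing_comma = "Never"`.
    weighted(
        map[key],
        weight
    )
}
```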
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 
 use std::path::{Path, PathBuf};
 use std::fs::{self, File};
@@ -62,7 +62,11 @@ impl StoredBundle {
         self.info.id.clone()
     }
 
-    pub fn copy_to<P: AsRef<Path>>(&self, base_path: &Path, path: P) -> Result<Self, BundleDbError> {
+    pub fn copy_to<P: AsRef<Path>>(
+        &self,
+        base_path: &Path,
+        path: P,
+    ) -> Result<Self, BundleDbError> {
         let src_path = base_path.join(&self.path);
         let dst_path = path.as_ref();
         try!(fs::copy(&src_path, dst_path).context(dst_path));
@@ -71,7 +75,11 @@ impl StoredBundle {
         Ok(bundle)
     }
 
-    pub fn move_to<P: AsRef<Path>>(&mut self, base_path: &Path, path: P) -> Result<(), BundleDbError> {
+    pub fn move_to<P: AsRef<Path>>(
+        &mut self,
+        base_path: &Path,
+        path: P,
+    ) -> Result<(), BundleDbError> {
         let src_path = base_path.join(&self.path);
         let dst_path = path.as_ref();
         if fs::rename(&src_path, dst_path).is_err() {
@@ -88,11 +96,11 @@ impl StoredBundle {
         let mut header = [0u8; 8];
         try!(file.read_exact(&mut header).map_err(BundleCacheError::Read));
         if header[..CACHE_FILE_STRING.len()] != CACHE_FILE_STRING {
-            return Err(BundleCacheError::WrongHeader)
+            return Err(BundleCacheError::WrongHeader);
         }
         let version = header[CACHE_FILE_STRING.len()];
         if version != CACHE_FILE_VERSION {
-            return Err(BundleCacheError::UnsupportedVersion(version))
+            return Err(BundleCacheError::UnsupportedVersion(version));
         }
         Ok(try!(msgpack::decode_from_stream(&mut file)))
     }
@@ -100,8 +108,12 @@ impl StoredBundle {
     pub fn save_list_to<P: AsRef<Path>>(list: &[Self], path: P) -> Result<(), BundleCacheError> {
         let path = path.as_ref();
         let mut file = BufWriter::new(try!(File::create(path).map_err(BundleCacheError::Write)));
-        try!(file.write_all(&CACHE_FILE_STRING).map_err(BundleCacheError::Write));
-        try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(BundleCacheError::Write));
+        try!(file.write_all(&CACHE_FILE_STRING).map_err(
+            BundleCacheError::Write
+        ));
+        try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(
+            BundleCacheError::Write
+        ));
         try!(msgpack::encode_to_stream(&list, &mut file));
         Ok(())
     }
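All error propagation in these sources uses the pre-`?`-operator `try!` macro, which is why rustfmt rewraps it like an ordinary macro call. For readers unfamiliar with it, this is roughly the standard library's expansion (simplified sketch; `?` is the modern equivalent):

```rust
// `try!(expr)` unwraps an Ok value and early-returns the error,
// converting it with From, just like the later `?` operator.
macro_rules! try_sketch {
    ($expr:expr) => {
        match $expr {
            Ok(val) => val,
            Err(err) => return Err(::std::convert::From::from(err)),
        }
    };
}
```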
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -57,7 +57,12 @@ quick_error!{
 }
 
 
-fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, StoredBundle>, crypto: Arc<Mutex<Crypto>>) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
+fn load_bundles(
+    path: &Path,
+    base: &Path,
+    bundles: &mut HashMap<BundleId, StoredBundle>,
+    crypto: Arc<Mutex<Crypto>>,
+) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
     let mut paths = vec![path.to_path_buf()];
     let mut bundle_paths = HashSet::new();
     while let Some(path) = paths.pop() {
@@ -68,7 +73,7 @@ fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, Stored
             paths.push(path);
         } else {
             if path.extension() != Some("bundle".as_ref()) {
-                continue
+                continue;
             }
             bundle_paths.insert(path.strip_prefix(base).unwrap().to_path_buf());
         }
@@ -89,10 +94,13 @@ fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, Stored
            Err(err) => {
                warn!("Failed to read bundle {:?}\n\tcaused by: {}", path, err);
                info!("Ignoring unreadable bundle");
-                continue
+                continue;
            }
        };
-        let bundle = StoredBundle { info: info, path: path };
+        let bundle = StoredBundle {
+            info: info,
+            path: path
+        };
        let id = bundle.info.id.clone();
        if !bundles.contains_key(&id) {
            new.push(bundle.clone());
@@ -129,7 +137,9 @@ impl BundleDb {
         }
     }
 
-    fn load_bundle_list(&mut self) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
+    fn load_bundle_list(
+        &mut self,
+    ) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
         if let Ok(list) = StoredBundle::read_list_from(&self.layout.local_bundle_cache_path()) {
             for bundle in list {
                 self.local_bundles.insert(bundle.id(), bundle);
@@ -145,15 +155,31 @@ impl BundleDb {
             warn!("Failed to read remote bundle cache, rebuilding cache");
         }
         let base_path = self.layout.base_path();
-        let (new, gone) = try!(load_bundles(&self.layout.local_bundles_path(), base_path, &mut self.local_bundles, self.crypto.clone()));
+        let (new, gone) = try!(load_bundles(
+            &self.layout.local_bundles_path(),
+            base_path,
+            &mut self.local_bundles,
+            self.crypto.clone()
+        ));
         if !new.is_empty() || !gone.is_empty() {
             let bundles: Vec<_> = self.local_bundles.values().cloned().collect();
-            try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path()));
+            try!(StoredBundle::save_list_to(
+                &bundles,
+                &self.layout.local_bundle_cache_path()
+            ));
         }
-        let (new, gone) = try!(load_bundles(&self.layout.remote_bundles_path(), base_path, &mut self.remote_bundles, self.crypto.clone()));
+        let (new, gone) = try!(load_bundles(
+            &self.layout.remote_bundles_path(),
+            base_path,
+            &mut self.remote_bundles,
+            self.crypto.clone()
+        ));
         if !new.is_empty() || !gone.is_empty() {
             let bundles: Vec<_> = self.remote_bundles.values().cloned().collect();
-            try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path()));
+            try!(StoredBundle::save_list_to(
+                &bundles,
+                &self.layout.remote_bundle_cache_path()
+            ));
         }
         Ok((new, gone))
     }
@@ -164,9 +190,15 @@ impl BundleDb {
 
     fn save_cache(&self) -> Result<(), BundleDbError> {
         let bundles: Vec<_> = self.local_bundles.values().cloned().collect();
-        try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path()));
+        try!(StoredBundle::save_list_to(
+            &bundles,
+            &self.layout.local_bundle_cache_path()
+        ));
         let bundles: Vec<_> = self.remote_bundles.values().cloned().collect();
-        Ok(try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path())))
+        Ok(try!(StoredBundle::save_list_to(
+            &bundles,
+            &self.layout.remote_bundle_cache_path()
+        )))
     }
 
     fn update_cache(&mut self) -> Result<(), BundleDbError> {
@@ -192,13 +224,18 @@ impl BundleDb {
         let base_path = self.layout.base_path();
         for id in remove {
             if let Some(bundle) = self.local_bundles.remove(&id) {
-                try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| BundleDbError::Remove(e, id)))
+                try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| {
+                    BundleDbError::Remove(e, id)
+                }))
             }
         }
         Ok(())
     }
 
-    pub fn open(layout: RepositoryLayout, crypto: Arc<Mutex<Crypto>>) -> Result<(Self, Vec<BundleInfo>, Vec<BundleInfo>), BundleDbError> {
+    pub fn open(
+        layout: RepositoryLayout,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<(Self, Vec<BundleInfo>, Vec<BundleInfo>), BundleDbError> {
         let mut self_ = Self::new(layout, crypto);
         let (new, gone) = try!(self_.load_bundle_list());
         try!(self_.update_cache());
@@ -208,21 +245,51 @@ impl BundleDb {
     }
 
     pub fn create(layout: RepositoryLayout) -> Result<(), BundleDbError> {
-        try!(fs::create_dir_all(layout.remote_bundles_path()).context(&layout.remote_bundles_path() as &Path));
-        try!(fs::create_dir_all(layout.local_bundles_path()).context(&layout.local_bundles_path() as &Path));
-        try!(fs::create_dir_all(layout.temp_bundles_path()).context(&layout.temp_bundles_path() as &Path));
-        try!(StoredBundle::save_list_to(&[], layout.local_bundle_cache_path()));
-        try!(StoredBundle::save_list_to(&[], layout.remote_bundle_cache_path()));
+        try!(fs::create_dir_all(layout.remote_bundles_path()).context(
+            &layout.remote_bundles_path() as
+                &Path
+        ));
+        try!(fs::create_dir_all(layout.local_bundles_path()).context(
+            &layout.local_bundles_path() as
+                &Path
+        ));
+        try!(fs::create_dir_all(layout.temp_bundles_path()).context(
+            &layout.temp_bundles_path() as
+                &Path
+        ));
+        try!(StoredBundle::save_list_to(
+            &[],
+            layout.local_bundle_cache_path()
+        ));
+        try!(StoredBundle::save_list_to(
+            &[],
+            layout.remote_bundle_cache_path()
+        ));
         Ok(())
     }
 
     #[inline]
-    pub fn create_bundle(&self, mode: BundleMode, hash_method: HashMethod, compression: Option<Compression>, encryption: Option<Encryption>) -> Result<BundleWriter, BundleDbError> {
-        Ok(try!(BundleWriter::new(mode, hash_method, compression, encryption, self.crypto.clone())))
+    pub fn create_bundle(
+        &self,
+        mode: BundleMode,
+        hash_method: HashMethod,
+        compression: Option<Compression>,
+        encryption: Option<Encryption>,
+    ) -> Result<BundleWriter, BundleDbError> {
+        Ok(try!(BundleWriter::new(
+            mode,
+            hash_method,
+            compression,
+            encryption,
+            self.crypto.clone()
+        )))
     }
 
     fn get_stored_bundle(&self, bundle_id: &BundleId) -> Result<&StoredBundle, BundleDbError> {
-        if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| self.remote_bundles.get(bundle_id)) {
+        if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| {
+            self.remote_bundles.get(bundle_id)
+        })
+        {
            Ok(stored)
        } else {
            Err(BundleDbError::NoSuchBundle(bundle_id.clone()))
@@ -232,21 +299,26 @@ impl BundleDb {
     #[inline]
     fn get_bundle(&self, stored: &StoredBundle) -> Result<BundleReader, BundleDbError> {
         let base_path = self.layout.base_path();
-        Ok(try!(BundleReader::load(base_path.join(&stored.path), self.crypto.clone())))
+        Ok(try!(BundleReader::load(
+            base_path.join(&stored.path),
+            self.crypto.clone()
+        )))
     }
 
     pub fn get_chunk(&mut self, bundle_id: &BundleId, id: usize) -> Result<Vec<u8>, BundleDbError> {
         if let Some(&mut (ref mut bundle, ref data)) = self.bundle_cache.get_mut(bundle_id) {
             let (pos, len) = try!(bundle.get_chunk_position(id));
             let mut chunk = Vec::with_capacity(len);
-            chunk.extend_from_slice(&data[pos..pos+len]);
+            chunk.extend_from_slice(&data[pos..pos + len]);
             return Ok(chunk);
         }
-        let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then(|s| self.get_bundle(s)));
+        let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then(
+            |s| self.get_bundle(s)
+        ));
         let (pos, len) = try!(bundle.get_chunk_position(id));
         let mut chunk = Vec::with_capacity(len);
         let data = try!(bundle.load_contents());
-        chunk.extend_from_slice(&data[pos..pos+len]);
+        chunk.extend_from_slice(&data[pos..pos + len]);
         self.bundle_cache.put(bundle_id.clone(), (bundle, data));
         Ok(chunk)
     }
@@ -255,7 +327,10 @@ impl BundleDb {
         let id = bundle.id();
         let (folder, filename) = self.layout.local_bundle_path(&id, self.local_bundles.len());
         try!(fs::create_dir_all(&folder).context(&folder as &Path));
-        let bundle = try!(bundle.copy_to(self.layout.base_path(), folder.join(filename)));
+        let bundle = try!(bundle.copy_to(
+            self.layout.base_path(),
+            folder.join(filename)
+        ));
         self.local_bundles.insert(id, bundle);
         Ok(())
     }
@@ -268,7 +343,10 @@ impl BundleDb {
         let (folder, filename) = self.layout.remote_bundle_path(self.remote_bundles.len());
         let dst_path = folder.join(filename);
         let src_path = self.layout.base_path().join(bundle.path);
-        bundle.path = dst_path.strip_prefix(self.layout.base_path()).unwrap().to_path_buf();
+        bundle.path = dst_path
+            .strip_prefix(self.layout.base_path())
+            .unwrap()
+            .to_path_buf();
         if self.uploader.is_none() {
             self.uploader = Some(BundleUploader::new(5));
         }
@@ -288,7 +366,9 @@ impl BundleDb {
     }
 
     pub fn get_chunk_list(&self, bundle: &BundleId) -> Result<ChunkList, BundleDbError> {
-        let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| self.get_bundle(stored)));
+        let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| {
+            self.get_bundle(stored)
+        }));
         Ok(try!(bundle.get_chunk_list()).clone())
     }
 
@@ -305,7 +385,9 @@ impl BundleDb {
     pub fn delete_local_bundle(&mut self, bundle: &BundleId) -> Result<(), BundleDbError> {
         if let Some(bundle) = self.local_bundles.remove(bundle) {
             let path = self.layout.base_path().join(&bundle.path);
-            try!(fs::remove_file(path).map_err(|e| BundleDbError::Remove(e, bundle.id())))
+            try!(fs::remove_file(path).map_err(|e| {
+                BundleDbError::Remove(e, bundle.id())
+            }))
         }
         Ok(())
     }
@@ -322,24 +404,29 @@ impl BundleDb {
 
     pub fn check(&mut self, full: bool, repair: bool) -> Result<bool, BundleDbError> {
         let mut to_repair = vec![];
-        for (id, stored) in ProgressIter::new("checking bundles", self.remote_bundles.len(), self.remote_bundles.iter()) {
+        for (id, stored) in ProgressIter::new(
+            "checking bundles",
+            self.remote_bundles.len(),
+            self.remote_bundles.iter()
+        )
+        {
            let mut bundle = match self.get_bundle(stored) {
                Ok(bundle) => bundle,
                Err(err) => {
                    if repair {
                        to_repair.push(id.clone());
-                        continue
+                        continue;
                    } else {
-                        return Err(err)
+                        return Err(err);
                    }
                }
            };
            if let Err(err) = bundle.check(full) {
                if repair {
                    to_repair.push(id.clone());
-                    continue
+                    continue;
                } else {
-                    return Err(err.into())
+                    return Err(err.into());
                }
            }
        }
@@ -371,35 +458,52 @@ impl BundleDb {
         let mut bundle = match self.get_bundle(&stored) {
             Ok(bundle) => bundle,
             Err(err) => {
-                warn!("Problem detected: failed to read bundle header: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle header: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                 return self.evacuate_broken_bundle(stored);
             }
         };
         let chunks = match bundle.get_chunk_list() {
             Ok(chunks) => chunks.clone(),
             Err(err) => {
-                warn!("Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                 return self.evacuate_broken_bundle(stored);
             }
         };
         let data = match bundle.load_contents() {
             Ok(data) => data,
             Err(err) => {
-                warn!("Problem detected: failed to read bundle data: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle data: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                 return self.evacuate_broken_bundle(stored);
             }
         };
         warn!("Problem detected: bundle data was truncated: {}", id);
         info!("Copying readable data into new bundle");
         let info = stored.info.clone();
-        let mut new_bundle = try!(self.create_bundle(info.mode, info.hash_method, info.compression, info.encryption));
+        let mut new_bundle = try!(self.create_bundle(
+            info.mode,
+            info.hash_method,
+            info.compression,
+            info.encryption
+        ));
         let mut pos = 0;
         for (hash, mut len) in chunks.into_inner() {
             if pos >= data.len() {
-                break
+                break;
             }
             len = min(len, (data.len() - pos) as u32);
-            try!(new_bundle.add(&data[pos..pos+len as usize], hash));
+            try!(new_bundle.add(&data[pos..pos + len as usize], hash));
             pos += len as usize;
         }
         let bundle = try!(self.add_bundle(new_bundle));
@@ -411,5 +515,4 @@ impl BundleDb {
     pub fn len(&self) -> usize {
         self.remote_bundles.len()
     }
-
 }
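A note on `get_chunk` above: it serves chunk reads through an in-memory cache of decoded bundle contents, slicing the chunk out on a hit and loading, decoding and inserting the bundle on a miss. A minimal read-through sketch of that pattern, with a plain `HashMap` standing in for the LRU-style `bundle_cache` (hypothetical types, not the zvault code):

```rust
use std::collections::HashMap;

struct ContentCache {
    entries: HashMap<String, Vec<u8>>,
}

impl ContentCache {
    // Read-through lookup: return the cached payload, or load it once,
    // cache it and return it. `load` stands in for loading and decoding
    // a bundle's contents from disk.
    fn get_or_load<F: FnOnce() -> Vec<u8>>(&mut self, id: &str, load: F) -> &[u8] {
        self.entries.entry(id.to_string()).or_insert_with(load)
    }
}
```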
@@ -10,7 +10,7 @@ pub use self::reader::{BundleReader, BundleReaderError};
 pub use self::db::*;
 pub use self::uploader::BundleUploader;
 
-use ::prelude::*;
+use prelude::*;
 
 use std::fmt;
 use serde;
@@ -47,7 +47,10 @@ impl BundleId {
 
     #[inline]
     pub fn random() -> Self {
-        BundleId(Hash{high: rand::random(), low: rand::random()})
+        BundleId(Hash {
+            high: rand::random(),
+            low: rand::random()
+        })
     }
 }
 
@@ -68,7 +71,8 @@ impl fmt::Debug for BundleId {
 
 #[derive(Eq, Debug, PartialEq, Clone, Copy)]
 pub enum BundleMode {
-    Data, Meta
+    Data,
+    Meta
 }
 serde_impl!(BundleMode(u8) {
     Data => 0,
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -67,7 +67,13 @@ pub struct BundleReader {
 }
 
 impl BundleReader {
-    pub fn new(path: PathBuf, version: u8, content_start: usize, crypto: Arc<Mutex<Crypto>>, info: BundleInfo) -> Self {
+    pub fn new(
+        path: PathBuf,
+        version: u8,
+        content_start: usize,
+        crypto: Arc<Mutex<Crypto>>,
+        info: BundleInfo,
+    ) -> Self {
         BundleReader {
             info: info,
             chunks: None,
@@ -84,54 +90,90 @@ impl BundleReader {
         self.info.id.clone()
     }
 
-    fn load_header<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
+    fn load_header<P: AsRef<Path>>(
+        path: P,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
         let path = path.as_ref();
         let mut file = BufReader::new(try!(File::open(path).context(path)));
         let mut header = [0u8; 8];
         try!(file.read_exact(&mut header).context(path));
         if header[..HEADER_STRING.len()] != HEADER_STRING {
-            return Err(BundleReaderError::WrongHeader(path.to_path_buf()))
+            return Err(BundleReaderError::WrongHeader(path.to_path_buf()));
         }
         let version = header[HEADER_STRING.len()];
         if version != HEADER_VERSION {
-            return Err(BundleReaderError::UnsupportedVersion(path.to_path_buf(), version))
+            return Err(BundleReaderError::UnsupportedVersion(
+                path.to_path_buf(),
+                version
+            ));
         }
         let header: BundleHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
         let mut info_data = Vec::with_capacity(header.info_size);
         info_data.resize(header.info_size, 0);
         try!(file.read_exact(&mut info_data).context(path));
         if let Some(ref encryption) = header.encryption {
-            info_data = try!(crypto.lock().unwrap().decrypt(encryption, &info_data).context(path));
+            info_data = try!(
+                crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &info_data)
+                    .context(path)
+            );
         }
         let mut info: BundleInfo = try!(msgpack::decode(&info_data).context(path));
         info.encryption = header.encryption;
         debug!("Load bundle {}", info.id);
-        let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize + info.chunk_list_size;
+        let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize +
+            info.chunk_list_size;
         Ok((info, version, content_start))
     }
 
     #[inline]
-    pub fn load_info<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<BundleInfo, BundleReaderError> {
+    pub fn load_info<P: AsRef<Path>>(
+        path: P,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<BundleInfo, BundleReaderError> {
         Self::load_header(path, crypto).map(|b| b.0)
     }
 
     #[inline]
     pub fn load(path: PathBuf, crypto: Arc<Mutex<Crypto>>) -> Result<Self, BundleReaderError> {
         let (header, version, content_start) = try!(Self::load_header(&path, crypto.clone()));
-        Ok(BundleReader::new(path, version, content_start, crypto, header))
+        Ok(BundleReader::new(
+            path,
+            version,
+            content_start,
+            crypto,
+            header
+        ))
     }
 
     fn load_chunklist(&mut self) -> Result<(), BundleReaderError> {
-        debug!("Load bundle chunklist {} ({:?})", self.info.id, self.info.mode);
+        debug!(
+            "Load bundle chunklist {} ({:?})",
+            self.info.id,
+            self.info.mode
+        );
         let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
         let len = self.info.chunk_list_size;
         let start = self.content_start - len;
-        try!(file.seek(SeekFrom::Start(start as u64)).context(&self.path as &Path));
+        try!(file.seek(SeekFrom::Start(start as u64)).context(
+            &self.path as &Path
+        ));
         let mut chunk_data = Vec::with_capacity(len);
         chunk_data.resize(self.info.chunk_list_size, 0);
-        try!(file.read_exact(&mut chunk_data).context(&self.path as &Path));
+        try!(file.read_exact(&mut chunk_data).context(
+            &self.path as &Path
+        ));
         if let Some(ref encryption) = self.info.encryption {
-            chunk_data = try!(self.crypto.lock().unwrap().decrypt(encryption, &chunk_data).context(&self.path as &Path));
+            chunk_data = try!(
+                self.crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &chunk_data)
+                    .context(&self.path as &Path)
+            );
         }
         let chunks = ChunkList::read_from(&chunk_data);
         let mut chunk_positions = Vec::with_capacity(chunks.len());
@@ -156,20 +198,31 @@ impl BundleReader {
     fn load_encoded_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
         debug!("Load bundle data {} ({:?})", self.info.id, self.info.mode);
         let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
-        try!(file.seek(SeekFrom::Start(self.content_start as u64)).context(&self.path as &Path));
-        let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size)+1024);
+        try!(
+            file.seek(SeekFrom::Start(self.content_start as u64))
+                .context(&self.path as &Path)
+        );
+        let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size) + 1024);
         try!(file.read_to_end(&mut data).context(&self.path as &Path));
         Ok(data)
     }
 
     fn decode_contents(&self, mut data: Vec<u8>) -> Result<Vec<u8>, BundleReaderError> {
         if let Some(ref encryption) = self.info.encryption {
-            data = try!(self.crypto.lock().unwrap().decrypt(encryption, &data).context(&self.path as &Path));
+            data = try!(
+                self.crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &data)
+                    .context(&self.path as &Path)
+            );
         }
         if let Some(ref compression) = self.info.compression {
             let mut stream = try!(compression.decompress_stream().context(&self.path as &Path));
             let mut buffer = Vec::with_capacity(self.info.raw_size);
-            try!(stream.process(&data, &mut buffer).context(&self.path as &Path));
+            try!(stream.process(&data, &mut buffer).context(
+                &self.path as &Path
+            ));
             try!(stream.finish(&mut buffer).context(&self.path as &Path));
             data = buffer;
         }
@@ -178,12 +231,14 @@ impl BundleReader {
 
     #[inline]
     pub fn load_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
-        self.load_encoded_contents().and_then(|data| self.decode_contents(data))
+        self.load_encoded_contents().and_then(|data| {
+            self.decode_contents(data)
+        })
     }
 
     pub fn get_chunk_position(&mut self, id: usize) -> Result<(usize, usize), BundleReaderError> {
         if id >= self.info.chunk_count {
-            return Err(BundleReaderError::NoSuchChunk(self.id(), id))
+            return Err(BundleReaderError::NoSuchChunk(self.id(), id));
         }
         if self.chunks.is_none() || self.chunk_positions.is_none() {
             try!(self.load_chunklist());
@@ -198,30 +253,46 @@ impl BundleReader {
             try!(self.load_chunklist());
         }
         if self.info.chunk_count != self.chunks.as_ref().unwrap().len() {
-            return Err(BundleReaderError::Integrity(self.id(),
-                "Chunk list size does not match chunk count"))
+            return Err(BundleReaderError::Integrity(
+                self.id(),
+                "Chunk list size does not match chunk count"
+            ));
         }
-        if self.chunks.as_ref().unwrap().iter().map(|c| c.1 as usize).sum::<usize>() != self.info.raw_size {
-            return Err(BundleReaderError::Integrity(self.id(),
-                "Individual chunk sizes do not add up to total size"))
+        if self.chunks
+            .as_ref()
+            .unwrap()
+            .iter()
+            .map(|c| c.1 as usize)
+            .sum::<usize>() != self.info.raw_size
+        {
+            return Err(BundleReaderError::Integrity(
+                self.id(),
+                "Individual chunk sizes do not add up to total size"
+            ));
        }
        if !full {
            let size = try!(fs::metadata(&self.path).context(&self.path as &Path)).len();
            if size as usize != self.info.encoded_size + self.content_start {
-                return Err(BundleReaderError::Integrity(self.id(),
-                    "File size does not match size in header, truncated file"))
+                return Err(BundleReaderError::Integrity(
+                    self.id(),
+                    "File size does not match size in header, truncated file"
+                ));
            }
-            return Ok(())
+            return Ok(());
        }
        let encoded_contents = try!(self.load_encoded_contents());
        if self.info.encoded_size != encoded_contents.len() {
-            return Err(BundleReaderError::Integrity(self.id(),
-                "Encoded data size does not match size in header, truncated bundle"))
+            return Err(BundleReaderError::Integrity(
+                self.id(),
+                "Encoded data size does not match size in header, truncated bundle"
+            ));
        }
        let contents = try!(self.decode_contents(encoded_contents));
        if self.info.raw_size != contents.len() {
-            return Err(BundleReaderError::Integrity(self.id(),
-                "Raw data size does not match size in header, truncated bundle"))
+            return Err(BundleReaderError::Integrity(
+                self.id(),
+                "Raw data size does not match size in header, truncated bundle"
+            ));
        }
        //TODO: verify checksum
        Ok(())
@@ -230,8 +301,15 @@ impl BundleReader {
 
 impl Debug for BundleReader {
     fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        write!(fmt, "Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)",
-            self.info.id.to_string(), self.path, self.info.chunk_count, self.info.raw_size,
-            self.info.encoded_size, self.info.compression)
+        write!(
+            fmt,
+            "Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)",
+            self.info.id.to_string(),
+            self.path,
+            self.info.chunk_count,
+            self.info.raw_size,
+            self.info.encoded_size,
+            self.info.compression
+        )
     }
 }
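Read together, `load_header`, `load_chunklist` and `load_encoded_contents` above (and `BundleWriter::finish` later in this diff) imply the on-disk bundle layout. A reconstruction from this diff alone, not normative documentation:

```rust
// Bundle file layout as the reader consumes it:
//
//   magic + version   8 bytes total: HEADER_STRING, then one version byte
//   BundleHeader      msgpack; carries `encryption` and `info_size`
//   info blob         `info_size` bytes; msgpack BundleInfo, possibly encrypted
//   chunk list        `chunk_list_size` bytes, possibly encrypted
//   contents          `encoded_size` bytes, compressed and/or encrypted
//
// `content_start` is the offset just past the chunk list, which is why
// load_chunklist seeks to `content_start - chunk_list_size`.
```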
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 
 use std::sync::atomic::{Ordering, AtomicBool, AtomicUsize};
 use std::sync::{Mutex, Condvar, Arc};
@@ -28,7 +28,10 @@ impl BundleUploader {
             wait: (Condvar::new(), Mutex::new(()))
         });
         let self2 = self_.clone();
-        thread::Builder::new().name("uploader".to_string()).spawn(move || self2.worker_thread()).unwrap();
+        thread::Builder::new()
+            .name("uploader".to_string())
+            .spawn(move || self2.worker_thread())
+            .unwrap();
         self_
     }
 
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -54,14 +54,22 @@ pub struct BundleWriter {
     crypto: Arc<Mutex<Crypto>>,
     raw_size: usize,
     chunk_count: usize,
-    chunks: ChunkList,
+    chunks: ChunkList
 }
 
 impl BundleWriter {
-    pub fn new(mode: BundleMode, hash_method: HashMethod, compression: Option<Compression>, encryption: Option<Encryption>, crypto: Arc<Mutex<Crypto>>) -> Result<Self, BundleWriterError> {
+    pub fn new(
+        mode: BundleMode,
+        hash_method: HashMethod,
+        compression: Option<Compression>,
+        encryption: Option<Encryption>,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<Self, BundleWriterError> {
         let compression_stream = match compression {
-            Some(ref compression) => Some(try!(compression.compress_stream().map_err(BundleWriterError::CompressionSetup))),
-            None => None
+            Some(ref compression) => Some(try!(compression.compress_stream().map_err(
+                BundleWriterError::CompressionSetup
+            ))),
+            None => None,
         };
         Ok(BundleWriter {
             mode: mode,
@@ -79,19 +87,23 @@ impl BundleWriter {
 
     pub fn add(&mut self, chunk: &[u8], hash: Hash) -> Result<usize, BundleWriterError> {
         if let Some(ref mut stream) = self.compression_stream {
-            try!(stream.process(chunk, &mut self.data).map_err(BundleWriterError::Compression))
+            try!(stream.process(chunk, &mut self.data).map_err(
+                BundleWriterError::Compression
+            ))
         } else {
             self.data.extend_from_slice(chunk)
         }
         self.raw_size += chunk.len();
         self.chunk_count += 1;
         self.chunks.push((hash, chunk.len() as u32));
-        Ok(self.chunk_count-1)
+        Ok(self.chunk_count - 1)
     }
 
     pub fn finish(mut self, db: &BundleDb) -> Result<StoredBundle, BundleWriterError> {
         if let Some(stream) = self.compression_stream {
-            try!(stream.finish(&mut self.data).map_err(BundleWriterError::Compression))
+            try!(stream.finish(&mut self.data).map_err(
+                BundleWriterError::Compression
+            ))
         }
         if let Some(ref encryption) = self.encryption {
             self.data = try!(self.crypto.lock().unwrap().encrypt(encryption, &self.data));
@@ -127,12 +139,19 @@ impl BundleWriter {
             encryption: self.encryption,
             info_size: info_data.len()
         };
-        try!(msgpack::encode_to_stream(&header, &mut file).context(&path as &Path));
+        try!(msgpack::encode_to_stream(&header, &mut file).context(
+            &path as &Path
+        ));
         try!(file.write_all(&info_data).context(&path as &Path));
         try!(file.write_all(&chunk_data).context(&path as &Path));
         try!(file.write_all(&self.data).context(&path as &Path));
-        path = path.strip_prefix(db.layout.base_path()).unwrap().to_path_buf();
-        Ok(StoredBundle { path: path, info: info })
+        path = path.strip_prefix(db.layout.base_path())
+            .unwrap()
+            .to_path_buf();
+        Ok(StoredBundle {
+            path: path,
+            info: info
+        })
     }
 
     #[inline]
@@ -25,13 +25,15 @@ impl ChunkerType {
             "rabin" => Ok(ChunkerType::Rabin((avg_size, seed as u32))),
             "fastcdc" => Ok(ChunkerType::FastCdc((avg_size, seed))),
             "fixed" => Ok(ChunkerType::Fixed(avg_size)),
-            _ => Err("Unsupported chunker type")
+            _ => Err("Unsupported chunker type"),
         }
     }
 
     pub fn from_string(name: &str) -> Result<Self, &'static str> {
         let (name, size) = if let Some(pos) = name.find('/') {
-            let size = try!(usize::from_str(&name[pos+1..]).map_err(|_| "Chunk size must be a number"));
+            let size = try!(usize::from_str(&name[pos + 1..]).map_err(
+                |_| "Chunk size must be a number"
+            ));
             let name = &name[..pos];
             (name, size)
         } else {
@@ -62,21 +64,23 @@ impl ChunkerType {
 
     pub fn avg_size(&self) -> usize {
         match *self {
-            ChunkerType::Ae(size) | ChunkerType::Fixed(size) => size,
+            ChunkerType::Ae(size) |
+            ChunkerType::Fixed(size) => size,
             ChunkerType::Rabin((size, _seed)) => size,
-            ChunkerType::FastCdc((size, _seed)) => size
+            ChunkerType::FastCdc((size, _seed)) => size,
         }
     }
 
     pub fn to_string(&self) -> String {
-        format!("{}/{}", self.name(), self.avg_size()/1024)
+        format!("{}/{}", self.name(), self.avg_size() / 1024)
     }
 
     pub fn seed(&self) -> u64 {
         match *self {
-            ChunkerType::Ae(_size) | ChunkerType::Fixed(_size) => 0,
+            ChunkerType::Ae(_size) |
+            ChunkerType::Fixed(_size) => 0,
             ChunkerType::Rabin((_size, seed)) => seed as u64,
-            ChunkerType::FastCdc((_size, seed)) => seed
+            ChunkerType::FastCdc((_size, seed)) => seed,
         }
     }
 }
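`from_string` and `to_string` above give chunker specifications the textual form `name/size`, with `to_string` printing the size in KiB (`avg_size() / 1024`). A small standalone sketch of parsing that shape (the unit conversion applied after parsing is outside the visible hunk, so it is not reproduced here):

```rust
// Illustrative only: split a spec like "fastcdc/16" into its parts.
fn split_spec(spec: &str) -> (&str, Option<usize>) {
    match spec.find('/') {
        Some(pos) => (&spec[..pos], spec[pos + 1..].parse().ok()),
        None => (spec, None),
    }
}

fn main() {
    assert_eq!(split_spec("fastcdc/16"), ("fastcdc", Some(16)));
    assert_eq!(split_spec("fixed"), ("fixed", None));
}
```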
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 
 use std::io::{self, Cursor, Read, Write};
 use std::fs::File;
@@ -41,7 +41,14 @@ fn chunk(data: &[u8], mut chunker: Box<Chunker>, sink: &mut ChunkSink) {
 }
 
 #[allow(dead_code)]
-pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Option<Compression>, encrypt: bool,hash: HashMethod) {
+pub fn run(
+    path: &str,
+    bundle_size: usize,
+    chunker: ChunkerType,
+    compression: Option<Compression>,
+    encrypt: bool,
+    hash: HashMethod,
+) {
     let mut total_write_time = 0.0;
     let mut total_read_time = 0.0;
 
@@ -50,42 +57,64 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
     let total_size = file.metadata().unwrap().len();
     let mut size = total_size;
     let mut data = Vec::with_capacity(size as usize);
-    let read_time = Duration::span(|| {
-        file.read_to_end(&mut data).unwrap();
-    }).num_milliseconds() as f32 / 1_000.0;
-    println!("- {}, {}", to_duration(read_time), to_speed(size, read_time));
+    let read_time = Duration::span(|| { file.read_to_end(&mut data).unwrap(); })
+        .num_milliseconds() as f32 / 1_000.0;
+    println!(
+        "- {}, {}",
+        to_duration(read_time),
+        to_speed(size, read_time)
+    );
 
     println!();
 
-    println!("Chunking data with {}, avg chunk size {} ...", chunker.name(), to_file_size(chunker.avg_size() as u64));
+    println!(
+        "Chunking data with {}, avg chunk size {} ...",
+        chunker.name(),
+        to_file_size(chunker.avg_size() as u64)
+    );
     let mut chunk_sink = ChunkSink {
-        chunks: Vec::with_capacity(2*size as usize/chunker.avg_size()),
+        chunks: Vec::with_capacity(2 * size as usize / chunker.avg_size()),
         written: 0,
         pos: 0
     };
     let chunker = chunker.create();
-    let chunk_time = Duration::span(|| {
-        chunk(&data, chunker, &mut chunk_sink)
-    }).num_milliseconds() as f32 / 1_000.0;
+    let chunk_time = Duration::span(|| chunk(&data, chunker, &mut chunk_sink))
+        .num_milliseconds() as f32 / 1_000.0;
     total_write_time += chunk_time;
-    println!("- {}, {}", to_duration(chunk_time), to_speed(size, chunk_time));
+    println!(
+        "- {}, {}",
+        to_duration(chunk_time),
+        to_speed(size, chunk_time)
+    );
     let mut chunks = chunk_sink.chunks;
     assert_eq!(chunks.iter().map(|c| c.1).sum::<usize>(), size as usize);
     let chunk_size_avg = size as f32 / chunks.len() as f32;
-    let chunk_size_stddev = (chunks.iter().map(|c| (c.1 as f32 - chunk_size_avg).powi(2)).sum::<f32>() / (chunks.len() as f32 - 1.0)).sqrt();
-    println!("- {} chunks, avg size: {} ±{}", chunks.len(), to_file_size(chunk_size_avg as u64), to_file_size(chunk_size_stddev as u64));
+    let chunk_size_stddev = (chunks
+        .iter()
+        .map(|c| (c.1 as f32 - chunk_size_avg).powi(2))
+        .sum::<f32>() /
+        (chunks.len() as f32 - 1.0))
+        .sqrt();
+    println!(
+        "- {} chunks, avg size: {} ±{}",
+        chunks.len(),
+        to_file_size(chunk_size_avg as u64),
+        to_file_size(chunk_size_stddev as u64)
+    );
 
     println!();
 
     println!("Hashing chunks with {} ...", hash.name());
     let mut hashes = Vec::with_capacity(chunks.len());
-    let hash_time = Duration::span(|| {
-        for &(pos, len) in &chunks {
-            hashes.push(hash.hash(&data[pos..pos+len]))
-        }
+    let hash_time = Duration::span(|| for &(pos, len) in &chunks {
+        hashes.push(hash.hash(&data[pos..pos + len]))
     }).num_milliseconds() as f32 / 1_000.0;
     total_write_time += hash_time;
-    println!("- {}, {}", to_duration(hash_time), to_speed(size, hash_time));
+    println!(
+        "- {}, {}",
+        to_duration(hash_time),
+        to_speed(size, hash_time)
+    );
     let mut seen_hashes = HashSet::with_capacity(hashes.len());
     let mut dups = Vec::new();
     for (i, hash) in hashes.into_iter().enumerate() {
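The chunk statistic printed in the hunk above is the sample standard deviation of the chunk sizes, sqrt(sum((len_i - mean)^2) / (n - 1)). The same computation as a standalone function:

```rust
// Sample standard deviation, matching the benchmark's formula.
fn sample_stddev(lens: &[f32], mean: f32) -> f32 {
    let n = lens.len() as f32;
    (lens.iter().map(|l| (l - mean).powi(2)).sum::<f32>() / (n - 1.0)).sqrt()
}
```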
@@ -99,7 +128,12 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
         let (_, len) = chunks.remove(*i);
         dup_size += len;
     }
-    println!("- {} duplicate chunks, {}, {:.1}% saved", dups.len(), to_file_size(dup_size as u64), dup_size as f32 / size as f32*100.0);
+    println!(
+        "- {} duplicate chunks, {}, {:.1}% saved",
+        dups.len(),
+        to_file_size(dup_size as u64),
+        dup_size as f32 / size as f32 * 100.0
+    );
     size -= dup_size as u64;
 
     let mut bundles = Vec::new();
@@ -109,14 +143,14 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
 
         println!("Compressing chunks with {} ...", compression.to_string());
         let compress_time = Duration::span(|| {
-            let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+            let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
             let mut c = compression.compress_stream().unwrap();
             for &(pos, len) in &chunks {
-                c.process(&data[pos..pos+len], &mut bundle).unwrap();
+                c.process(&data[pos..pos + len], &mut bundle).unwrap();
                 if bundle.len() >= bundle_size {
                     c.finish(&mut bundle).unwrap();
                     bundles.push(bundle);
-                    bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+                    bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
                     c = compression.compress_stream().unwrap();
                 }
             }
@@ -124,17 +158,26 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
             bundles.push(bundle);
         }).num_milliseconds() as f32 / 1_000.0;
         total_write_time += compress_time;
-        println!("- {}, {}", to_duration(compress_time), to_speed(size, compress_time));
+        println!(
+            "- {}, {}",
+            to_duration(compress_time),
+            to_speed(size, compress_time)
+        );
         let compressed_size = bundles.iter().map(|b| b.len()).sum::<usize>();
-        println!("- {} bundles, {}, {:.1}% saved", bundles.len(), to_file_size(compressed_size as u64), (size as f32 - compressed_size as f32)/size as f32*100.0);
+        println!(
+            "- {} bundles, {}, {:.1}% saved",
+            bundles.len(),
+            to_file_size(compressed_size as u64),
+            (size as f32 - compressed_size as f32) / size as f32 * 100.0
+        );
         size = compressed_size as u64;
     } else {
-        let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+        let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
         for &(pos, len) in &chunks {
-            bundle.extend_from_slice(&data[pos..pos+len]);
+            bundle.extend_from_slice(&data[pos..pos + len]);
             if bundle.len() >= bundle_size {
                 bundles.push(bundle);
-                bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+                bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
             }
         }
         bundles.push(bundle);
@@ -151,24 +194,28 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
         println!("Encrypting bundles...");
         let mut encrypted_bundles = Vec::with_capacity(bundles.len());
 
-        let encrypt_time = Duration::span(|| {
-            for bundle in bundles {
-                encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
-            }
+        let encrypt_time = Duration::span(|| for bundle in bundles {
+            encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
         }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(encrypt_time), to_speed(size, encrypt_time));
+        println!(
+            "- {}, {}",
+            to_duration(encrypt_time),
+            to_speed(size, encrypt_time)
+        );
         total_write_time += encrypt_time;
 
         println!();
 
         println!("Decrypting bundles...");
         bundles = Vec::with_capacity(encrypted_bundles.len());
-        let decrypt_time = Duration::span(|| {
-            for bundle in encrypted_bundles {
-                bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
-            }
+        let decrypt_time = Duration::span(|| for bundle in encrypted_bundles {
+            bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
         }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(decrypt_time), to_speed(size, decrypt_time));
+        println!(
+            "- {}, {}",
+            to_duration(decrypt_time),
+            to_speed(size, decrypt_time)
+        );
         total_read_time += decrypt_time;
     }
 
@@ -176,21 +223,38 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
         println!();
 
         println!("Decompressing bundles with {} ...", compression.to_string());
-        let mut dummy = ChunkSink { chunks: vec![], written: 0, pos: 0 };
-        let decompress_time = Duration::span(|| {
-            for bundle in &bundles {
-                let mut c = compression.decompress_stream().unwrap();
-                c.process(bundle, &mut dummy).unwrap();
-                c.finish(&mut dummy).unwrap();
-            }
+        let mut dummy = ChunkSink {
+            chunks: vec![],
+            written: 0,
+            pos: 0
+        };
+        let decompress_time = Duration::span(|| for bundle in &bundles {
+            let mut c = compression.decompress_stream().unwrap();
+            c.process(bundle, &mut dummy).unwrap();
+            c.finish(&mut dummy).unwrap();
        }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(decompress_time), to_speed(total_size - dup_size as u64, decompress_time));
+        println!(
+            "- {}, {}",
+            to_duration(decompress_time),
+            to_speed(total_size - dup_size as u64, decompress_time)
+        );
        total_read_time += decompress_time;
    }
 
    println!();
 
-    println!("Total storage size: {} / {}, ratio: {:.1}%", to_file_size(size as u64), to_file_size(total_size as u64), size as f32/total_size as f32*100.0);
-    println!("Total processing speed: {}", to_speed(total_size, total_write_time));
-    println!("Total read speed: {}", to_speed(total_size, total_read_time));
+    println!(
+        "Total storage size: {} / {}, ratio: {:.1}%",
+        to_file_size(size as u64),
+        to_file_size(total_size as u64),
+        size as f32 / total_size as f32 * 100.0
+    );
+    println!(
+        "Total processing speed: {}",
+        to_speed(total_size, total_write_time)
+    );
+    println!(
+        "Total read speed: {}",
+        to_speed(total_size, total_read_time)
+    );
 }
src/cli/args.rs (260 changed lines)
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -78,7 +78,7 @@ pub enum Arguments {
         repo_path_src: PathBuf,
         backup_name_src: String,
         repo_path_dst: PathBuf,
-        backup_name_dst: String,
+        backup_name_dst: String
     },
     Mount {
         repo_path: PathBuf,
@@ -86,10 +86,7 @@ pub enum Arguments {
         inode: Option<String>,
         mount_point: String
     },
-    Versions {
-        repo_path: PathBuf,
-        path: String
-    },
+    Versions { repo_path: PathBuf, path: String },
     Diff {
         repo_path_old: PathBuf,
         backup_name_old: String,
@@ -98,12 +95,8 @@ pub enum Arguments {
         backup_name_new: String,
         inode_new: Option<String>
     },
-    Analyze {
-        repo_path: PathBuf
-    },
-    BundleList {
-        repo_path: PathBuf
-    },
+    Analyze { repo_path: PathBuf },
+    BundleList { repo_path: PathBuf },
     BundleInfo {
         repo_path: PathBuf,
         bundle_id: BundleId
@@ -154,7 +147,12 @@ fn convert_repo_path(mut path_str: &str) -> PathBuf {
     }
 }
 
-fn parse_repo_path(repo_path: &str, existing: bool, backup_restr: Option<bool>, path_restr: Option<bool>) -> Result<(PathBuf, Option<&str>, Option<&str>), String> {
+fn parse_repo_path(
+    repo_path: &str,
+    existing: bool,
+    backup_restr: Option<bool>,
+    path_restr: Option<bool>,
+) -> Result<(PathBuf, Option<&str>, Option<&str>), String> {
     let mut parts = repo_path.splitn(3, "::");
     let repo = convert_repo_path(parts.next().unwrap_or(""));
     if existing && !repo.join("config.yaml").exists() {
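`parse_repo_path` above splits its argument on `::` into at most three parts, which gives the CLI arguments the apparent shape `repository::backup::subpath`, with the trailing parts optional. A standalone sketch of just that split, separate from the validation logic (hypothetical helper, not the zvault code):

```rust
// Illustrative only: mirror of the `splitn(3, "::")` step.
fn split_repo_spec(spec: &str) -> (Option<&str>, Option<&str>, Option<&str>) {
    let mut parts = spec.splitn(3, "::");
    (parts.next(), parts.next(), parts.next())
}

fn main() {
    assert_eq!(
        split_repo_spec("myrepo::daily1::/etc"),
        (Some("myrepo"), Some("daily1"), Some("/etc"))
    );
    assert_eq!(split_repo_spec("myrepo"), (Some("myrepo"), None, None));
}
```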
@@ -194,8 +192,13 @@ fn parse_repo_path(repo_path: &str, existing: bool, backup_restr: Option<bool>,
     Ok((repo, backup, path))
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
-fn validate_repo_path(repo_path: String, existing: bool, backup_restr: Option<bool>, path_restr: Option<bool>) -> Result<(), String> {
+#[allow(unknown_lints, needless_pass_by_value)]
+fn validate_repo_path(
+    repo_path: String,
+    existing: bool,
+    backup_restr: Option<bool>,
+    path_restr: Option<bool>,
+) -> Result<(), String> {
     parse_repo_path(&repo_path, existing, backup_restr, path_restr).map(|_| ())
 }
 
@@ -207,7 +210,7 @@ fn parse_num(num: &str) -> Result<u64, String> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_num(val: String) -> Result<(), String> {
     parse_num(&val).map(|_| ())
 }
@@ -220,14 +223,14 @@ fn parse_chunker(val: &str) -> Result<ChunkerType, String> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_chunker(val: String) -> Result<(), String> {
     parse_chunker(&val).map(|_| ())
 }
 
 fn parse_compression(val: &str) -> Result<Option<Compression>, String> {
     if val == "none" {
-        return Ok(None)
+        return Ok(None);
     }
     if let Ok(compression) = Compression::from_string(val) {
         Ok(Some(compression))
@@ -236,7 +239,7 @@ fn parse_compression(val: &str) -> Result<Option<Compression>, String> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_compression(val: String) -> Result<(), String> {
     parse_compression(&val).map(|_| ())
 }
@@ -254,11 +257,11 @@ fn parse_public_key(val: &str) -> Result<Option<PublicKey>, String> {
     if let Some(key) = PublicKey::from_slice(&bytes) {
         Ok(Some(key))
     } else {
-        return Err("Invalid key".to_string())
+        return Err("Invalid key".to_string());
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_public_key(val: String) -> Result<(), String> {
     parse_public_key(&val).map(|_| ())
 }
@@ -271,7 +274,7 @@ fn parse_hash(val: &str) -> Result<HashMethod, String> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_hash(val: String) -> Result<(), String> {
     parse_hash(&val).map(|_| ())
 }
@@ -285,7 +288,7 @@ fn parse_bundle_id(val: &str) -> Result<BundleId, ErrorCode> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_existing_path(val: String) -> Result<(), String> {
     if !Path::new(&val).exists() {
         Err("Path does not exist".to_string())
@@ -294,7 +297,7 @@ fn validate_existing_path(val: String) -> Result<(), String> {
     }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_existing_path_or_stdio(val: String) -> Result<(), String> {
     if val != "-" && !Path::new(&val).exists() {
         Err("Path does not exist".to_string())
@@ -304,7 +307,7 @@ fn validate_existing_path_or_stdio(val: String) -> Result<(), String> {
 }
 
 
-#[allow(unknown_lints,cyclomatic_complexity)]
+#[allow(unknown_lints, cyclomatic_complexity)]
 pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
     let args = App::new("zvault").version(crate_version!()).author(crate_authors!(",\n")).about(crate_description!())
         .settings(&[AppSettings::VersionlessSubcommands, AppSettings::SubcommandRequiredElseHelp])
@@ -454,19 +457,31 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
             .default_value(DEFAULT_HASH).validator(validate_hash))
         .arg(Arg::from_usage("<FILE> 'File with test data'")
             .validator(validate_existing_path))).get_matches();
-    let verbose_count = args.subcommand().1.map(|m| m.occurrences_of("verbose")).unwrap_or(0) + args.occurrences_of("verbose");
-    let quiet_count= args.subcommand().1.map(|m| m.occurrences_of("quiet")).unwrap_or(0) + args.occurrences_of("quiet");
+    let verbose_count = args.subcommand()
+        .1
+        .map(|m| m.occurrences_of("verbose"))
+        .unwrap_or(0) + args.occurrences_of("verbose");
+    let quiet_count = args.subcommand()
+        .1
+        .map(|m| m.occurrences_of("quiet"))
+        .unwrap_or(0) + args.occurrences_of("quiet");
     let log_level = match 1 + verbose_count - quiet_count {
         0 => LogLevel::Warn,
         1 => LogLevel::Info,
         2 => LogLevel::Debug,
-        _ => LogLevel::Trace
+        _ => LogLevel::Trace,
     };
     let args = match args.subcommand() {
         ("init", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                false,
+                Some(false),
+                Some(false)
+            ).unwrap();
             Arguments::Init {
-                bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
+                bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() *
+                    1024 * 1024) as usize,
                 chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
                 compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
                 encryption: args.is_present("encrypt"),
@@ -474,24 +489,32 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                 repo_path: repository,
                 remote_path: args.value_of("remote").unwrap().to_string()
             }
-        },
+        }
         ("backup", Some(args)) => {
-            let (repository, backup, _inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), Some(false)).unwrap();
+            let (repository, backup, _inode) = parse_repo_path(
+                args.value_of("BACKUP").unwrap(),
+                true,
+                Some(true),
+                Some(false)
+            ).unwrap();
             Arguments::Backup {
                 repo_path: repository,
                 backup_name: backup.unwrap().to_string(),
                 full: args.is_present("full"),
                 same_device: !args.is_present("cross_device"),
-                excludes: args.values_of("exclude").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![]),
+                excludes: args.values_of("exclude")
+                    .map(|v| v.map(|k| k.to_string()).collect())
+                    .unwrap_or_else(|| vec![]),
                 excludes_from: args.value_of("excludes_from").map(|v| v.to_string()),
                 src_path: args.value_of("SRC").unwrap().to_string(),
                 reference: args.value_of("reference").map(|v| v.to_string()),
                 no_default_excludes: args.is_present("no_default_excludes"),
                 tar: args.is_present("tar")
             }
-        },
+        }
         ("restore", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
             Arguments::Restore {
                 repo_path: repository,
                 backup_name: backup.unwrap().to_string(),
@@ -499,18 +522,24 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                 dst_path: args.value_of("DST").unwrap().to_string(),
                 tar: args.is_present("tar")
             }
-        },
+        }
         ("remove", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
             Arguments::Remove {
                 repo_path: repository,
                 backup_name: backup.unwrap().to_string(),
                 inode: inode.map(|v| v.to_string()),
                 force: args.is_present("force")
             }
-        },
+        }
         ("prune", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
             Arguments::Prune {
                 repo_path: repository,
                 prefix: args.value_of("prefix").unwrap_or("").to_string(),
@@ -520,18 +549,24 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                 monthly: parse_num(args.value_of("monthly").unwrap()).unwrap() as usize,
                 yearly: parse_num(args.value_of("yearly").unwrap()).unwrap() as usize
             }
-        },
+        }
         ("vacuum", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
             Arguments::Vacuum {
                 repo_path: repository,
                 force: args.is_present("force"),
                 combine: args.is_present("combine"),
                 ratio: parse_num(args.value_of("ratio").unwrap()).unwrap() as f32 / 100.0
             }
-        },
+        }
         ("check", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
             Arguments::Check {
                 repo_path: repository,
                 backup_name: backup.map(|v| v.to_string()),
@@ -541,127 +576,176 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                 index: args.is_present("index"),
                 repair: args.is_present("repair")
             }
-        },
+        }
         ("list", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
             Arguments::List {
                 repo_path: repository,
                 backup_name: backup.map(|v| v.to_string()),
                 inode: inode.map(|v| v.to_string())
             }
-        },
+        }
         ("bundlelist", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
-            Arguments::BundleList {
-                repo_path: repository,
-            }
-        },
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
+            Arguments::BundleList { repo_path: repository }
+        }
         ("bundleinfo", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
             Arguments::BundleInfo {
                 repo_path: repository,
                 bundle_id: try!(parse_bundle_id(args.value_of("BUNDLE").unwrap()))
             }
-        },
+        }
         ("info", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
             Arguments::Info {
                 repo_path: repository,
                 backup_name: backup.map(|v| v.to_string()),
                 inode: inode.map(|v| v.to_string())
            }
-        },
+        }
         ("copy", Some(args)) => {
-            let (repository_src, backup_src, _inode) = parse_repo_path(args.value_of("SRC").unwrap(), true, Some(true), Some(false)).unwrap();
-            let (repository_dst, backup_dst, _inode) = parse_repo_path(args.value_of("DST").unwrap(), true, Some(true), Some(false)).unwrap();
+            let (repository_src, backup_src, _inode) =
+                parse_repo_path(args.value_of("SRC").unwrap(), true, Some(true), Some(false))
+                    .unwrap();
+            let (repository_dst, backup_dst, _inode) =
+                parse_repo_path(args.value_of("DST").unwrap(), true, Some(true), Some(false))
+                    .unwrap();
             Arguments::Copy {
                 repo_path_src: repository_src,
                 backup_name_src: backup_src.unwrap().to_string(),
                 repo_path_dst: repository_dst,
-                backup_name_dst: backup_dst.unwrap().to_string(),
+                backup_name_dst: backup_dst.unwrap().to_string()
            }
-        },
+        }
         ("mount", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
             Arguments::Mount {
                 repo_path: repository,
                 backup_name: backup.map(|v| v.to_string()),
                 inode: inode.map(|v| v.to_string()),
                 mount_point: args.value_of("MOUNTPOINT").unwrap().to_string()
            }
-        },
+        }
         ("versions", Some(args)) => {
|
||||
let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
|
||||
let (repository, _backup, _inode) = parse_repo_path(
|
||||
args.value_of("REPO").unwrap(),
|
||||
true,
|
||||
Some(false),
|
||||
Some(false)
|
||||
).unwrap();
|
||||
Arguments::Versions {
|
||||
repo_path: repository,
|
||||
path: args.value_of("PATH").unwrap().to_string()
|
||||
}
|
||||
},
|
||||
}
|
||||
("diff", Some(args)) => {
|
||||
let (repository_old, backup_old, inode_old) = parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
|
||||
let (repository_new, backup_new, inode_new) = parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
|
||||
let (repository_old, backup_old, inode_old) =
|
||||
parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
|
||||
let (repository_new, backup_new, inode_new) =
|
||||
parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
|
||||
Arguments::Diff {
|
||||
repo_path_old: repository_old,
|
||||
backup_name_old: backup_old.unwrap().to_string(),
|
||||
inode_old: inode_old.map(|v| v.to_string()),
|
||||
repo_path_new: repository_new,
|
||||
backup_name_new: backup_new.unwrap().to_string(),
|
||||
inode_new: inode_new.map(|v| v.to_string()),
|
||||
inode_new: inode_new.map(|v| v.to_string())
|
||||
}
|
||||
},
|
||||
}
|
||||
("analyze", Some(args)) => {
|
||||
let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
|
||||
Arguments::Analyze {
|
||||
repo_path: repository
|
||||
}
|
||||
},
|
||||
let (repository, _backup, _inode) = parse_repo_path(
|
||||
args.value_of("REPO").unwrap(),
|
||||
true,
|
||||
Some(false),
|
||||
Some(false)
|
||||
).unwrap();
|
||||
Arguments::Analyze { repo_path: repository }
|
||||
}
|
||||
("import", Some(args)) => {
|
||||
let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap();
|
||||
let (repository, _backup, _inode) = parse_repo_path(
|
||||
args.value_of("REPO").unwrap(),
|
||||
false,
|
||||
Some(false),
|
||||
Some(false)
|
||||
).unwrap();
|
||||
Arguments::Import {
|
||||
repo_path: repository,
|
||||
remote_path: args.value_of("REMOTE").unwrap().to_string(),
|
||||
key_files: args.values_of("key").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![])
|
||||
key_files: args.values_of("key")
|
||||
.map(|v| v.map(|k| k.to_string()).collect())
|
||||
.unwrap_or_else(|| vec![])
|
||||
}
|
||||
},
|
||||
}
|
||||
("config", Some(args)) => {
|
||||
let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
|
||||
let (repository, _backup, _inode) = parse_repo_path(
|
||||
args.value_of("REPO").unwrap(),
|
||||
true,
|
||||
Some(false),
|
||||
Some(false)
|
||||
).unwrap();
|
||||
Arguments::Config {
|
||||
bundle_size: args.value_of("bundle_size").map(|v| parse_num(v).unwrap() as usize * 1024 * 1024),
|
||||
bundle_size: args.value_of("bundle_size").map(|v| {
|
||||
parse_num(v).unwrap() as usize * 1024 * 1024
|
||||
}),
|
||||
chunker: args.value_of("chunker").map(|v| parse_chunker(v).unwrap()),
|
||||
compression: args.value_of("compression").map(|v| parse_compression(v).unwrap()),
|
||||
encryption: args.value_of("encryption").map(|v| parse_public_key(v).unwrap()),
|
||||
compression: args.value_of("compression").map(|v| {
|
||||
parse_compression(v).unwrap()
|
||||
}),
|
||||
encryption: args.value_of("encryption").map(
|
||||
|v| parse_public_key(v).unwrap()
|
||||
),
|
||||
hash: args.value_of("hash").map(|v| parse_hash(v).unwrap()),
|
||||
repo_path: repository,
|
||||
repo_path: repository
|
||||
}
|
||||
},
|
||||
}
|
||||
("genkey", Some(args)) => {
|
||||
Arguments::GenKey {
|
||||
file: args.value_of("FILE").map(|v| v.to_string()),
|
||||
password: args.value_of("password").map(|v| v.to_string())
|
||||
}
|
||||
},
|
||||
}
|
||||
("addkey", Some(args)) => {
|
||||
let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
|
||||
let (repository, _backup, _inode) = parse_repo_path(
|
||||
args.value_of("REPO").unwrap(),
|
||||
true,
|
||||
Some(false),
|
||||
Some(false)
|
||||
).unwrap();
|
||||
Arguments::AddKey {
|
||||
repo_path: repository,
|
||||
set_default: args.is_present("set_default"),
|
||||
password: args.value_of("password").map(|v| v.to_string()),
|
||||
file: args.value_of("FILE").map(|v| v.to_string())
|
||||
}
|
||||
},
|
||||
}
|
||||
("algotest", Some(args)) => {
|
||||
Arguments::AlgoTest {
|
||||
bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
|
||||
bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() *
|
||||
1024 * 1024) as usize,
|
||||
chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
|
||||
compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
|
||||
encrypt: args.is_present("encrypt"),
|
||||
hash: parse_hash(args.value_of("hash").unwrap()).unwrap(),
|
||||
file: args.value_of("FILE").unwrap().to_string(),
|
||||
file: args.value_of("FILE").unwrap().to_string()
|
||||
}
|
||||
},
|
||||
}
|
||||
_ => {
|
||||
error!("No subcommand given");
|
||||
return Err(ErrorCode::InvalidArgs)
|
||||
return Err(ErrorCode::InvalidArgs);
|
||||
}
|
||||
};
|
||||
Ok((log_level, args))
|
||||
|
|
|
@@ -22,11 +22,23 @@ impl log::Log for Logger {
fn log(&self, record: &LogRecord) {
if self.enabled(record.metadata()) {
match record.level() {
LogLevel::Error => println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args()),
LogLevel::Warn => println_stderr!("{}: {}", Color::Yellow.bold().paint("warning"), record.args()),
LogLevel::Info => println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args()),
LogLevel::Debug => println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args()),
LogLevel::Trace => println_stderr!("{}: {}", "trace", record.args())
LogLevel::Error => {
println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args())
}
LogLevel::Warn => {
println_stderr!(
"{}: {}",
Color::Yellow.bold().paint("warning"),
record.args()
)
}
LogLevel::Info => {
println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args())
}
LogLevel::Debug => {
println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args())
}
LogLevel::Trace => println_stderr!("{}: {}", "trace", record.args()),
}
}
}
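Aside, not part of the commit: nearly every hunk above applies the same rustfmt rule — a signature or call that overflows the maximum line width (assumed here to be the default 100 columns) is broken into one argument per line with a trailing comma and a dedented closing parenthesis. A minimal self-contained sketch with a hypothetical function:

// Before rustfmt (one overlong line):
// fn wrap_demo<P: AsRef<std::path::Path>>(name: &str, path: P, force: bool) -> Result<(), std::io::Error> { ... }
// After rustfmt:
fn wrap_demo<P: AsRef<std::path::Path>>(
    name: &str,   // one parameter per line
    path: P,
    force: bool,  // trailing comma before the dedented parenthesis
) -> Result<(), std::io::Error> {
    let _ = (name, path.as_ref(), force);
    Ok(())
}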
778 src/cli/mod.rs
File diff suppressed because it is too large
20 src/main.rs
@@ -1,26 +1,32 @@
#![recursion_limit="128"]
#![allow(unknown_lints, float_cmp)]
#![cfg_attr(feature = "bench", feature(test))]
#[cfg(feature = "bench")] extern crate test;
#[cfg(feature = "bench")]
extern crate test;
extern crate serde;
extern crate serde_bytes;
extern crate rmp_serde;
#[macro_use] extern crate serde_utils;
#[macro_use]
extern crate serde_utils;
extern crate squash_sys as squash;
extern crate blake2_rfc as blake2;
extern crate murmurhash3;
extern crate serde_yaml;
#[macro_use] extern crate quick_error;
#[macro_use]
extern crate quick_error;
extern crate chrono;
#[macro_use] extern crate clap;
#[macro_use] extern crate log;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate log;
extern crate byteorder;
extern crate sodiumoxide;
extern crate libsodium_sys;
extern crate ansi_term;
extern crate filetime;
extern crate regex;
#[macro_use] extern crate lazy_static;
#[macro_use]
extern crate lazy_static;
extern crate fuse;
extern crate rand;
extern crate time;
@@ -46,6 +52,6 @@ use std::process::exit;
fn main() {
match cli::run() {
Ok(()) => exit(0),
Err(code) => exit(code.code())
Err(code) => exit(code.code()),
}
}
386 src/mount.rs
@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::path::Path;
use std::ffi::OsStr;
@@ -71,7 +71,7 @@ fn convert_file_type(kind: FileType) -> fuse::FileType {
FileType::Symlink => fuse::FileType::Symlink,
FileType::BlockDevice => fuse::FileType::BlockDevice,
FileType::CharDevice => fuse::FileType::CharDevice,
FileType::NamedPipe => fuse::FileType::NamedPipe
FileType::NamedPipe => fuse::FileType::NamedPipe,
}
}

@@ -115,16 +115,19 @@ impl FuseInode {
nlink: 1,
uid: uid,
gid: gid,
rdev: self.inode.device.map_or(0, |(major, minor)| (major << 8) + minor),
rdev: self.inode.device.map_or(
0,
|(major, minor)| (major << 8) + minor
),
flags: 0
}
}

pub fn dir_list(&self) -> Option<Vec<(u64, fuse::FileType, String)>> {
if self.inode.file_type != FileType::Directory {
return None
return None;
}
let mut list = Vec::with_capacity(self.children.len()+2);
let mut list = Vec::with_capacity(self.children.len() + 2);
list.push((self.num, fuse::FileType::Directory, ".".to_string()));
if let Some(ref parent) = self.parent {
let parent = parent.borrow();
@@ -134,7 +137,11 @@ impl FuseInode {
}
for ch in self.children.values() {
let child = ch.borrow();
list.push((child.num, convert_file_type(child.inode.file_type), child.inode.name.clone()));
list.push((
child.num,
convert_file_type(child.inode.file_type),
child.inode.name.clone()
));
}
Some(list)
}
@@ -156,11 +163,14 @@ impl<'a> FuseFilesystem<'a> {
})
}

pub fn from_repository(repository: &'a mut Repository, path: Option<&str>) -> Result<Self, RepositoryError> {
pub fn from_repository(
repository: &'a mut Repository,
path: Option<&str>,
) -> Result<Self, RepositoryError> {
let mut backups = vec![];
let backup_map = match path {
Some(path) => try!(repository.get_backups(path)),
None => try!(repository.get_all_backups())
None => try!(repository.get_all_backups()),
};
for (name, backup) in backup_map {
let inode = try!(repository.get_inode(&backup.root));
@@ -173,7 +183,7 @@ impl<'a> FuseFilesystem<'a> {
for part in name.split('/') {
parent = match fs.get_child(&parent, part).unwrap() {
Some(child) => child,
None => fs.add_virtual_directory(part.to_string(), Some(parent))
None => fs.add_virtual_directory(part.to_string(), Some(parent)),
};
}
let mut parent_mut = parent.borrow_mut();
@@ -185,28 +195,50 @@ impl<'a> FuseFilesystem<'a> {
Ok(fs)
}

pub fn from_backup(repository: &'a mut Repository, backup: Backup) -> Result<Self, RepositoryError> {
pub fn from_backup(
repository: &'a mut Repository,
backup: Backup,
) -> Result<Self, RepositoryError> {
let inode = try!(repository.get_inode(&backup.root));
let mut fs = try!(FuseFilesystem::new(repository));
fs.add_inode(inode, None, backup.user_names, backup.group_names);
Ok(fs)
}

pub fn from_inode(repository: &'a mut Repository, backup: Backup, inode: Inode) -> Result<Self, RepositoryError> {
pub fn from_inode(
repository: &'a mut Repository,
backup: Backup,
inode: Inode,
) -> Result<Self, RepositoryError> {
let mut fs = try!(FuseFilesystem::new(repository));
fs.add_inode(inode, None, backup.user_names, backup.group_names);
Ok(fs)
}

pub fn add_virtual_directory(&mut self, name: String, parent: Option<FuseInodeRef>) -> FuseInodeRef {
self.add_inode(Inode {
name: name,
file_type: FileType::Directory,
..Default::default()
}, parent, HashMap::default(), HashMap::default())
pub fn add_virtual_directory(
&mut self,
name: String,
parent: Option<FuseInodeRef>,
) -> FuseInodeRef {
self.add_inode(
Inode {
name: name,
file_type: FileType::Directory,
..Default::default()
},
parent,
HashMap::default(),
HashMap::default()
)
}

pub fn add_inode(&mut self, inode: Inode, parent: Option<FuseInodeRef>, user_names: HashMap<u32, String>, group_names: HashMap<u32, String>) -> FuseInodeRef {
pub fn add_inode(
&mut self,
inode: Inode,
parent: Option<FuseInodeRef>,
user_names: HashMap<u32, String>,
group_names: HashMap<u32, String>,
) -> FuseInodeRef {
let inode = FuseInode {
inode: inode,
num: self.next_id,
@@ -228,22 +260,30 @@ impl<'a> FuseFilesystem<'a> {
}

pub fn mount<P: AsRef<Path>>(self, mountpoint: P) -> Result<(), RepositoryError> {
Ok(try!(fuse::mount(self, &mountpoint, &[
OsStr::new("default_permissions"),
OsStr::new("kernel_cache"),
OsStr::new("auto_cache"),
OsStr::new("readonly")
])))
Ok(try!(fuse::mount(
self,
&mountpoint,
&[
OsStr::new("default_permissions"),
OsStr::new("kernel_cache"),
OsStr::new("auto_cache"),
OsStr::new("readonly"),
]
)))
}

pub fn get_inode(&mut self, num: u64) -> Option<FuseInodeRef> {
self.inodes.get(&num).cloned()
}

pub fn get_child(&mut self, parent: &FuseInodeRef, name: &str) -> Result<Option<FuseInodeRef>, RepositoryError> {
pub fn get_child(
&mut self,
parent: &FuseInodeRef,
name: &str,
) -> Result<Option<FuseInodeRef>, RepositoryError> {
let mut parent_mut = parent.borrow_mut();
if let Some(child) = parent_mut.children.get(name) {
return Ok(Some(child.clone()))
return Ok(Some(child.clone()));
}
let child;
if let Some(chunks) = parent_mut.inode.children.as_ref().and_then(|c| c.get(name)) {
@@ -258,9 +298,9 @@ impl<'a> FuseFilesystem<'a> {
name_cache: parent_mut.name_cache.clone()
}));
self.inodes.insert(self.next_id, child.clone());
self.next_id +=1;
self.next_id += 1;
} else {
return Ok(None)
return Ok(None);
}
parent_mut.children.insert(name.to_string(), child.clone());
Ok(Some(child))
@@ -284,7 +324,7 @@ impl<'a> FuseFilesystem<'a> {
name_cache: parent_mut.name_cache.clone()
}));
self.inodes.insert(self.next_id, child.clone());
self.next_id +=1;
self.next_id += 1;
parent_children.insert(name.clone(), child);
}
}
@@ -297,10 +337,11 @@ impl<'a> FuseFilesystem<'a> {
let mut inode = inode.borrow_mut();
let mut chunks = None;
match inode.inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(ref c)) => {
chunks = Some(c.clone());
},
}
Some(FileData::ChunkedIndirect(ref c)) => {
let chunk_data = try!(self.repository.get_data(c));
chunks = Some(ChunkList::read_from(&chunk_data));
@@ -313,9 +354,8 @@ impl<'a> FuseFilesystem<'a> {


impl<'a> fuse::Filesystem for FuseFilesystem<'a> {

/// Look up a directory entry by name and get its attributes.
fn lookup (&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) {
fn lookup(&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) {
let sname = str!(name, reply);
let parent = inode!(self, parent, reply);
let child = lookup!(self, &parent, sname, reply);
@@ -324,7 +364,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
reply.entry(&ttl, &attrs, 0)
}

fn destroy (&mut self, _req: &fuse::Request) {
fn destroy(&mut self, _req: &fuse::Request) {
info!("destroy");
}

@@ -335,66 +375,131 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// each forget. The filesystem may ignore forget calls, if the inodes don't need to
/// have a limited lifetime. On unmount it is not guaranteed, that all referenced
/// inodes will receive a forget message.
fn forget (&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) {
fn forget(&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) {
info!("forget {:?}", ino);
//self.fs.forget(ino).unwrap();
}

/// Get file attributes
fn getattr (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) {
fn getattr(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) {
let inode = inode!(self, ino, reply);
let ttl = Timespec::new(60, 0);
reply.attr(&ttl, &inode.borrow().to_attrs());
}

/// Set file attributes
fn setattr (&mut self, _req: &fuse::Request, _ino: u64, _mode: Option<u32>, _uid: Option<u32>, _gid: Option<u32>, _size: Option<u64>, _atime: Option<Timespec>, _mtime: Option<Timespec>, _fh: Option<u64>, _crtime: Option<Timespec>, _chgtime: Option<Timespec>, _bkuptime: Option<Timespec>, _flags: Option<u32>, reply: fuse::ReplyAttr) {
fn setattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_mode: Option<u32>,
_uid: Option<u32>,
_gid: Option<u32>,
_size: Option<u64>,
_atime: Option<Timespec>,
_mtime: Option<Timespec>,
_fh: Option<u64>,
_crtime: Option<Timespec>,
_chgtime: Option<Timespec>,
_bkuptime: Option<Timespec>,
_flags: Option<u32>,
reply: fuse::ReplyAttr,
) {
reply.error(libc::EROFS)
}

/// Read symbolic link
fn readlink (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) {
fn readlink(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
match inode.inode.symlink_target {
None => reply.error(libc::EINVAL),
Some(ref link) => reply.data(link.as_bytes())
Some(ref link) => reply.data(link.as_bytes()),
}
}

/// Create a hard link
fn link (&mut self, _req: &fuse::Request, _ino: u64, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEntry) {
fn link(
&mut self,
_req: &fuse::Request,
_ino: u64,
_newparent: u64,
_newname: &OsStr,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}

/// Create file node
/// Create a regular file, character device, block device, fifo or socket node.
fn mknod (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _rdev: u32, reply: fuse::ReplyEntry) {
fn mknod(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
_rdev: u32,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}

/// Create a directory
fn mkdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, reply: fuse::ReplyEntry) {
fn mkdir(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}

/// Remove a file
fn unlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn unlink(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}

/// Remove a directory
fn rmdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn rmdir(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}

/// Create a symbolic link
fn symlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _link: &Path, reply: fuse::ReplyEntry) {
fn symlink(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_link: &Path,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}

/// Rename a file
fn rename (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEmpty) {
fn rename(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_newparent: u64,
_newname: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}

@@ -406,7 +511,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// anything in fh. There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details.
fn open (&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) {
fn open(&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) {
if (flags & (libc::O_WRONLY | libc::O_RDWR | libc::O_TRUNC) as u32) != 0 {
return reply.error(libc::EROFS);
}
@@ -422,29 +527,44 @@
/// return value of the read system call will reflect the return value of this
/// operation. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value.
fn read (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, mut offset: u64, mut size: u32, reply: fuse::ReplyData) {
fn read(
&mut self,
_req: &fuse::Request,
ino: u64,
_fh: u64,
mut offset: u64,
mut size: u32,
reply: fuse::ReplyData,
) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
match inode.inode.data {
None => return reply.data(&[]),
Some(FileData::Inline(ref data)) => return reply.data(&data[min(offset as usize, data.len())..min(offset as usize+size as usize, data.len())]),
_ => ()
Some(FileData::Inline(ref data)) => {
return reply.data(
&data[min(offset as usize, data.len())..
min(offset as usize + size as usize, data.len())]
)
}
_ => (),
}
if let Some(ref chunks) = inode.chunks {
let mut data = Vec::with_capacity(size as usize);
for &(hash, len) in chunks.iter() {
if len as u64 <= offset {
offset -= len as u64;
continue
continue;
}
let chunk = match fuse_try!(self.repository.get_chunk(hash), reply) {
Some(chunk) => chunk,
None => return reply.error(libc::EIO)
None => return reply.error(libc::EIO),
};
assert_eq!(chunk.len() as u32, len);
data.extend_from_slice(&chunk[offset as usize..min(offset as usize + size as usize, len as usize)]);
data.extend_from_slice(
&chunk[offset as usize..min(offset as usize + size as usize, len as usize)]
);
if len - offset as u32 >= size {
break
break;
}
size -= len - offset as u32;
offset = 0;
@@ -456,12 +576,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// Write data
fn write (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _offset: u64, _data: &[u8], _flags: u32, reply: fuse::ReplyWrite) {
fn write(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_offset: u64,
_data: &[u8],
_flags: u32,
reply: fuse::ReplyWrite,
) {
reply.error(libc::EROFS)
}

/// Flush method
fn flush (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: fuse::ReplyEmpty) {
fn flush(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}

@@ -473,7 +609,16 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// the release. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value. flags will contain the same flags as for
/// open.
fn release (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, _lock_owner: u64, _flush: bool, reply: fuse::ReplyEmpty) {
fn release(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_flags: u32,
_lock_owner: u64,
_flush: bool,
reply: fuse::ReplyEmpty,
) {
/*if self.read_fds.remove(&fh).is_some() || self.write_fds.remove(&fh).is_some() {
reply.ok();
} else {
@@ -483,28 +628,42 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// Synchronize file contents
fn fsync (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) {
fn fsync(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_datasync: bool,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}

/// Open a directory, finished
fn opendir (&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) {
fn opendir(&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) {
let dir = inode!(self, ino, reply);
fuse_try!(self.fetch_children(&dir), reply);
reply.opened(ino, 0);
}

/// Read directory, finished
fn readdir (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, offset: u64, mut reply: fuse::ReplyDirectory) {
fn readdir(
&mut self,
_req: &fuse::Request,
ino: u64,
_fh: u64,
offset: u64,
mut reply: fuse::ReplyDirectory,
) {
let dir = inode!(self, ino, reply);
let dir = dir.borrow();
if let Some(entries) = dir.dir_list() {
for (i, (num, file_type, name)) in entries.into_iter().enumerate() {
if i < offset as usize {
continue
continue;
}
if reply.add(num, i as u64 +1, file_type, &Path::new(&name)) {
break
if reply.add(num, i as u64 + 1, file_type, &Path::new(&name)) {
break;
}
}
reply.ok()
@@ -514,20 +673,34 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// Release an open directory, finished
fn releasedir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, reply: fuse::ReplyEmpty) {
fn releasedir(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_flags: u32,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}

/// Synchronize directory contents, finished
fn fsyncdir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) {
fn fsyncdir(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_datasync: bool,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}

/// Get file system statistics
fn statfs (&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) {
fn statfs(&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) {
let info = self.repository.info();
reply.statfs(
info.raw_data_size/512 as u64, //total blocks
info.raw_data_size / 512 as u64, //total blocks
0, //free blocks for admin
0, //free blocks for users
0,
@@ -539,12 +712,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// Set an extended attribute
fn setxattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, _value: &[u8], _flags: u32, _position: u32, reply: fuse::ReplyEmpty) {
fn setxattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_name: &OsStr,
_value: &[u8],
_flags: u32,
_position: u32,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}

/// Get an extended attribute
fn getxattr (&mut self, _req: &fuse::Request, ino: u64, name: &OsStr, size: u32, reply: fuse::ReplyXattr) {
fn getxattr(
&mut self,
_req: &fuse::Request,
ino: u64,
name: &OsStr,
size: u32,
reply: fuse::ReplyXattr,
) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
if let Some(val) = inode.inode.xattrs.get(&name.to_string_lossy() as &str) {
@@ -561,7 +750,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// List extended attribute names
fn listxattr (&mut self, _req: &fuse::Request, ino: u64, size: u32, reply: fuse::ReplyXattr) {
fn listxattr(&mut self, _req: &fuse::Request, ino: u64, size: u32, reply: fuse::ReplyXattr) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
let mut names_str = String::new();
@@ -579,7 +768,13 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}

/// Remove an extended attribute
fn removexattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn removexattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}

@@ -587,28 +782,65 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// This will be called for the access() system call. If the 'default_permissions'
/// mount option is given, this method is not called. This method is not called
/// under Linux kernel versions 2.4.x
fn access (&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) {
fn access(&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) {
reply.error(libc::ENOSYS);
}

/// Create and open a file
fn create (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _flags: u32, reply: fuse::ReplyCreate) {
fn create(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
_flags: u32,
reply: fuse::ReplyCreate,
) {
reply.error(libc::EROFS)
}

/// Test for a POSIX file lock
fn getlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, reply: fuse::ReplyLock) {
fn getlk(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
_start: u64,
_end: u64,
_typ: u32,
_pid: u32,
reply: fuse::ReplyLock,
) {
reply.error(libc::ENOSYS);
}

/// Acquire, modify or release a POSIX file lock
fn setlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, _sleep: bool, reply: fuse::ReplyEmpty) {
fn setlk(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
_start: u64,
_end: u64,
_typ: u32,
_pid: u32,
_sleep: bool,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::ENOSYS);
}

/// Map block index within file to block index within device
fn bmap (&mut self, _req: &fuse::Request, _ino: u64, _blocksize: u32, _idx: u64, reply: fuse::ReplyBmap) {
fn bmap(
&mut self,
_req: &fuse::Request,
_ino: u64,
_blocksize: u32,
_idx: u64,
reply: fuse::ReplyBmap,
) {
reply.error(libc::ENOSYS);
}

}
@@ -1,9 +1,12 @@
pub use ::util::*;
pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError, StoredBundle};
pub use ::chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis, FileData, DiffType, InodeError, RepositoryLayout, Location};
pub use ::index::{Index, IndexError};
pub use ::mount::FuseFilesystem;
pub use util::*;
pub use bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError,
BundleDb, BundleWriterError, StoredBundle};
pub use chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType,
IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis,
FileData, DiffType, InodeError, RepositoryLayout, Location};
pub use index::{Index, IndexError};
pub use mount::FuseFilesystem;

pub use serde::{Serialize, Deserialize};
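Aside, not part of the commit: the prelude hunk above shows rustfmt's visual indent for imports — when a braced re-export list overflows the line, the continuation is aligned under the opening brace. A standalone sketch using real std items:

// Hypothetical example; the alignment matches the prelude wrapping above.
pub use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet,
                           LinkedList, VecDeque};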
@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::fs;
use std::path::{self, Path, PathBuf};
@@ -33,17 +33,28 @@ pub struct BackupOptions {


pub enum DiffType {
Add, Mod, Del
Add,
Mod,
Del
}


impl Repository {
pub fn get_all_backups(&self) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.layout.backups_path())))
Ok(try!(Backup::get_all_from(
&self.crypto.lock().unwrap(),
self.layout.backups_path()
)))
}

pub fn get_backups<P: AsRef<Path>>(&self, path: P) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.layout.backups_path().join(path))))
pub fn get_backups<P: AsRef<Path>>(
&self,
path: P,
) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(
&self.crypto.lock().unwrap(),
self.layout.backups_path().join(path)
)))
}

#[inline]
@@ -52,14 +63,21 @@ impl Repository {
}

pub fn get_backup(&self, name: &str) -> Result<Backup, RepositoryError> {
Ok(try!(Backup::read_from(&self.crypto.lock().unwrap(), self.layout.backup_path(name))))
Ok(try!(Backup::read_from(
&self.crypto.lock().unwrap(),
self.layout.backup_path(name)
)))
}

pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
try!(self.write_mode());
let path = self.layout.backup_path(name);
try!(fs::create_dir_all(path.parent().unwrap()));
Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path)))
Ok(try!(backup.save_to(
&self.crypto.lock().unwrap(),
self.config.encryption.clone(),
path
)))
}

pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
@@ -69,23 +87,32 @@ impl Repository {
loop {
path = path.parent().unwrap().to_owned();
if path == self.layout.backups_path() || fs::remove_dir(&path).is_err() {
break
break;
}
}
Ok(())
}


pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
pub fn prune_backups(
&mut self,
prefix: &str,
daily: usize,
weekly: usize,
monthly: usize,
yearly: usize,
force: bool,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
let mut backups = Vec::new();
let backup_map = match self.get_all_backups() {
Ok(backup_map) => backup_map,
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
_failed))) => {
warn!("Some backups could not be read, ignoring them");
backup_map
},
Err(err) => return Err(err)
}
Err(err) => return Err(err),
};
for (name, backup) in backup_map {
if name.starts_with(prefix) {
@@ -96,7 +123,12 @@ impl Repository {
backups.sort_by_key(|backup| -backup.2.timestamp);
let mut keep = Bitmap::new(backups.len());

fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(backups: &[(String, DateTime<Local>, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) {
fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(
backups: &[(String, DateTime<Local>, Backup)],
keep: &mut Bitmap,
max: usize,
keyfn: F,
) {
let mut kept = 0;
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
@@ -104,7 +136,7 @@ impl Repository {
let cur = Some(val);
if cur != last {
if kept >= max {
break
break;
}
last = cur;
keep.set(i);
@@ -125,7 +157,12 @@ impl Repository {
});
}
if daily > 0 {
mark_needed(&backups, &mut keep, daily, |d| (d.year(), d.month(), d.day()));
mark_needed(
&backups,
&mut keep,
daily,
|d| (d.year(), d.month(), d.day())
);
}
let mut remove = Vec::new();
println!("Removing the following backups");
@@ -143,7 +180,12 @@ impl Repository {
Ok(())
}

pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, path: P) -> Result<(), RepositoryError> {
pub fn restore_inode_tree<P: AsRef<Path>>(
&mut self,
backup: &Backup,
inode: Inode,
path: P,
) -> Result<(), RepositoryError> {
let _lock = try!(self.lock(false));
let mut queue = VecDeque::new();
queue.push_back((path.as_ref().to_owned(), inode));
@@ -164,7 +206,11 @@ impl Repository {
try!(self.save_inode_at(&inode, &path));
}
if inode.file_type == FileType::Directory {
let path = if is_root { path.to_path_buf() } else { path.join(inode.name) };
let path = if is_root {
path.to_path_buf()
} else {
path.join(inode.name)
};
for chunks in inode.children.unwrap().values() {
let inode = try!(self.get_inode(chunks));
queue.push_back((path.clone(), inode));
@@ -181,20 +227,26 @@ impl Repository {
reference: Option<&Inode>,
options: &BackupOptions,
backup: &mut Backup,
failed_paths: &mut Vec<PathBuf>
failed_paths: &mut Vec<PathBuf>,
) -> Result<Inode, RepositoryError> {
let path = path.as_ref();
let mut inode = try!(self.create_inode(path, reference));
if !backup.user_names.contains_key(&inode.user) {
if let Some(user) = users::get_user_by_uid(inode.user) {
backup.user_names.insert(inode.user, user.name().to_string());
backup.user_names.insert(
inode.user,
user.name().to_string()
);
} else {
warn!("Failed to retrieve name of user {}", inode.user);
}
}
if !backup.group_names.contains_key(&inode.group) {
if let Some(group) = users::get_group_by_gid(inode.group) {
backup.group_names.insert(inode.group, group.name().to_string());
backup.group_names.insert(
inode.group,
group.name().to_string()
);
} else {
warn!("Failed to retrieve name of group {}", inode.group);
}
@@ -211,28 +263,37 @@ impl Repository {
if options.same_device {
let child_dev = try!(child.metadata()).st_dev();
if child_dev != parent_dev {
continue
continue;
}
}
if let Some(ref excludes) = options.excludes {
let child_path_str = child_path.to_string_lossy();
if excludes.is_match(&child_path_str) {
continue
continue;
}
}
let name = child.file_name().to_string_lossy().to_string();
let ref_child = reference.as_ref()
let ref_child = reference
.as_ref()
.and_then(|inode| inode.children.as_ref())
.and_then(|map| map.get(&name))
.and_then(|chunks| self.get_inode(chunks).ok());
let child_inode = match self.create_backup_recurse(&child_path, ref_child.as_ref(), options, backup, failed_paths) {
let child_inode = match self.create_backup_recurse(
&child_path,
ref_child.as_ref(),
options,
backup,
failed_paths
) {
Ok(inode) => inode,
Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
Err(RepositoryError::Inode(_)) |
Err(RepositoryError::Chunker(_)) |
Err(RepositoryError::Io(_)) => {
info!("Failed to backup {:?}", child_path);
failed_paths.push(child_path);
continue
},
Err(err) => return Err(err)
continue;
}
Err(err) => return Err(err),
};
let chunks = try!(self.put_inode(&child_inode));
inode.cum_size += child_inode.cum_size;
@@ -263,11 +324,16 @@ impl Repository {
Ok(inode)
}

pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result<Backup, RepositoryError> {
pub fn create_backup_recursively<P: AsRef<Path>>(
&mut self,
path: P,
reference: Option<&Backup>,
options: &BackupOptions,
) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
@@ -278,7 +344,13 @@ impl Repository {
let info_before = self.info();
let start = Local::now();
let mut failed_paths = vec![];
let root_inode = try!(self.create_backup_recurse(path, reference_inode.as_ref(), options, &mut backup, &mut failed_paths));
let root_inode = try!(self.create_backup_recurse(
path,
reference_inode.as_ref(),
options,
&mut backup,
&mut failed_paths
));
backup.root = try!(self.put_inode(&root_inode));
try!(self.flush());
let elapsed = Local::now().signed_duration_since(start);
@@ -304,20 +376,29 @@ impl Repository {
}
}

pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
pub fn remove_backup_path<P: AsRef<Path>>(
&mut self,
backup: &mut Backup,
path: P,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
let mut inodes = try!(self.get_backup_path(backup, path));
let to_remove = inodes.pop().unwrap();
let mut remove_from = match inodes.pop() {
Some(inode) => inode,
None => return Err(BackupError::RemoveRoot.into())
None => return Err(BackupError::RemoveRoot.into()),
};
remove_from.children.as_mut().unwrap().remove(&to_remove.name);
remove_from.children.as_mut().unwrap().remove(
&to_remove.name
);
let mut last_inode_chunks = try!(self.put_inode(&remove_from));
let mut last_inode_name = remove_from.name;
while let Some(mut inode) = inodes.pop() {
inode.children.as_mut().unwrap().insert(last_inode_name, last_inode_chunks);
inode.children.as_mut().unwrap().insert(
last_inode_name,
last_inode_chunks
);
last_inode_chunks = try!(self.put_inode(&inode));
last_inode_name = inode.name;
}
@@ -326,20 +407,32 @@ impl Repository {
Ok(())
}

pub fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Vec<Inode>, RepositoryError> {
pub fn get_backup_path<P: AsRef<Path>>(
&mut self,
backup: &Backup,
path: P,
) -> Result<Vec<Inode>, RepositoryError> {
let mut inodes = vec![];
let mut inode = try!(self.get_inode(&backup.root));
for c in path.as_ref().components() {
if let path::Component::Normal(name) = c {
let name = name.to_string_lossy();
if inodes.is_empty() && inode.file_type != FileType::Directory && inode.name == name {
if inodes.is_empty() && inode.file_type != FileType::Directory &&
inode.name == name
{
return Ok(vec![inode]);
}
if let Some(chunks) = inode.children.as_mut().and_then(|c| c.remove(&name as &str)) {
if let Some(chunks) = inode.children.as_mut().and_then(
|c| c.remove(&name as &str)
)
{
inodes.push(inode);
inode = try!(self.get_inode(&chunks));
} else {
return Err(RepositoryError::NoSuchFileInBackup(backup.clone(), path.as_ref().to_owned()));
return Err(RepositoryError::NoSuchFileInBackup(
backup.clone(),
path.as_ref().to_owned()
));
}
}
}
@@ -348,20 +441,32 @@ impl Repository {
}

#[inline]
pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {
self.get_backup_path(backup, path).map(|mut inodes| inodes.pop().unwrap())
pub fn get_backup_inode<P: AsRef<Path>>(
&mut self,
backup: &Backup,
path: P,
) -> Result<Inode, RepositoryError> {
self.get_backup_path(backup, path).map(|mut inodes| {
inodes.pop().unwrap()
})
}

pub fn find_versions<P: AsRef<Path>>(&mut self, path: P) -> Result<Vec<(String, Inode)>, RepositoryError> {
pub fn find_versions<P: AsRef<Path>>(
&mut self,
path: P,
) -> Result<Vec<(String, Inode)>, RepositoryError> {
let path = path.as_ref();
let mut versions = HashMap::new();
for (name, backup) in try!(self.get_all_backups()) {
match self.get_backup_inode(&backup, path) {
Ok(inode) => {
versions.insert((inode.file_type, inode.timestamp, inode.size), (name, inode));
},
versions.insert(
(inode.file_type, inode.timestamp, inode.size),
(name, inode)
);
}
Err(RepositoryError::NoSuchFileInBackup(..)) => continue,
Err(err) => return Err(err)
Err(err) => return Err(err),
}
}
let mut versions: Vec<_> = versions.into_iter().map(|(_, v)| v).collect();
@@ -369,7 +474,13 @@ impl Repository {
Ok(versions)
}

fn find_differences_recurse(&mut self, inode1: &Inode, inode2: &Inode, path: PathBuf, diffs: &mut Vec<(DiffType, PathBuf)>) -> Result<(), RepositoryError> {
fn find_differences_recurse(
&mut self,
inode1: &Inode,
inode2: &Inode,
path: PathBuf,
diffs: &mut Vec<(DiffType, PathBuf)>,
) -> Result<(), RepositoryError> {
if !inode1.is_same_meta(inode2) || inode1.data != inode2.data {
diffs.push((DiffType::Mod, path.clone()));
}
@@ -393,7 +504,12 @@ impl Repository {
if chunks1 != chunks2 {
let inode1 = try!(self.get_inode(chunks1));
let inode2 = try!(self.get_inode(chunks2));
try!(self.find_differences_recurse(&inode1, &inode2, path.join(name), diffs));
try!(self.find_differences_recurse(
&inode1,
&inode2,
path.join(name),
diffs
));
}
} else {
diffs.push((DiffType::Add, path.join(name)));
@@ -409,10 +525,19 @@ impl Repository {
}

#[inline]
pub fn find_differences(&mut self, inode1: &Inode, inode2: &Inode) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
pub fn find_differences(
&mut self,
inode1: &Inode,
inode2: &Inode,
) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
let mut diffs = vec![];
let path = PathBuf::from("/");
try!(self.find_differences_recurse(inode1, inode2, path, &mut diffs));
try!(self.find_differences_recurse(
inode1,
inode2,
path,
&mut diffs
));
Ok(diffs)
}
}
@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::io::{self, BufReader, BufWriter, Read, Write};
use std::fs::{self, File};
@@ -116,41 +116,66 @@ serde_impl!(Backup(u8?) {
impl Backup {
pub fn read_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<Self, BackupFileError> {
let path = path.as_ref();
let mut file = BufReader::new(try!(File::open(path).map_err(|err| BackupFileError::Read(err, path.to_path_buf()))));
let mut file = BufReader::new(try!(File::open(path).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
})));
let mut header = [0u8; 8];
try!(file.read_exact(&mut header).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
try!(file.read_exact(&mut header).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
}));
if header[..HEADER_STRING.len()] != HEADER_STRING {
return Err(BackupFileError::WrongHeader(path.to_path_buf()))
return Err(BackupFileError::WrongHeader(path.to_path_buf()));
}
let version = header[HEADER_STRING.len()];
if version != HEADER_VERSION {
return Err(BackupFileError::UnsupportedVersion(path.to_path_buf(), version))
return Err(BackupFileError::UnsupportedVersion(
path.to_path_buf(),
version
));
}
let header: BackupHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
let mut data = Vec::new();
try!(file.read_to_end(&mut data).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
try!(file.read_to_end(&mut data).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
}));
if let Some(ref encryption) = header.encryption {
data = try!(crypto.decrypt(encryption, &data));
}
Ok(try!(msgpack::decode(&data).context(path)))
}

pub fn save_to<P: AsRef<Path>>(&self, crypto: &Crypto, encryption: Option<Encryption>, path: P) -> Result<(), BackupFileError> {
pub fn save_to<P: AsRef<Path>>(
&self,
crypto: &Crypto,
encryption: Option<Encryption>,
path: P,
) -> Result<(), BackupFileError> {
let path = path.as_ref();
let mut data = try!(msgpack::encode(self).context(path));
if let Some(ref encryption) = encryption {
data = try!(crypto.encrypt(encryption, &data));
}
let mut file = BufWriter::new(try!(File::create(path).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))));
try!(file.write_all(&HEADER_STRING).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
try!(file.write_all(&[HEADER_VERSION]).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
let mut file = BufWriter::new(try!(File::create(path).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
})));
try!(file.write_all(&HEADER_STRING).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
try!(file.write_all(&[HEADER_VERSION]).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
let header = BackupHeader { encryption: encryption };
try!(msgpack::encode_to_stream(&header, &mut file).context(path));
try!(file.write_all(&data).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
try!(file.write_all(&data).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
Ok(())
}

pub fn get_all_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<HashMap<String, Backup>, BackupFileError> {
pub fn get_all_from<P: AsRef<Path>>(
crypto: &Crypto,
path: P,
) -> Result<HashMap<String, Backup>, BackupFileError> {
let mut backups = HashMap::new();
let base_path = path.as_ref();
let path = path.as_ref();
@@ -161,7 +186,10 @@ impl Backup {
let mut paths = vec![path.to_path_buf()];
let mut failed_paths = vec![];
while let Some(path) = paths.pop() {
for entry in try!(fs::read_dir(&path).map_err(|e| BackupFileError::Read(e, path.clone()))) {
for entry in try!(fs::read_dir(&path).map_err(|e| {
BackupFileError::Read(e, path.clone())
}))
{
let entry = try!(entry.map_err(|e| BackupFileError::Read(e, path.clone())));
let path = entry.path();
if path.is_dir() {
@@ -169,9 +197,12 @@ impl Backup {
} else {
let relpath = path.strip_prefix(&base_path).unwrap();
if relpath.extension() != Some("backup".as_ref()) {
continue
continue;
}
let name = relpath.with_file_name(relpath.file_stem().unwrap()).to_string_lossy().to_string();
let name = relpath
.with_file_name(relpath.file_stem().unwrap())
.to_string_lossy()
.to_string();
if let Ok(backup) = Backup::read_from(crypto, &path) {
backups.insert(name, backup);
} else {
@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::mem;
use std::cmp::min;

@@ -29,22 +29,27 @@ impl<'a> Read for ChunkReader<'a> {
let mut bpos = 0;
loop {
if buf.len() == bpos {
break
break;
}
if self.data.len() == self.pos {
if let Some(chunk) = self.chunks.pop_front() {
self.data = match self.repo.get_chunk(chunk.0) {
Ok(Some(data)) => data,
Ok(None) => return Err(io::Error::new(io::ErrorKind::Other, IntegrityError::MissingChunk(chunk.0))),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err))
Ok(None) => {
return Err(io::Error::new(
io::ErrorKind::Other,
IntegrityError::MissingChunk(chunk.0)
))
}
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err)),
};
self.pos = 0;
} else {
break
break;
}
}
let l = min(self.data.len()-self.pos, buf.len() - bpos);
buf[bpos..bpos+l].copy_from_slice(&self.data[self.pos..self.pos+l]);
let l = min(self.data.len() - self.pos, buf.len() - bpos);
buf[bpos..bpos + l].copy_from_slice(&self.data[self.pos..self.pos + l]);
bpos += l;
self.pos += l;
}

@@ -56,7 +61,9 @@ impl<'a> Read for ChunkReader<'a> {
impl Repository {
#[inline]
pub fn get_bundle_id(&self, id: u32) -> Result<BundleId, RepositoryError> {
self.bundle_map.get(id).ok_or_else(|| IntegrityError::MissingBundleId(id).into())
self.bundle_map.get(id).ok_or_else(|| {
IntegrityError::MissingBundleId(id).into()
})
}

pub fn get_chunk(&mut self, hash: Hash) -> Result<Option<Vec<u8>>, RepositoryError> {

@@ -64,27 +71,39 @@ impl Repository {
let found = if let Some(found) = self.index.get(&hash) {
found
} else {
return Ok(None)
return Ok(None);
};
// Lookup bundle id from map
let bundle_id = try!(self.get_bundle_id(found.bundle));
// Get chunk from bundle
Ok(Some(try!(self.bundles.get_chunk(&bundle_id, found.chunk as usize))))
Ok(Some(try!(
self.bundles.get_chunk(&bundle_id, found.chunk as usize)
)))
}

#[inline]
pub fn put_chunk(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
pub fn put_chunk(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
// If this chunk is in the index, ignore it
if self.index.contains(&hash) {
return Ok(())
return Ok(());
}
self.put_chunk_override(mode, hash, data)
}

fn write_chunk_to_bundle_and_index(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
fn write_chunk_to_bundle_and_index(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
// ...allocate one if needed
if writer.is_none() {

@@ -101,10 +120,13 @@ impl Repository {
let chunk_id = try!(writer_obj.add(data, hash));
let bundle_id = match mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
// Add location to the index
try!(self.index.set(&hash, &Location::new(bundle_id, chunk_id as u32)));
try!(self.index.set(
&hash,
&Location::new(bundle_id, chunk_id as u32)
));
Ok(())
}

@@ -113,14 +135,14 @@ impl Repository {
let next_free_bundle_id = self.next_free_bundle_id();
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
if writer.is_none() {
return Ok(())
return Ok(());
}
let bundle_id = match mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
let mut finished = None;
mem::swap(writer, &mut finished);

@@ -139,12 +161,12 @@ impl Repository {
let (size, raw_size) = {
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
if let Some(ref writer) = *writer {
(writer.estimate_final_size(), writer.raw_size())
} else {
return Ok(())
return Ok(());
}
};
if size >= self.config.bundle_size || raw_size >= 4 * self.config.bundle_size {

@@ -158,18 +180,31 @@ impl Repository {
}

#[inline]
pub fn put_chunk_override(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
pub fn put_chunk_override(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
try!(self.write_chunk_to_bundle_and_index(mode, hash, data));
self.finish_bundle_if_needed(mode)
}

#[inline]
pub fn put_data(&mut self, mode: BundleMode, data: &[u8]) -> Result<ChunkList, RepositoryError> {
pub fn put_data(
&mut self,
mode: BundleMode,
data: &[u8],
) -> Result<ChunkList, RepositoryError> {
let mut input = Cursor::new(data);
self.put_stream(mode, &mut input)
}

pub fn put_stream<R: Read>(&mut self, mode: BundleMode, data: &mut R) -> Result<ChunkList, RepositoryError> {
pub fn put_stream<R: Read>(
&mut self,
mode: BundleMode,
data: &mut R,
) -> Result<ChunkList, RepositoryError> {
let avg_size = self.config.chunker.avg_size();
let mut chunks = Vec::new();
let mut chunk = Vec::with_capacity(avg_size * 2);

@@ -182,14 +217,15 @@ impl Repository {
try!(self.put_chunk(mode, hash, &chunk));
chunks.push((hash, chunk.len() as u32));
if res == ChunkerStatus::Finished {
break
break;
}
}
Ok(chunks.into())
}

pub fn get_data(&mut self, chunks: &[Chunk]) -> Result<Vec<u8>, RepositoryError> {
let mut data = Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::<u32>() as usize);
let mut data =
Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::<u32>() as usize);
try!(self.get_stream(chunks, &mut data));
Ok(data)
}

@@ -199,9 +235,15 @@ impl Repository {
ChunkReader::new(self, chunks)
}

pub fn get_stream<W: Write>(&mut self, chunks: &[Chunk], w: &mut W) -> Result<(), RepositoryError> {
pub fn get_stream<W: Write>(
&mut self,
chunks: &[Chunk],
w: &mut W,
) -> Result<(), RepositoryError> {
for &(ref hash, len) in chunks {
let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| IntegrityError::MissingChunk(*hash)));
let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| {
IntegrityError::MissingChunk(*hash)
}));
debug_assert_eq!(data.len() as u32, len);
try!(w.write_all(&data));
}

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::collections::HashMap;
use std::path::Path;

@@ -51,11 +51,11 @@ impl BundleMap {
let mut header = [0u8; 8];
try!(file.read_exact(&mut header));
if header[..HEADER_STRING.len()] != HEADER_STRING {
return Err(BundleMapError::WrongHeader)
return Err(BundleMapError::WrongHeader);
}
let version = header[HEADER_STRING.len()];
if version != HEADER_VERSION {
return Err(BundleMapError::WrongVersion(version))
return Err(BundleMapError::WrongVersion(version));
}
Ok(BundleMap(try!(msgpack::decode_from_stream(&mut file))))
}

@@ -80,7 +80,7 @@ impl BundleMap {
pub fn find(&self, bundle: &BundleId) -> Option<u32> {
for (id, bundle_id) in &self.0 {
if bundle == bundle_id {
return Some(*id)
return Some(*id);
}
}
None

@@ -92,7 +92,10 @@ impl BundleMap {
}

pub fn bundles(&self) -> Vec<(u32, BundleId)> {
self.0.iter().map(|(id, bundle)| (*id, bundle.clone())).collect()
self.0
.iter()
.map(|(id, bundle)| (*id, bundle.clone()))
.collect()
}

#[inline]

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use serde_yaml;

@@ -49,7 +49,7 @@ impl Default for ChunkerYaml {
fn default() -> Self {
ChunkerYaml {
method: "fastcdc".to_string(),
avg_size: 16*1024,
avg_size: 16 * 1024,
seed: 0
}
}

@@ -126,14 +126,14 @@ struct ConfigYaml {
encryption: Option<EncryptionYaml>,
bundle_size: usize,
chunker: ChunkerYaml,
hash: String,
hash: String
}
impl Default for ConfigYaml {
fn default() -> Self {
ConfigYaml {
compression: Some("brotli/5".to_string()),
encryption: None,
bundle_size: 25*1024*1024,
bundle_size: 25 * 1024 * 1024,
chunker: ChunkerYaml::default(),
hash: "blake2".to_string()
}

@@ -162,7 +162,7 @@ impl Default for Config {
Config {
compression: Some(Compression::from_string("brotli/3").unwrap()),
encryption: None,
bundle_size: 25*1024*1024,
bundle_size: 25 * 1024 * 1024,
chunker: ChunkerType::from_string("fastcdc/16").unwrap(),
hash: HashMethod::Blake2
}

@@ -185,12 +185,14 @@ impl Config {
};
let encryption = if let Some(e) = yaml.encryption {
let method = try!(EncryptionMethod::from_yaml(e.method));
let key = try!(parse_hex(&e.key).map_err(|_| ConfigError::Parse("Invalid public key")));
let key = try!(parse_hex(&e.key).map_err(|_| {
ConfigError::Parse("Invalid public key")
}));
Some((method, key.into()))
} else {
None
};
Ok(Config{
Ok(Config {
compression: compression,
encryption: encryption,
bundle_size: yaml.bundle_size,

@@ -202,7 +204,12 @@ impl Config {
fn to_yaml(&self) -> ConfigYaml {
ConfigYaml {
compression: self.compression.as_ref().map(|c| c.to_yaml()),
encryption: self.encryption.as_ref().map(|e| EncryptionYaml{method: e.0.to_yaml(), key: to_hex(&e.1[..])}),
encryption: self.encryption.as_ref().map(|e| {
EncryptionYaml {
method: e.0.to_yaml(),
key: to_hex(&e.1[..])
}
}),
bundle_size: self.bundle_size,
chunker: self.chunker.to_yaml(),
hash: self.hash.to_yaml()

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::io;
use std::path::PathBuf;

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::collections::{HashMap, VecDeque};

@@ -40,7 +40,11 @@ pub struct RepositoryInfo {


impl Repository {
fn mark_used(&self, bundles: &mut HashMap<u32, BundleAnalysis>, chunks: &[Chunk]) -> Result<bool, RepositoryError> {
fn mark_used(
&self,
bundles: &mut HashMap<u32, BundleAnalysis>,
chunks: &[Chunk],
) -> Result<bool, RepositoryError> {
let mut new = false;
for &(hash, len) in chunks {
if let Some(pos) = self.index.get(&hash) {

@@ -62,17 +66,22 @@ impl Repository {

pub fn analyze_usage(&mut self) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let mut usage = HashMap::new();
for (id, bundle) in self.bundle_map.bundles() {
let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| IntegrityError::MissingBundle(bundle)));
usage.insert(id, BundleAnalysis {
chunk_usage: Bitmap::new(bundle.info.chunk_count),
info: bundle.info.clone(),
used_raw_size: 0
});
let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| {
IntegrityError::MissingBundle(bundle)
}));
usage.insert(
id,
BundleAnalysis {
chunk_usage: Bitmap::new(bundle.info.chunk_count),
info: bundle.info.clone(),
used_raw_size: 0
}
);
}
let backups = try!(self.get_all_backups());
let mut todo = VecDeque::new();

@@ -81,15 +90,16 @@ impl Repository {
}
while let Some(chunks) = todo.pop_back() {
if !try!(self.mark_used(&mut usage, &chunks)) {
continue
continue;
}
let inode = try!(self.get_inode(&chunks));
// Mark the content chunks as used
match inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(chunks)) => {
try!(self.mark_used(&mut usage, &chunks));
},
}
Some(FileData::ChunkedIndirect(chunks)) => {
if try!(self.mark_used(&mut usage, &chunks)) {
let chunk_data = try!(self.get_data(&chunks));

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use super::*;

@@ -51,7 +51,7 @@ impl Repository {
let mut progress = ProgressBar::new(self.index.len() as u64);
progress.message("checking index: ");
progress.set_max_refresh_rate(Some(Duration::from_millis(100)));
for (count,(_hash, location)) in self.index.iter().enumerate() {
for (count, (_hash, location)) in self.index.iter().enumerate() {
// Lookup bundle id from map
let bundle_id = try!(self.get_bundle_id(location.bundle));
// Get bundle object from bundledb

@@ -59,12 +59,14 @@ impl Repository {
bundle
} else {
progress.finish_print("checking index: done.");
return Err(IntegrityError::MissingBundle(bundle_id.clone()).into())
return Err(IntegrityError::MissingBundle(bundle_id.clone()).into());
};
// Get chunk from bundle
if bundle.info.chunk_count <= location.chunk as usize {
progress.finish_print("checking index: done.");
return Err(IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into())
return Err(
IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into()
);
}
if count % 1000 == 0 {
progress.set(count as u64);

@@ -74,7 +76,12 @@ impl Repository {
Ok(())
}

fn check_chunks(&self, checked: &mut Bitmap, chunks: &[Chunk], mark: bool) -> Result<bool, RepositoryError> {
fn check_chunks(
&self,
checked: &mut Bitmap,
chunks: &[Chunk],
mark: bool,
) -> Result<bool, RepositoryError> {
let mut new = false;
for &(hash, _len) in chunks {
if let Some(pos) = self.index.pos(&hash) {

@@ -83,18 +90,23 @@ impl Repository {
checked.set(pos);
}
} else {
return Err(IntegrityError::MissingChunk(hash).into())
return Err(IntegrityError::MissingChunk(hash).into());
}
}
Ok(new)
}

fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap) -> Result<(), RepositoryError> {
fn check_inode_contents(
&mut self,
inode: &Inode,
checked: &mut Bitmap,
) -> Result<(), RepositoryError> {
match inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(ref chunks)) => {
try!(self.check_chunks(checked, chunks, true));
},
}
Some(FileData::ChunkedIndirect(ref chunks)) => {
if try!(self.check_chunks(checked, chunks, true)) {
let chunk_data = try!(self.get_data(chunks));

@@ -106,24 +118,34 @@ impl Repository {
Ok(())
}

fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap, repair: bool) -> Result<Option<ChunkList>, RepositoryError> {
fn check_subtree(
&mut self,
path: PathBuf,
chunks: &[Chunk],
checked: &mut Bitmap,
repair: bool,
) -> Result<Option<ChunkList>, RepositoryError> {
let mut modified = false;
match self.check_chunks(checked, chunks, false) {
Ok(false) => return Ok(None),
Ok(true) => (),
Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into())
Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into()),
}
let mut inode = try!(self.get_inode(chunks));
// Mark the content chunks as used
if let Err(err) = self.check_inode_contents(&inode, checked) {
if repair {
warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err);
warn!(
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
path,
err
);
info!("Removing inode data");
inode.data = Some(FileData::Inline(vec![].into()));
inode.size = 0;
modified = true;
} else {
return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into())
return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into());
}
}
// Put children in todo

@@ -135,14 +157,20 @@ impl Repository {
Ok(Some(c)) => {
*chunks = c;
modified = true;
},
Err(err) => if repair {
warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err)
}
Err(err) => {
if repair {
warn!(
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
path.join(name),
err
);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err);
}
}
}
}

@@ -159,7 +187,10 @@ impl Repository {
}

fn evacuate_broken_backup(&self, name: &str) -> Result<(), RepositoryError> {
warn!("The backup {} was corrupted and needed to be modified.", name);
warn!(
"The backup {} was corrupted and needed to be modified.",
name
);
let src = self.layout.backup_path(name);
let mut dst = src.with_extension("backup.broken");
let mut num = 1;

@@ -176,7 +207,12 @@ impl Repository {
}

#[inline]
pub fn check_backup(&mut self, name: &str, backup: &mut Backup, repair: bool) -> Result<(), RepositoryError> {
pub fn check_backup(
&mut self,
name: &str,
backup: &mut Backup,
repair: bool,
) -> Result<(), RepositoryError> {
let _lock = if repair {
try!(self.write_mode());
Some(self.lock(false))

@@ -185,7 +221,12 @@ impl Repository {
};
info!("Checking backup...");
let mut checked = Bitmap::new(self.index.capacity());
match self.check_subtree(Path::new("").to_path_buf(), &backup.root, &mut checked, repair) {
match self.check_subtree(
Path::new("").to_path_buf(),
&backup.root,
&mut checked,
repair
) {
Ok(None) => (),
Ok(Some(chunks)) => {
try!(self.flush());

@@ -193,18 +234,30 @@ impl Repository {
backup.modified = true;
try!(self.evacuate_broken_backup(name));
try!(self.save_backup(backup, name));
},
Err(err) => if repair {
warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err);
try!(self.evacuate_broken_backup(name));
} else {
return Err(err)
}
Err(err) => {
if repair {
warn!(
"The root of the backup {} has been corrupted\n\tcaused by: {}",
name,
err
);
try!(self.evacuate_broken_backup(name));
} else {
return Err(err);
}
}
}
Ok(())
}

pub fn check_backup_inode(&mut self, name: &str, backup: &mut Backup, path: &Path, repair: bool) -> Result<(), RepositoryError> {
pub fn check_backup_inode(
&mut self,
name: &str,
backup: &mut Backup,
path: &Path,
repair: bool,
) -> Result<(), RepositoryError> {
let _lock = if repair {
try!(self.write_mode());
Some(self.lock(false))

@@ -218,13 +271,19 @@ impl Repository {
let mut modified = false;
if let Err(err) = self.check_inode_contents(&inode, &mut checked) {
if repair {
warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err);
warn!(
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
path,
err
);
info!("Removing inode data");
inode.data = Some(FileData::Inline(vec![].into()));
inode.size = 0;
modified = true;
} else {
return Err(IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into())
return Err(
IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into()
);
}
}
if let Some(ref mut children) = inode.children {

@@ -235,14 +294,20 @@ impl Repository {
Ok(Some(c)) => {
*chunks = c;
modified = true;
},
Err(err) => if repair {
warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err)
}
Err(err) => {
if repair {
warn!(
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
path.join(name),
err
);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err);
}
}
}
}

@@ -277,15 +342,23 @@ impl Repository {
let mut checked = Bitmap::new(self.index.capacity());
let backup_map = match self.get_all_backups() {
Ok(backup_map) => backup_map,
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
_failed))) => {
warn!("Some backups could not be read, ignoring them");
backup_map
},
Err(err) => return Err(err)
}
Err(err) => return Err(err),
};
for (name, mut backup) in ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter()) {
for (name, mut backup) in
ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter())
{
let path = format!("{}::", name);
match self.check_subtree(Path::new(&path).to_path_buf(), &backup.root, &mut checked, repair) {
match self.check_subtree(
Path::new(&path).to_path_buf(),
&backup.root,
&mut checked,
repair
) {
Ok(None) => (),
Ok(Some(chunks)) => {
try!(self.flush());

@@ -293,12 +366,18 @@ impl Repository {
backup.modified = true;
try!(self.evacuate_broken_backup(&name));
try!(self.save_backup(&backup, &name));
},
Err(err) => if repair {
warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err);
try!(self.evacuate_broken_backup(&name));
} else {
return Err(err)
}
Err(err) => {
if repair {
warn!(
"The root of the backup {} has been corrupted\n\tcaused by: {}",
name,
err
);
try!(self.evacuate_broken_backup(&name));
} else {
return Err(err);
}
}
}
}

@@ -311,10 +390,13 @@ impl Repository {
for (_id, bundle_id) in self.bundle_map.bundles() {
if self.bundles.get_bundle_info(&bundle_id).is_none() {
if repair {
warn!("Problem detected: bundle map contains unknown bundle {}", bundle_id);
warn!(
"Problem detected: bundle map contains unknown bundle {}",
bundle_id
);
rebuild = true;
} else {
return Err(IntegrityError::MissingBundle(bundle_id).into())
return Err(IntegrityError::MissingBundle(bundle_id).into());
}
}
}

@@ -323,7 +405,7 @@ impl Repository {
warn!("Problem detected: bundle map does not contain all remote bundles");
rebuild = true;
} else {
return Err(IntegrityError::RemoteBundlesNotInMap.into())
return Err(IntegrityError::RemoteBundlesNotInMap.into());
}
}
if self.bundle_map.len() > self.bundles.len() {

@@ -331,7 +413,7 @@ impl Repository {
warn!("Problem detected: bundle map contains bundles multiple times");
rebuild = true;
} else {
return Err(IntegrityError::MapContainsDuplicates.into())
return Err(IntegrityError::MapContainsDuplicates.into());
}
}
if rebuild {

@@ -347,7 +429,7 @@ impl Repository {
for bundle in self.bundles.list_bundles() {
let bundle_id = match bundle.mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
self.bundle_map.set(bundle_id, bundle.id.clone());
if self.next_meta_bundle == bundle_id {

@@ -368,7 +450,13 @@ impl Repository {
for (num, id) in bundles {
let chunks = try!(self.bundles.get_chunk_list(&id));
for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
try!(self.index.set(&hash, &Location{bundle: num as u32, chunk: i as u32}));
try!(self.index.set(
&hash,
&Location {
bundle: num as u32,
chunk: i as u32
}
));
}
}
Ok(())

@@ -382,19 +470,25 @@ impl Repository {
info!("Checking index integrity...");
if let Err(err) = self.index.check() {
if repair {
warn!("Problem detected: index was corrupted\n\tcaused by: {}", err);
warn!(
"Problem detected: index was corrupted\n\tcaused by: {}",
err
);
return self.rebuild_index();
} else {
return Err(err.into())
return Err(err.into());
}
}
info!("Checking index entries...");
if let Err(err) = self.check_index_chunks() {
if repair {
warn!("Problem detected: index entries were inconsistent\n\tcaused by: {}", err);
warn!(
"Problem detected: index entries were inconsistent\n\tcaused by: {}",
err
);
return self.rebuild_index();
} else {
return Err(err.into())
return Err(err.into());
}
}
Ok(())

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::path::{Path, PathBuf};

@@ -62,7 +62,8 @@ impl RepositoryLayout {

#[inline]
pub fn remote_exists(&self) -> bool {
self.remote_bundles_path().exists() && self.backups_path().exists() && self.remote_locks_path().exists()
self.remote_bundles_path().exists() && self.backups_path().exists() &&
self.remote_locks_path().exists()
}

#[inline]

@@ -85,13 +86,18 @@ impl RepositoryLayout {
self.0.join("bundles/cached")
}

fn bundle_path(&self, bundle: &BundleId, mut folder: PathBuf, mut count: usize) -> (PathBuf, PathBuf) {
fn bundle_path(
&self,
bundle: &BundleId,
mut folder: PathBuf,
mut count: usize,
) -> (PathBuf, PathBuf) {
let file = bundle.to_string().to_owned() + ".bundle";
{
let mut rest = &file as &str;
while count >= 100 {
if rest.len() < 10 {
break
break;
}
folder = folder.join(&rest[0..2]);
rest = &rest[2..];

@@ -118,7 +124,10 @@ impl RepositoryLayout {

#[inline]
pub fn temp_bundle_path(&self) -> PathBuf {
self.temp_bundles_path().join(BundleId::random().to_string().to_owned() + ".bundle")
self.temp_bundles_path().join(
BundleId::random().to_string().to_owned() +
".bundle"
)
}

#[inline]

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use filetime::{self, FileTime};
use xattr;

@@ -87,7 +87,7 @@ impl fmt::Display for FileType {
FileType::Symlink => write!(format, "symlink"),
FileType::BlockDevice => write!(format, "block device"),
FileType::CharDevice => write!(format, "char device"),
FileType::NamedPipe => write!(format, "named pipe")
FileType::NamedPipe => write!(format, "named pipe"),
}
}
}

@@ -167,8 +167,12 @@ serde_impl!(Inode(u8?) {
impl Inode {
pub fn get_from<P: AsRef<Path>>(path: P) -> Result<Self, InodeError> {
let path = path.as_ref();
let name = path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "_".to_string());
let meta = try!(fs::symlink_metadata(path).map_err(|e| InodeError::ReadMetadata(e, path.to_owned())));
let name = path.file_name()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "_".to_string());
let meta = try!(fs::symlink_metadata(path).map_err(|e| {
InodeError::ReadMetadata(e, path.to_owned())
}));
let mut inode = Inode::default();
inode.name = name;
if meta.is_file() {

@@ -190,7 +194,12 @@ impl Inode {
return Err(InodeError::UnsupportedFiletype(path.to_owned()));
};
if meta.file_type().is_symlink() {
inode.symlink_target = Some(try!(fs::read_link(path).map_err(|e| InodeError::ReadLinkTarget(e, path.to_owned()))).to_string_lossy().to_string());
inode.symlink_target = Some(
try!(fs::read_link(path).map_err(|e| {
InodeError::ReadLinkTarget(e, path.to_owned())
})).to_string_lossy()
.to_string()
);
}
if meta.file_type().is_block_device() || meta.file_type().is_char_device() {
let rdev = meta.rdev();

@@ -205,8 +214,14 @@ impl Inode {
if xattr::SUPPORTED_PLATFORM {
if let Ok(attrs) = xattr::list(path) {
for name in attrs {
if let Some(data) = try!(xattr::get(path, &name).map_err(|e| InodeError::ReadXattr(e, path.to_owned()))) {
inode.xattrs.insert(name.to_string_lossy().to_string(), data.into());
if let Some(data) = try!(xattr::get(path, &name).map_err(|e| {
InodeError::ReadXattr(e, path.to_owned())
}))
{
inode.xattrs.insert(
name.to_string_lossy().to_string(),
data.into()
);
}
}
}

@@ -219,39 +234,58 @@ impl Inode {
let mut file = None;
match self.file_type {
FileType::File => {
file = Some(try!(File::create(&full_path).map_err(|e| InodeError::Create(e, full_path.clone()))));
},
file = Some(try!(File::create(&full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
})));
}
FileType::Directory => {
try!(fs::create_dir(&full_path).map_err(|e| InodeError::Create(e, full_path.clone())));
},
try!(fs::create_dir(&full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
}));
}
FileType::Symlink => {
if let Some(ref src) = self.symlink_target {
try!(symlink(src, &full_path).map_err(|e| InodeError::Create(e, full_path.clone())));
try!(symlink(src, &full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
}));
} else {
return Err(InodeError::Integrity("Symlink without target"))
return Err(InodeError::Integrity("Symlink without target"));
}
},
}
FileType::NamedPipe => {
let name = try!(ffi::CString::new(full_path.as_os_str().as_bytes()).map_err(|_| InodeError::Integrity("Name contains nulls")));
let name = try!(
ffi::CString::new(full_path.as_os_str().as_bytes())
.map_err(|_| InodeError::Integrity("Name contains nulls"))
);
let mode = self.mode | libc::S_IFIFO;
if unsafe { libc::mkfifo(name.as_ptr(), mode) } != 0 {
return Err(InodeError::Create(io::Error::last_os_error(), full_path.clone()));
return Err(InodeError::Create(
io::Error::last_os_error(),
full_path.clone()
));
}
},
}
FileType::BlockDevice | FileType::CharDevice => {
let name = try!(ffi::CString::new(full_path.as_os_str().as_bytes()).map_err(|_| InodeError::Integrity("Name contains nulls")));
let mode = self.mode | match self.file_type {
FileType::BlockDevice => libc::S_IFBLK,
FileType::CharDevice => libc::S_IFCHR,
_ => unreachable!()
};
let name = try!(
ffi::CString::new(full_path.as_os_str().as_bytes())
.map_err(|_| InodeError::Integrity("Name contains nulls"))
);
let mode = self.mode |
match self.file_type {
FileType::BlockDevice => libc::S_IFBLK,
FileType::CharDevice => libc::S_IFCHR,
_ => unreachable!(),
};
let device = if let Some((major, minor)) = self.device {
unsafe { libc::makedev(major, minor) }
} else {
return Err(InodeError::Integrity("Device without id"))
return Err(InodeError::Integrity("Device without id"));
};
if unsafe { libc::mknod(name.as_ptr(), mode, device) } != 0 {
return Err(InodeError::Create(io::Error::last_os_error(), full_path.clone()));
return Err(InodeError::Create(
io::Error::last_os_error(),
full_path.clone()
));
}
}
}

@@ -271,26 +305,37 @@ impl Inode {
}
}
if let Err(err) = fs::set_permissions(&full_path, Permissions::from_mode(self.mode)) {
warn!("Failed to set permissions {:o} on {:?}: {}", self.mode, full_path, err);
warn!(
"Failed to set permissions {:o} on {:?}: {}",
self.mode,
full_path,
err
);
}
if let Err(err) = chown(&full_path, self.user, self.group) {
warn!("Failed to set user {} and group {} on {:?}: {}", self.user, self.group, full_path, err);
warn!(
"Failed to set user {} and group {} on {:?}: {}",
self.user,
self.group,
full_path,
err
);
}
Ok(file)
}

#[inline]
pub fn is_same_meta(&self, other: &Inode) -> bool {
self.file_type == other.file_type && self.size == other.size && self.mode == other.mode
&& self.user == other.user && self.group == other.group && self.name == other.name
&& self.timestamp == other.timestamp && self.symlink_target == other.symlink_target
self.file_type == other.file_type && self.size == other.size &&
self.mode == other.mode && self.user == other.user &&
self.group == other.group && self.name == other.name &&
self.timestamp == other.timestamp && self.symlink_target == other.symlink_target
}

#[inline]
pub fn is_same_meta_quick(&self, other: &Inode) -> bool {
self.timestamp == other.timestamp
&& self.file_type == other.file_type
&& self.size == other.size
self.timestamp == other.timestamp && self.file_type == other.file_type &&
self.size == other.size
}

#[inline]

@@ -306,13 +351,17 @@ impl Inode {


impl Repository {
pub fn create_inode<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>) -> Result<Inode, RepositoryError> {
pub fn create_inode<P: AsRef<Path>>(
&mut self,
path: P,
reference: Option<&Inode>,
) -> Result<Inode, RepositoryError> {
let mut inode = try!(Inode::get_from(path.as_ref()));
if inode.file_type == FileType::File && inode.size > 0 {
if let Some(reference) = reference {
if reference.is_same_meta_quick(&inode) {
inode.data = reference.data.clone();
return Ok(inode)
return Ok(inode);
}
}
let mut file = try!(File::open(path));

@@ -345,16 +394,20 @@ impl Repository {
Ok(try!(Inode::decode(&try!(self.get_data(chunks)))))
}

pub fn save_inode_at<P: AsRef<Path>>(&mut self, inode: &Inode, path: P) -> Result<(), RepositoryError> {
pub fn save_inode_at<P: AsRef<Path>>(
&mut self,
inode: &Inode,
path: P,
) -> Result<(), RepositoryError> {
if let Some(mut file) = try!(inode.create_at(path.as_ref())) {
if let Some(ref contents) = inode.data {
match *contents {
FileData::Inline(ref data) => {
try!(file.write_all(data));
},
}
FileData::ChunkedDirect(ref chunks) => {
try!(self.get_stream(chunks, &mut file));
},
}
FileData::ChunkedIndirect(ref chunks) => {
let chunk_data = try!(self.get_data(chunks));
let chunks = ChunkList::read_from(&chunk_data);

@@ -11,7 +11,7 @@ mod backup_file;
mod tarfile;
mod layout;

use ::prelude::*;
use prelude::*;

use std::mem;
use std::cmp::max;

@@ -47,7 +47,10 @@ pub struct Location {
}
impl Location {
pub fn new(bundle: u32, chunk: u32) -> Self {
Location{ bundle: bundle, chunk: chunk }
Location {
bundle: bundle,
chunk: chunk
}
}
}

@@ -88,28 +91,42 @@ pub struct Repository {


impl Repository {
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(path: P, config: Config, remote: R) -> Result<Self, RepositoryError> {
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(
path: P,
config: Config,
remote: R,
) -> Result<Self, RepositoryError> {
let layout = RepositoryLayout::new(path.as_ref().to_path_buf());
try!(fs::create_dir(layout.base_path()));
try!(File::create(layout.excludes_path()).and_then(|mut f| f.write_all(DEFAULT_EXCLUDES)));
try!(File::create(layout.excludes_path()).and_then(|mut f| {
f.write_all(DEFAULT_EXCLUDES)
}));
try!(fs::create_dir(layout.keys_path()));
try!(fs::create_dir(layout.local_locks_path()));
try!(symlink(remote, layout.remote_path()));
try!(File::create(layout.remote_readme_path()).and_then(|mut f| f.write_all(REPOSITORY_README)));
try!(File::create(layout.remote_readme_path()).and_then(
|mut f| {
f.write_all(REPOSITORY_README)
}
));
try!(fs::create_dir_all(layout.remote_locks_path()));
try!(config.save(layout.config_path()));
try!(BundleDb::create(layout.clone()));
try!(Index::<Hash, Location>::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION));
try!(Index::<Hash, Location>::create(
layout.index_path(),
&INDEX_MAGIC,
INDEX_VERSION
));
try!(BundleMap::create().save(layout.bundle_map_path()));
try!(fs::create_dir_all(layout.backups_path()));
Self::open(path)
}

#[allow(unknown_lints,useless_let_if_seq)]
#[allow(unknown_lints, useless_let_if_seq)]
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, RepositoryError> {
let layout = RepositoryLayout::new(path.as_ref().to_path_buf());
if !layout.remote_exists() {
return Err(RepositoryError::NoRemote)
return Err(RepositoryError::NoRemote);
}
let config = try!(Config::load(layout.config_path()));
let remote_locks = LockFolder::new(layout.remote_locks_path());

@@ -118,13 +135,21 @@ impl Repository {
let lock = try!(local_locks.lock(false));
let crypto = Arc::new(Mutex::new(try!(Crypto::open(layout.keys_path()))));
let (bundles, new, gone) = try!(BundleDb::open(layout.clone(), crypto.clone()));
let (index, mut rebuild_index) = match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } {
Ok(index) => (index, false),
Err(err) => {
error!("Failed to load local index:\n\tcaused by: {}", err);
(try!(Index::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION)), true)
}
};
let (index, mut rebuild_index) =
match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } {
Ok(index) => (index, false),
Err(err) => {
error!("Failed to load local index:\n\tcaused by: {}", err);
(
try!(Index::create(
layout.index_path(),
&INDEX_MAGIC,
INDEX_VERSION
)),
true
)
}
};
let (bundle_map, rebuild_bundle_map) = match BundleMap::load(layout.bundle_map_path()) {
Ok(bundle_map) => (bundle_map, false),
Err(err) => {

@@ -163,7 +188,12 @@ impl Repository {
if !new.is_empty() {
info!("Adding {} new bundles to index", new.len());
try!(repo.write_mode());
for bundle in ProgressIter::new("adding bundles to index", new.len(), new.into_iter()) {
for bundle in ProgressIter::new(
"adding bundles to index",
new.len(),
new.into_iter()
)
{
try!(repo.add_new_remote_bundle(bundle))
}
save_bundle_map = true;

@@ -188,7 +218,11 @@ impl Repository {
Ok(repo)
}

pub fn import<P: AsRef<Path>, R: AsRef<Path>>(path: P, remote: R, key_files: Vec<String>) -> Result<Self, RepositoryError> {
pub fn import<P: AsRef<Path>, R: AsRef<Path>>(
path: P,
remote: R,
key_files: Vec<String>,
) -> Result<Self, RepositoryError> {
let path = path.as_ref();
let mut repo = try!(Repository::create(path, Config::default(), remote));
for file in key_files {

@@ -202,15 +236,24 @@ impl Repository {
repo.config = backup.config;
try!(repo.save_config())
} else {
warn!("No backup found in the repository to take configuration from, please set the configuration manually.");
warn!(
"No backup found in the repository to take configuration from, please set the configuration manually."
);
}
Ok(repo)
}

#[inline]
pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> {
pub fn register_key(
&mut self,
public: PublicKey,
secret: SecretKey,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
Ok(try!(self.crypto.lock().unwrap().register_secret_key(public, secret)))
Ok(try!(self.crypto.lock().unwrap().register_secret_key(
public,
secret
)))
}

#[inline]

@@ -268,7 +311,10 @@ impl Repository {
mem::swap(&mut self.data_bundle, &mut finished);
{
let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
self.bundle_map.set(self.next_data_bundle, bundle.id.clone());
self.bundle_map.set(
self.next_data_bundle,
bundle.id.clone()
);
}
self.next_data_bundle = self.next_free_bundle_id()
}

@@ -277,7 +323,10 @@ impl Repository {
mem::swap(&mut self.meta_bundle, &mut finished);
{
let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
self.bundle_map.set(self.next_meta_bundle, bundle.id.clone());
self.bundle_map.set(
self.next_meta_bundle,
bundle.id.clone()
);
}
self.next_meta_bundle = self.next_free_bundle_id()
}

@@ -291,12 +340,12 @@ impl Repository {

fn add_new_remote_bundle(&mut self, bundle: BundleInfo) -> Result<(), RepositoryError> {
if self.bundle_map.find(&bundle.id).is_some() {
return Ok(())
return Ok(());
}
debug!("Adding new bundle to index: {}", bundle.id);
let bundle_id = match bundle.mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
let chunks = try!(self.bundles.get_chunk_list(&bundle.id));
self.bundle_map.set(bundle_id, bundle.id.clone());

@@ -307,7 +356,14 @@ impl Repository {
self.next_data_bundle = self.next_free_bundle_id()
}
for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
if let Some(old) = try!(self.index.set(&hash, &Location{bundle: bundle_id as u32, chunk: i as u32})) {
if let Some(old) = try!(self.index.set(
&hash,
&Location {
bundle: bundle_id as u32,
chunk: i as u32
}
))
{
// Duplicate chunk, forced ordering: higher bundle id wins
let old_bundle_id = try!(self.get_bundle_id(old.bundle));
if old_bundle_id > bundle.id {

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::collections::{HashMap, HashSet, BTreeMap};
use std::path::{Path, PathBuf};

@@ -82,17 +82,21 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
let path = try!(entry.path());
let header = entry.header();
let file_type = match header.entry_type() {
tar::EntryType::Regular | tar::EntryType::Link | tar::EntryType::Continuous => FileType::File,
tar::EntryType::Regular |
tar::EntryType::Link |
tar::EntryType::Continuous => FileType::File,
tar::EntryType::Symlink => FileType::Symlink,
tar::EntryType::Directory => FileType::Directory,
tar::EntryType::Block => FileType::BlockDevice,
tar::EntryType::Char => FileType::CharDevice,
tar::EntryType::Fifo => FileType::NamedPipe,
_ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into())
_ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()),
};
Inode {
file_type: file_type,
name: path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "/".to_string()),
name: path.file_name()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "/".to_string()),
symlink_target: try!(entry.link_name()).map(|s| s.to_string_lossy().to_string()),
size: try!(header.size()),
mode: try!(header.mode()),

@@ -100,8 +104,13 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
group: try!(header.gid()),
timestamp: try!(header.mtime()) as i64,
device: match file_type {
FileType::BlockDevice | FileType::CharDevice => Some((try!(header.device_major()).unwrap_or(0), try!(header.device_minor()).unwrap_or(0))),
_ => None
FileType::BlockDevice | FileType::CharDevice => Some((
try!(header.device_major())
.unwrap_or(0),
try!(header.device_minor())
.unwrap_or(0)
)),
_ => None,
},
..Default::default()
}

@@ -111,7 +120,10 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
let ext = try!(ext);
let key = ext.key().unwrap_or("");
if key.starts_with(PAX_XATTR_PREFIX) {
inode.xattrs.insert(key[PAX_XATTR_PREFIX.len()..].to_string(), ext.value_bytes().to_vec().into());
inode.xattrs.insert(
key[PAX_XATTR_PREFIX.len()..].to_string(),
ext.value_bytes().to_vec().into()
);
}
}
}

@@ -122,7 +134,10 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
}

impl Repository {
fn import_tar_entry<R: Read>(&mut self, entry: &mut tar::Entry<R>) -> Result<Inode, RepositoryError> {
fn import_tar_entry<R: Read>(
&mut self,
entry: &mut tar::Entry<R>,
) -> Result<Inode, RepositoryError> {
let mut inode = try!(inode_from_entry(entry));
if inode.size < 100 {
let mut data = Vec::with_capacity(inode.size as usize);

@@ -142,7 +157,12 @@ impl Repository {
Ok(inode)
}

fn import_tarfile_as_inode<R: Read>(&mut self, backup: &mut Backup, input: R, failed_paths: &mut Vec<PathBuf>) -> Result<(Inode, ChunkList), RepositoryError> {
fn import_tarfile_as_inode<R: Read>(
&mut self,
backup: &mut Backup,
input: R,
failed_paths: &mut Vec<PathBuf>,
) -> Result<(Inode, ChunkList), RepositoryError> {
let mut tarfile = tar::Archive::new(input);
// Step 1: create inodes for all entries
let mut inodes = HashMap::<PathBuf, (Inode, HashSet<String>)>::new();

@@ -174,12 +194,14 @@ impl Repository {
backup.group_names.insert(inode.group, name.to_string());
}
inodes.insert(path, (inode, HashSet::new()));
},
Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
}
Err(RepositoryError::Inode(_)) |
Err(RepositoryError::Chunker(_)) |
Err(RepositoryError::Io(_)) => {
info!("Failed to backup {:?}", path);
failed_paths.push(path);
continue
},
continue;
}
Err(err) => {
return Err(err);
}

@@ -198,7 +220,9 @@ impl Repository {
let (inode, _) = inodes.remove(&path).unwrap();
let chunks = try!(self.put_inode(&inode));
if let Some(parent_path) = path.parent() {
if let Some(&mut (ref mut parent_inode, ref mut children)) = inodes.get_mut(parent_path) {
if let Some(&mut (ref mut parent_inode, ref mut children)) =
inodes.get_mut(parent_path)
{
children.remove(&inode.name);
parent_inode.cum_size += inode.cum_size;
for &(_, len) in chunks.iter() {

@@ -206,8 +230,11 @@ impl Repository {
}
parent_inode.cum_files += inode.cum_files;
parent_inode.cum_dirs += inode.cum_dirs;
parent_inode.children.as_mut().unwrap().insert(inode.name.clone(), chunks);
continue
parent_inode.children.as_mut().unwrap().insert(
inode.name.clone(),
chunks
);
continue;
}
}
roots.push((inode, chunks));

@@ -242,11 +269,14 @@ impl Repository {
}
}

pub fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P) -> Result<Backup, RepositoryError> {
pub fn import_tarfile<P: AsRef<Path>>(
&mut self,
tarfile: P,
) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let mut backup = Backup::default();

@ -258,9 +288,17 @@ impl Repository {
|
|||
let mut failed_paths = vec![];
|
||||
let tarfile = tarfile.as_ref();
|
||||
let (root_inode, chunks) = if tarfile == Path::new("-") {
|
||||
try!(self.import_tarfile_as_inode(&mut backup, io::stdin(), &mut failed_paths))
|
||||
try!(self.import_tarfile_as_inode(
|
||||
&mut backup,
|
||||
io::stdin(),
|
||||
&mut failed_paths
|
||||
))
|
||||
} else {
|
||||
try!(self.import_tarfile_as_inode(&mut backup, try!(File::open(tarfile)), &mut failed_paths))
|
||||
try!(self.import_tarfile_as_inode(
|
||||
&mut backup,
|
||||
try!(File::open(tarfile)),
|
||||
&mut failed_paths
|
||||
))
|
||||
};
|
||||
backup.root = chunks;
|
||||
try!(self.flush());
|
||||
|
@ -284,16 +322,34 @@ impl Repository {
|
|||
}
|
||||
}
|
||||
|
||||
fn export_xattrs<W: Write>(&mut self, inode: &Inode, tarfile: &mut tar::Builder<W>) -> Result<(), RepositoryError> {
|
||||
fn export_xattrs<W: Write>(
|
||||
&mut self,
|
||||
inode: &Inode,
|
||||
tarfile: &mut tar::Builder<W>,
|
||||
) -> Result<(), RepositoryError> {
|
||||
let mut pax = PaxBuilder::new();
|
||||
for (key, value) in &inode.xattrs {
|
||||
pax.add(&format!("{}{}", PAX_XATTR_PREFIX,key), str::from_utf8(value).unwrap());
|
||||
pax.add(
|
||||
&format!("{}{}", PAX_XATTR_PREFIX, key),
|
||||
str::from_utf8(value).unwrap()
|
||||
);
|
||||
}
|
||||
Ok(try!(tarfile.append_pax_extensions(&pax)))
|
||||
}
|
||||
|
||||
fn export_tarfile_recurse<W: Write>(&mut self, backup: &Backup, path: &Path, inode: Inode, tarfile: &mut tar::Builder<W>, skip_root: bool) -> Result<(), RepositoryError> {
|
||||
let path = if skip_root { path.to_path_buf() } else { path.join(&inode.name) };
|
||||
fn export_tarfile_recurse<W: Write>(
|
||||
&mut self,
|
||||
backup: &Backup,
|
||||
path: &Path,
|
||||
inode: Inode,
|
||||
tarfile: &mut tar::Builder<W>,
|
||||
skip_root: bool,
|
||||
) -> Result<(), RepositoryError> {
|
||||
let path = if skip_root {
|
||||
path.to_path_buf()
|
||||
} else {
|
||||
path.join(&inode.name)
|
||||
};
|
||||
if inode.file_type != FileType::Directory || !skip_root {
|
||||
if !inode.xattrs.is_empty() {
|
||||
try!(self.export_xattrs(&inode, tarfile));
|
||||
|
@ -332,13 +388,15 @@ impl Repository {
|
|||
FileType::Directory => tar::EntryType::Directory,
|
||||
FileType::BlockDevice => tar::EntryType::Block,
|
||||
FileType::CharDevice => tar::EntryType::Char,
|
||||
FileType::NamedPipe => tar::EntryType::Fifo
|
||||
FileType::NamedPipe => tar::EntryType::Fifo,
|
||||
});
|
||||
header.set_cksum();
|
||||
match inode.data {
|
||||
None => try!(tarfile.append(&header, Cursor::new(&[]))),
|
||||
Some(FileData::Inline(data)) => try!(tarfile.append(&header, Cursor::new(data))),
|
||||
Some(FileData::ChunkedDirect(chunks)) => try!(tarfile.append(&header, self.get_reader(chunks))),
|
||||
Some(FileData::ChunkedDirect(chunks)) => {
|
||||
try!(tarfile.append(&header, self.get_reader(chunks)))
|
||||
}
|
||||
Some(FileData::ChunkedIndirect(chunks)) => {
|
||||
let chunks = ChunkList::read_from(&try!(self.get_data(&chunks)));
|
||||
try!(tarfile.append(&header, self.get_reader(chunks)))
|
||||
|
@ -348,24 +406,46 @@ impl Repository {
|
|||
if let Some(children) = inode.children {
|
||||
for chunks in children.values() {
|
||||
let inode = try!(self.get_inode(chunks));
|
||||
try!(self.export_tarfile_recurse(backup, &path, inode, tarfile, false));
|
||||
try!(self.export_tarfile_recurse(
|
||||
backup,
|
||||
&path,
|
||||
inode,
|
||||
tarfile,
|
||||
false
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, tarfile: P) -> Result<(), RepositoryError> {
|
||||
pub fn export_tarfile<P: AsRef<Path>>(
|
||||
&mut self,
|
||||
backup: &Backup,
|
||||
inode: Inode,
|
||||
tarfile: P,
|
||||
) -> Result<(), RepositoryError> {
|
||||
let tarfile = tarfile.as_ref();
|
||||
if tarfile == Path::new("-") {
|
||||
let mut tarfile = tar::Builder::new(io::stdout());
|
||||
try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile, true));
|
||||
try!(self.export_tarfile_recurse(
|
||||
backup,
|
||||
Path::new(""),
|
||||
inode,
|
||||
&mut tarfile,
|
||||
true
|
||||
));
|
||||
try!(tarfile.finish());
|
||||
} else {
|
||||
let mut tarfile = tar::Builder::new(try!(File::create(tarfile)));
|
||||
try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile, true));
|
||||
try!(self.export_tarfile_recurse(
|
||||
backup,
|
||||
Path::new(""),
|
||||
inode,
|
||||
&mut tarfile,
|
||||
true
|
||||
));
|
||||
try!(tarfile.finish());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
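The import and export paths above treat the pseudo-path "-" as stdin/stdout. A minimal, self-contained sketch of that convention with the tar crate (the function and entry names here are illustrative, not zvault's API):

use std::fs::File;
use std::io::{self, Write};
use std::path::Path;

// Write `data` as a single tar entry, either to stdout or to a file.
fn write_tar<W: Write>(mut builder: tar::Builder<W>, data: &[u8]) -> io::Result<()> {
    let mut header = tar::Header::new_gnu();
    header.set_path("example.txt")?;
    header.set_size(data.len() as u64);
    header.set_cksum();
    builder.append(&header, data)?; // &[u8] implements Read
    builder.finish()
}

fn export(path: &Path, data: &[u8]) -> io::Result<()> {
    if path == Path::new("-") {
        write_tar(tar::Builder::new(io::stdout()), data) // same check as export_tarfile
    } else {
        write_tar(tar::Builder::new(File::create(path)?), data)
    }
}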
@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use std::collections::HashSet;

@ -13,7 +13,12 @@ impl Repository {
        }
    }

    pub fn vacuum(&mut self, ratio: f32, combine: bool, force: bool) -> Result<(), RepositoryError> {
    pub fn vacuum(
        &mut self,
        ratio: f32,
        combine: bool,
        force: bool,
    ) -> Result<(), RepositoryError> {
        try!(self.flush());
        info!("Locking repository");
        try!(self.write_mode());

@ -27,7 +32,12 @@ impl Repository {
            data_total += bundle.info.encoded_size;
            data_used += bundle.get_used_size();
        }
        info!("Usage: {} of {}, {:.1}%", to_file_size(data_used as u64), to_file_size(data_total as u64), data_used as f32/data_total as f32*100.0);
        info!(
            "Usage: {} of {}, {:.1}%",
            to_file_size(data_used as u64),
            to_file_size(data_total as u64),
            data_used as f32 / data_total as f32 * 100.0
        );
        let mut rewrite_bundles = HashSet::new();
        let mut reclaim_space = 0;
        for (id, bundle) in &usage {

@ -58,12 +68,21 @@ impl Repository {
                }
            }
        }
        info!("Reclaiming {} by rewriting {} bundles", to_file_size(reclaim_space as u64), rewrite_bundles.len());
        info!(
            "Reclaiming {} by rewriting {} bundles",
            to_file_size(reclaim_space as u64),
            rewrite_bundles.len()
        );
        if !force {
            self.dirty = false;
            return Ok(())
            return Ok(());
        }
        for id in ProgressIter::new("rewriting bundles", rewrite_bundles.len(), rewrite_bundles.iter()) {
        for id in ProgressIter::new(
            "rewriting bundles",
            rewrite_bundles.len(),
            rewrite_bundles.iter()
        )
        {
            let bundle = &usage[id];
            let bundle_id = self.bundle_map.get(*id).unwrap();
            let chunks = try!(self.bundles.get_chunk_list(&bundle_id));

@ -71,7 +90,7 @@ impl Repository {
            for (chunk, &(hash, _len)) in chunks.into_iter().enumerate() {
                if !bundle.chunk_usage.get(chunk) {
                    try!(self.index.delete(&hash));
                    continue
                    continue;
                }
                let data = try!(self.bundles.get_chunk(&bundle_id, chunk));
                try!(self.put_chunk_override(mode, hash, &data));

@ -81,7 +100,12 @@ impl Repository {
        info!("Checking index");
        for (hash, location) in self.index.iter() {
            if rewrite_bundles.contains(&location.bundle) {
                panic!("Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}", hash, location.bundle, location.chunk);
                panic!(
                    "Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}",
                    hash,
                    location.bundle,
                    location.chunk
                );
            }
        }
        info!("Deleting {} bundles", rewrite_bundles.len());
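The vacuum pass sums each bundle's encoded size against its still-used size and rewrites bundles whose used fraction drops below the given ratio. A simplified stand-in for that selection step (Bundle here is an invented type, not zvault's):

struct Bundle {
    encoded_size: usize,
    used_size: usize
}

// Returns the ids worth rewriting plus the space that would be reclaimed.
fn select_for_rewrite(usage: &[(u32, Bundle)], ratio: f32) -> (Vec<u32>, usize) {
    let mut rewrite = Vec::new();
    let mut reclaim = 0;
    for &(id, ref bundle) in usage {
        let used = bundle.used_size as f32 / bundle.encoded_size as f32;
        if used <= ratio {
            rewrite.push(id);
            // Rewriting keeps the used chunks, so only the unused part is freed.
            reclaim += bundle.encoded_size - bundle.used_size;
        }
    }
    (rewrite, reclaim)
}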
@ -8,7 +8,7 @@ pub struct Bitmap {
impl Bitmap {
    /// Creates a new bitmap
    pub fn new(len: usize) -> Self {
        let len = (len+7)/8;
        let len = (len + 7) / 8;
        let mut bytes = Vec::with_capacity(len);
        bytes.resize(len, 0);
        Self { bytes: bytes }

@ -28,7 +28,7 @@ impl Bitmap {

    #[inline]
    fn convert_index(&self, index: usize) -> (usize, u8) {
        (index/8, 1u8<<(index%8))
        (index / 8, 1u8 << (index % 8))
    }

    #[inline]
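The two expressions rustfmt touched here are the heart of the bitmap: a bit index maps to a byte offset plus a mask. A self-contained sketch of the same arithmetic:

struct Bitmap {
    bytes: Vec<u8>
}

impl Bitmap {
    fn new(len: usize) -> Self {
        Bitmap { bytes: vec![0; (len + 7) / 8] } // round up to whole bytes
    }

    fn convert_index(&self, index: usize) -> (usize, u8) {
        (index / 8, 1u8 << (index % 8)) // (byte offset, bit mask)
    }

    fn set(&mut self, index: usize) {
        let (byte, mask) = self.convert_index(index);
        self.bytes[byte] |= mask;
    }

    fn get(&self, index: usize) -> bool {
        let (byte, mask) = self.convert_index(index);
        self.bytes[byte] & mask != 0
    }
}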
@ -63,7 +63,7 @@ impl ChunkList {
        if src.len() % 20 != 0 {
            warn!("Reading truncated chunk list");
        }
        ChunkList::read_n_from(src.len()/20, &mut Cursor::new(src)).unwrap()
        ChunkList::read_n_from(src.len() / 20, &mut Cursor::new(src)).unwrap()
    }

    #[inline]

@ -111,7 +111,10 @@ impl DerefMut for ChunkList {

impl Serialize for ChunkList {
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut buf = Vec::with_capacity(self.encoded_size());
        self.write_to(&mut buf).unwrap();
        Bytes::from(&buf as &[u8]).serialize(serializer)

@ -120,12 +123,17 @@ impl Serialize for ChunkList {

impl<'a> Deserialize<'a> for ChunkList {
    #[inline]
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'a> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        let data: Vec<u8> = try!(ByteBuf::deserialize(deserializer)).into();
        if data.len() % 20 != 0 {
            return Err(D::Error::custom("Invalid chunk list length"));
        }
        Ok(ChunkList::read_n_from(data.len()/20, &mut Cursor::new(data)).unwrap())
        Ok(
            ChunkList::read_n_from(data.len() / 20, &mut Cursor::new(data)).unwrap()
        )
    }
}


@ -171,7 +179,10 @@ mod tests {
        let mut list = ChunkList::new();
        list.push((Hash::default(), 0));
        list.push((Hash::default(), 1));
        assert_eq!(list.into_inner(), vec![(Hash::default(), 0), (Hash::default(), 1)]);
        assert_eq!(
            list.into_inner(),
            vec![(Hash::default(), 0), (Hash::default(), 1)]
        );
    }

    #[test]

@ -182,8 +193,8 @@ mod tests {
        let mut buf = Vec::new();
        assert!(list.write_to(&mut buf).is_ok());
        assert_eq!(buf.len(), 40);
        assert_eq!(&buf[16..20], &[0,0,0,0]);
        assert_eq!(&buf[36..40], &[1,0,0,0]);
        assert_eq!(&buf[16..20], &[0, 0, 0, 0]);
        assert_eq!(&buf[36..40], &[1, 0, 0, 0]);
    }

    #[test]

@ -196,7 +207,48 @@ mod tests {

    #[test]
    fn test_read_from() {
        let data = vec![0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 1,0,0,0];
        let data = vec![
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            0,
            1,
            0,
            0,
            0,
        ];
        let list = ChunkList::read_from(&data);
        assert_eq!(list.len(), 2);
        assert_eq!(list[0], (Hash::default(), 0));

@ -212,7 +264,7 @@ mod tests {
        assert!(list.write_to(&mut buf).is_ok());
        let encoded = msgpack::encode(&list).unwrap();
        assert_eq!(buf, &encoded[2..]);
        assert_eq!(&[196,40], &encoded[..2]);
        assert_eq!(&[196, 40], &encoded[..2]);
    }

    #[test]

@ -220,7 +272,7 @@ mod tests {
        let mut list = ChunkList::new();
        list.push((Hash::default(), 0));
        list.push((Hash::default(), 1));
        let mut buf = vec![196,40];
        let mut buf = vec![196, 40];
        assert!(list.write_to(&mut buf).is_ok());
        assert!(msgpack::decode::<ChunkList>(&buf).is_ok());
        assert_eq!(msgpack::decode::<ChunkList>(&buf).unwrap(), list);
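The tests above pin down the on-disk layout: every chunk entry is 20 bytes, a 16-byte hash followed by a little-endian u32 length, and the msgpack prefix [196, 40] is the bin8 marker (0xC4) plus the payload length (two 20-byte entries). A standalone sketch of that layout:

// Encode one (hash, length) entry in the 20-byte format the tests assert.
fn write_entry(buf: &mut Vec<u8>, hash: [u8; 16], len: u32) {
    buf.extend_from_slice(&hash);
    buf.extend_from_slice(&len.to_le_bytes());
}

// Decode the entry at the start of `src` (panics if src is shorter than 20 bytes).
fn read_entry(src: &[u8]) -> ([u8; 16], u32) {
    let mut hash = [0u8; 16];
    hash.copy_from_slice(&src[..16]);
    let len = u32::from_le_bytes([src[16], src[17], src[18], src[19]]);
    (hash, len)
}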
@ -55,7 +55,11 @@ impl<T> ProgressIter<T> {
        let msg = format!("{}: ", msg);
        bar.message(&msg);
        bar.set_max_refresh_rate(Some(Duration::from_millis(100)));
        ProgressIter { inner: inner, bar: bar, msg: msg }
        ProgressIter {
            inner: inner,
            bar: bar,
            msg: msg
        }
    }
}

@ -72,7 +76,7 @@ impl<T: Iterator> Iterator for ProgressIter<T> {
                let msg = self.msg.clone() + "done.";
                self.bar.finish_print(&msg);
                None
            },
            }
            Some(item) => {
                self.bar.inc();
                Some(item)
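ProgressIter is a plain wrapping iterator: it forwards next() and updates the bar on each item, printing a final message once the inner iterator runs dry. The pattern in miniature (Counted is an invented stand-in, not the real type):

struct Counted<I> {
    inner: I,
    seen: usize
}

impl<I: Iterator> Iterator for Counted<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        match self.inner.next() {
            Some(item) => {
                self.seen += 1; // ProgressIter calls bar.inc() here
                Some(item)
            }
            None => None, // ProgressIter prints its "done." message here
        }
    }
}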
@ -57,7 +57,10 @@ pub struct Compression {
}
impl Default for Compression {
    fn default() -> Self {
        Compression { method: CompressionMethod::Brotli, level: 3 }
        Compression {
            method: CompressionMethod::Brotli,
            level: 3
        }
    }
}
serde_impl!(Compression(u64) {

@ -74,7 +77,9 @@ impl Compression {

    pub fn from_string(name: &str) -> Result<Self, CompressionError> {
        let (name, level) = if let Some(pos) = name.find('/') {
            let level = try!(u8::from_str(&name[pos+1..]).map_err(|_| CompressionError::UnsupportedCodec(name.to_string())));
            let level = try!(u8::from_str(&name[pos + 1..]).map_err(|_| {
                CompressionError::UnsupportedCodec(name.to_string())
            }));
            let name = &name[..pos];
            (name, level)
        } else {

@ -85,9 +90,12 @@ impl Compression {
            "brotli" => CompressionMethod::Brotli,
            "lzma" | "lzma2" | "xz" => CompressionMethod::Lzma,
            "lz4" => CompressionMethod::Lz4,
            _ => return Err(CompressionError::UnsupportedCodec(name.to_string()))
            _ => return Err(CompressionError::UnsupportedCodec(name.to_string())),
        };
        Ok(Compression { method: method, level: level })
        Ok(Compression {
            method: method,
            level: level
        })
    }

    pub fn name(&self) -> &'static str {

@ -103,7 +111,7 @@ impl Compression {
        let name = CString::new(self.name().as_bytes()).unwrap();
        let codec = unsafe { squash_get_codec(name.as_ptr()) };
        if codec.is_null() {
            return Err(CompressionError::InitializeCodec)
            return Err(CompressionError::InitializeCodec);
        }
        Ok(codec)
    }

@ -117,25 +125,27 @@ impl Compression {
        let codec = try!(self.codec());
        let options = unsafe { squash_options_new(codec, ptr::null::<()>()) };
        if options.is_null() {
            return Err(CompressionError::InitializeOptions)
            return Err(CompressionError::InitializeOptions);
        }
        let option = CString::new("level");
        let value = CString::new(format!("{}", self.level));
        let res = unsafe { squash_options_parse_option(
            options,
            option.unwrap().as_ptr(),
            value.unwrap().as_ptr()
        )};
        let res = unsafe {
            squash_options_parse_option(options, option.unwrap().as_ptr(), value.unwrap().as_ptr())
        };
        if res != SQUASH_OK {
            //panic!(unsafe { CStr::from_ptr(squash_status_to_string(res)).to_str().unwrap() });
            return Err(CompressionError::InitializeOptions)
            return Err(CompressionError::InitializeOptions);
        }
        Ok(options)
    }

    #[inline]
    fn error(code: SquashStatus) -> CompressionError {
        CompressionError::Operation(unsafe { CStr::from_ptr(squash_status_to_string(code)).to_str().unwrap() })
        CompressionError::Operation(unsafe {
            CStr::from_ptr(squash_status_to_string(code))
                .to_str()
                .unwrap()
        })
    }

    pub fn compress(&self, data: &[u8]) -> Result<Vec<u8>, CompressionError> {

@ -148,18 +158,20 @@ impl Compression {
            data.len() as usize
        )};*/
        let mut buf = Vec::with_capacity(size as usize);
        let res = unsafe { squash_codec_compress_with_options(
            codec,
            &mut size,
            buf.as_mut_ptr(),
            data.len(),
            data.as_ptr(),
            options)
        let res = unsafe {
            squash_codec_compress_with_options(
                codec,
                &mut size,
                buf.as_mut_ptr(),
                data.len(),
                data.as_ptr(),
                options
            )
        };
        if res != SQUASH_OK {
            println!("{:?}", data);
            println!("{:?}", data);
            println!("{}, {}", data.len(), size);
            return Err(Self::error(res))
            return Err(Self::error(res));
        }
        unsafe { buf.set_len(size) };
        Ok(buf)

@ -167,25 +179,24 @@ impl Compression {

    pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>, CompressionError> {
        let codec = try!(self.codec());
        let mut size = unsafe { squash_codec_get_uncompressed_size(
            codec,
            data.len(),
            data.as_ptr()
        )};
        let mut size =
            unsafe { squash_codec_get_uncompressed_size(codec, data.len(), data.as_ptr()) };
        if size == 0 {
            size = 100 * data.len();
        }
        let mut buf = Vec::with_capacity(size);
        let res = unsafe { squash_codec_decompress(
            codec,
            &mut size,
            buf.as_mut_ptr(),
            data.len(),
            data.as_ptr(),
            ptr::null_mut::<()>())
        let res = unsafe {
            squash_codec_decompress(
                codec,
                &mut size,
                buf.as_mut_ptr(),
                data.len(),
                data.as_ptr(),
                ptr::null_mut::<()>()
            )
        };
        if res != SQUASH_OK {
            return Err(Self::error(res))
            return Err(Self::error(res));
        }
        unsafe { buf.set_len(size) };
        Ok(buf)

@ -194,9 +205,8 @@ impl Compression {
    pub fn compress_stream(&self) -> Result<CompressionStream, CompressionError> {
        let codec = try!(self.codec());
        let options = try!(self.options());
        let stream = unsafe { squash_stream_new_with_options(
            codec, SQUASH_STREAM_COMPRESS, options
        ) };
        let stream =
            unsafe { squash_stream_new_with_options(codec, SQUASH_STREAM_COMPRESS, options) };
        if stream.is_null() {
            return Err(CompressionError::InitializeStream);
        }

@ -205,9 +215,8 @@ impl Compression {

    pub fn decompress_stream(&self) -> Result<CompressionStream, CompressionError> {
        let codec = try!(self.codec());
        let stream = unsafe { squash_stream_new(
            codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>()
        ) };
        let stream =
            unsafe { squash_stream_new(codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>()) };
        if stream.is_null() {
            return Err(CompressionError::InitializeStream);
        }

@ -218,7 +227,7 @@ impl Compression {

pub struct CompressionStream {
    stream: *mut SquashStream,
    buffer: [u8; 16*1024]
    buffer: [u8; 16 * 1024]
}

impl CompressionStream {

@ -226,11 +235,15 @@ impl CompressionStream {
    fn new(stream: *mut SquashStream) -> Self {
        CompressionStream {
            stream: stream,
            buffer: [0; 16*1024]
            buffer: [0; 16 * 1024]
        }
    }

    pub fn process<W: Write>(&mut self, input: &[u8], output: &mut W) -> Result<(), CompressionError> {
    pub fn process<W: Write>(
        &mut self,
        input: &[u8],
        output: &mut W,
    ) -> Result<(), CompressionError> {
        let stream = unsafe { &mut (*self.stream) };
        stream.next_in = input.as_ptr();
        stream.avail_in = input.len();

@ -239,12 +252,12 @@ impl CompressionStream {
            stream.avail_out = self.buffer.len();
            let res = unsafe { squash_stream_process(stream) };
            if res < 0 {
                return Err(Compression::error(res))
                return Err(Compression::error(res));
            }
            let output_size = self.buffer.len() - stream.avail_out;
            try!(output.write_all(&self.buffer[..output_size]));
            if res != SQUASH_PROCESSING {
                break
                break;
            }
        }
        Ok(())

@ -257,12 +270,12 @@ impl CompressionStream {
            stream.avail_out = self.buffer.len();
            let res = unsafe { squash_stream_finish(stream) };
            if res < 0 {
                return Err(Compression::error(res))
                return Err(Compression::error(res));
            }
            let output_size = self.buffer.len() - stream.avail_out;
            try!(output.write_all(&self.buffer[..output_size]));
            if res != SQUASH_PROCESSING {
                break
                break;
            }
        }
        Ok(())

@ -271,7 +284,9 @@ impl CompressionStream {

impl Drop for CompressionStream {
    fn drop(&mut self) {
        unsafe { squash_object_unref(self.stream as *mut libc::c_void); }
        unsafe {
            squash_object_unref(self.stream as *mut libc::c_void);
        }
    }
}

@ -303,8 +318,14 @@ mod tests {

    #[test]
    fn test_to_string() {
        assert_eq!("brotli/1", Compression::from_string("brotli/1").unwrap().to_string());
        assert_eq!("deflate/1", Compression::from_string("gzip/1").unwrap().to_string());
        assert_eq!(
            "brotli/1",
            Compression::from_string("brotli/1").unwrap().to_string()
        );
        assert_eq!(
            "deflate/1",
            Compression::from_string("gzip/1").unwrap().to_string()
        );
    }

    #[allow(dead_code, needless_range_loop)]

@ -318,8 +339,8 @@ mod tests {

    #[allow(dead_code)]
    fn test_compression(method: &str, min_lvl: u8, max_lvl: u8) {
        let input = test_data(16*1024);
        for i in min_lvl..max_lvl+1 {
        let input = test_data(16 * 1024);
        for i in min_lvl..max_lvl + 1 {
            let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap();
            println!("{}", method.to_string());
            let compressed = method.compress(&input).unwrap();

@ -353,8 +374,8 @@ mod tests {

    #[allow(dead_code)]
    fn test_stream_compression(method: &str, min_lvl: u8, max_lvl: u8) {
        let input = test_data(512*1024);
        for i in min_lvl..max_lvl+1 {
        let input = test_data(512 * 1024);
        for i in min_lvl..max_lvl + 1 {
            let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap();
            println!("{}", method.to_string());
            let mut compressor = method.compress_stream().unwrap();

@ -363,7 +384,9 @@ mod tests {
            compressor.finish(&mut compressed).unwrap();
            let mut decompressor = method.decompress_stream().unwrap();
            let mut decompressed = Vec::with_capacity(input.len());
            decompressor.process(&compressed, &mut decompressed).unwrap();
            decompressor
                .process(&compressed, &mut decompressed)
                .unwrap();
            decompressor.finish(&mut decompressed).unwrap();
            assert_eq!(input.len(), decompressed.len());
            for i in 0..input.len() {

@ -415,7 +438,7 @@ mod benches {

    #[allow(dead_code)]
    fn bench_stream_compression(b: &mut Bencher, method: Compression) {
        let input = test_data(512*1024);
        let input = test_data(512 * 1024);
        b.iter(|| {
            let mut compressor = method.compress_stream().unwrap();
            let mut compressed = Vec::with_capacity(input.len());

@ -427,7 +450,7 @@ mod benches {

    #[allow(dead_code)]
    fn bench_stream_decompression(b: &mut Bencher, method: Compression) {
        let input = test_data(512*1024);
        let input = test_data(512 * 1024);
        let mut compressor = method.compress_stream().unwrap();
        let mut compressed = Vec::with_capacity(input.len());
        compressor.process(&input, &mut compressed).unwrap();

@ -435,7 +458,9 @@ mod benches {
        b.iter(|| {
            let mut decompressor = method.decompress_stream().unwrap();
            let mut decompressed = Vec::with_capacity(compressed.len());
            decompressor.process(&compressed, &mut decompressed).unwrap();
            decompressor
                .process(&compressed, &mut decompressed)
                .unwrap();
            decompressor.finish(&mut decompressed).unwrap();
        });
        b.bytes = input.len() as u64;
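Both process and finish above drive the same loop: run the stream until it stops reporting SQUASH_PROCESSING, writing out however much of the 16 KiB buffer was filled on each pass. The control flow, sketched with an invented Codec trait (this is not the libsquash API; the real stream tracks next_in/avail_in/avail_out itself):

enum Status { Done, Processing, Error }

trait Codec {
    // One step over the codec's internal cursors; fills `out` with what fits.
    fn step(&mut self, out: &mut Vec<u8>) -> Status;
    // Flushes whatever the codec still buffers internally.
    fn finish_step(&mut self, out: &mut Vec<u8>) -> Status;
}

fn pump<C: Codec>(codec: &mut C, out: &mut Vec<u8>) -> Result<(), ()> {
    loop {
        match codec.step(out) {
            Status::Processing => continue, // output buffer was full, go again
            Status::Done => break,
            Status::Error => return Err(()),
        }
    }
    loop {
        match codec.finish_step(out) {
            Status::Processing => continue,
            Status::Done => return Ok(()),
            Status::Error => return Err(()),
        }
    }
}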
@ -14,16 +14,14 @@ use sodiumoxide::crypto::box_;
use sodiumoxide::crypto::pwhash;
pub use sodiumoxide::crypto::box_::{SecretKey, PublicKey};

use ::util::*;
use util::*;


static INIT: Once = ONCE_INIT;

fn sodium_init() {
    INIT.call_once(|| {
        if !sodiumoxide::init() {
            panic!("Failed to initialize sodiumoxide");
        }
    INIT.call_once(|| if !sodiumoxide::init() {
        panic!("Failed to initialize sodiumoxide");
    });
}

@ -58,9 +56,9 @@ quick_error!{


#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[allow(unknown_lints,non_camel_case_types)]
#[allow(unknown_lints, non_camel_case_types)]
pub enum EncryptionMethod {
    Sodium,
    Sodium
}
serde_impl!(EncryptionMethod(u64) {
    Sodium => 0

@ -70,13 +68,13 @@ impl EncryptionMethod {
    pub fn from_string(val: &str) -> Result<Self, &'static str> {
        match val {
            "sodium" => Ok(EncryptionMethod::Sodium),
            _ => Err("Unsupported encryption method")
            _ => Err("Unsupported encryption method"),
        }
    }

    pub fn to_string(&self) -> String {
        match *self {
            EncryptionMethod::Sodium => "sodium".to_string()
            EncryptionMethod::Sodium => "sodium".to_string(),
        }
    }
}

@ -124,7 +122,10 @@ impl Crypto {
    #[inline]
    pub fn dummy() -> Self {
        sodium_init();
        Crypto { path: None, keys: HashMap::new() }
        Crypto {
            path: None,
            keys: HashMap::new()
        }
    }

    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, EncryptionError> {

@ -134,13 +135,24 @@ impl Crypto {
        for entry in try!(fs::read_dir(&path)) {
            let entry = try!(entry);
            let keyfile = try!(KeyfileYaml::load(entry.path()));
            let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey));
            let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey));
            let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey));
            let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey));
            let public = try!(parse_hex(&keyfile.public).map_err(
                |_| EncryptionError::InvalidKey
            ));
            let public = try!(PublicKey::from_slice(&public).ok_or(
                EncryptionError::InvalidKey
            ));
            let secret = try!(parse_hex(&keyfile.secret).map_err(
                |_| EncryptionError::InvalidKey
            ));
            let secret = try!(SecretKey::from_slice(&secret).ok_or(
                EncryptionError::InvalidKey
            ));
            keys.insert(public, secret);
        }
        Ok(Crypto { path: Some(path), keys: keys })
        Ok(Crypto {
            path: Some(path),
            keys: keys
        })
    }

    #[inline]

@ -155,30 +167,53 @@ impl Crypto {
    }

    #[inline]
    pub fn load_keypair_from_file<P: AsRef<Path>>(path: P) -> Result<(PublicKey, SecretKey), EncryptionError> {
    pub fn load_keypair_from_file<P: AsRef<Path>>(
        path: P,
    ) -> Result<(PublicKey, SecretKey), EncryptionError> {
        Self::load_keypair_from_file_data(&try!(KeyfileYaml::load(path)))
    }

    pub fn load_keypair_from_file_data(keyfile: &KeyfileYaml) -> Result<(PublicKey, SecretKey), EncryptionError> {
        let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey));
        let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey));
        let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey));
        let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey));
    pub fn load_keypair_from_file_data(
        keyfile: &KeyfileYaml,
    ) -> Result<(PublicKey, SecretKey), EncryptionError> {
        let public = try!(parse_hex(&keyfile.public).map_err(
            |_| EncryptionError::InvalidKey
        ));
        let public = try!(PublicKey::from_slice(&public).ok_or(
            EncryptionError::InvalidKey
        ));
        let secret = try!(parse_hex(&keyfile.secret).map_err(
            |_| EncryptionError::InvalidKey
        ));
        let secret = try!(SecretKey::from_slice(&secret).ok_or(
            EncryptionError::InvalidKey
        ));
        Ok((public, secret))
    }

    #[inline]
    pub fn save_keypair_to_file_data(public: &PublicKey, secret: &SecretKey) -> KeyfileYaml {
        KeyfileYaml { public: to_hex(&public[..]), secret: to_hex(&secret[..]) }
        KeyfileYaml {
            public: to_hex(&public[..]),
            secret: to_hex(&secret[..])
        }
    }

    #[inline]
    pub fn save_keypair_to_file<P: AsRef<Path>>(public: &PublicKey, secret: &SecretKey, path: P) -> Result<(), EncryptionError> {
    pub fn save_keypair_to_file<P: AsRef<Path>>(
        public: &PublicKey,
        secret: &SecretKey,
        path: P,
    ) -> Result<(), EncryptionError> {
        Self::save_keypair_to_file_data(public, secret).save(path)
    }

    #[inline]
    pub fn register_secret_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), EncryptionError> {
    pub fn register_secret_key(
        &mut self,
        public: PublicKey,
        secret: SecretKey,
    ) -> Result<(), EncryptionError> {
        if let Some(ref path) = self.path {
            let path = path.join(to_hex(&public[..]) + ".yaml");
            try!(Self::save_keypair_to_file(&public, &secret, path));

@ -193,28 +228,34 @@ impl Crypto {
    }

    fn get_secret_key(&self, public: &PublicKey) -> Result<&SecretKey, EncryptionError> {
        self.keys.get(public).ok_or_else(|| EncryptionError::MissingKey(*public))
        self.keys.get(public).ok_or_else(
            || EncryptionError::MissingKey(*public)
        )
    }

    #[inline]
    pub fn encrypt(&self, enc: &Encryption, data: &[u8]) -> Result<Vec<u8>, EncryptionError> {
        let &(ref method, ref public) = enc;
        let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey));
        let public = try!(PublicKey::from_slice(public).ok_or(
            EncryptionError::InvalidKey
        ));
        match *method {
            EncryptionMethod::Sodium => {
                Ok(sealedbox::seal(data, &public))
            }
            EncryptionMethod::Sodium => Ok(sealedbox::seal(data, &public)),
        }
    }

    #[inline]
    pub fn decrypt(&self, enc: &Encryption, data: &[u8]) -> Result<Vec<u8>, EncryptionError> {
        let &(ref method, ref public) = enc;
        let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey));
        let public = try!(PublicKey::from_slice(public).ok_or(
            EncryptionError::InvalidKey
        ));
        let secret = try!(self.get_secret_key(&public));
        match *method {
            EncryptionMethod::Sodium => {
                sealedbox::open(data, &public, secret).map_err(|_| EncryptionError::Operation("Decryption failed"))
                sealedbox::open(data, &public, secret).map_err(|_| {
                    EncryptionError::Operation("Decryption failed")
                })
            }
        }
    }

@ -228,18 +269,27 @@ impl Crypto {
    pub fn keypair_from_password(password: &str) -> (PublicKey, SecretKey) {
        let salt = pwhash::Salt::from_slice(b"the_great_zvault_password_salt_1").unwrap();
        let mut key = [0u8; pwhash::HASHEDPASSWORDBYTES];
        let key = pwhash::derive_key(&mut key, password.as_bytes(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE).unwrap();
        let key = pwhash::derive_key(
            &mut key,
            password.as_bytes(),
            &salt,
            pwhash::OPSLIMIT_INTERACTIVE,
            pwhash::MEMLIMIT_INTERACTIVE
        ).unwrap();
        let mut seed = [0u8; 32];
        let offset = key.len()-seed.len();
        let offset = key.len() - seed.len();
        for (i, b) in seed.iter_mut().enumerate() {
            *b = key[i+offset];
            *b = key[i + offset];
        }
        let mut pk = [0u8; 32];
        let mut sk = [0u8; 32];
        if unsafe { libsodium_sys::crypto_box_seed_keypair(&mut pk, &mut sk, &seed) } != 0 {
            panic!("Libsodium failed");
        }
        (PublicKey::from_slice(&pk).unwrap(), SecretKey::from_slice(&sk).unwrap())
        (
            PublicKey::from_slice(&pk).unwrap(),
            SecretKey::from_slice(&sk).unwrap()
        )
    }
}

@ -374,7 +424,7 @@ mod benches {
    let (pk, sk) = Crypto::gen_keypair();
    crypto.add_secret_key(pk, sk.clone());
    let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
    let input = test_data(512*1024);
    let input = test_data(512 * 1024);
    b.iter(|| crypto.encrypt(&encryption, &input));
    b.bytes = input.len() as u64;
}

@ -385,7 +435,7 @@ mod benches {
    let (pk, sk) = Crypto::gen_keypair();
    crypto.add_secret_key(pk, sk.clone());
    let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
    let input = test_data(512*1024);
    let input = test_data(512 * 1024);
    let output = crypto.encrypt(&encryption, &input).unwrap();
    b.iter(|| crypto.decrypt(&encryption, &output));
    b.bytes = input.len() as u64;
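keypair_from_password is deterministic: a fixed salt plus the interactive pwhash limits always derive the same key from the same password, and the last 32 bytes of that derived key become the box seed. The seed extraction in plain Rust (stand-in code, no libsodium calls):

// Take the trailing 32 bytes of a longer derived key as the keypair seed.
// Panics if `key` is shorter than 32 bytes.
fn seed_from_key(key: &[u8]) -> [u8; 32] {
    let mut seed = [0u8; 32];
    let offset = key.len() - seed.len();
    seed.copy_from_slice(&key[offset..]);
    seed
}

Because the derivation is deterministic, the same password always reproduces the same keypair, which is the point of the function, but it also means the hard-coded salt offers no per-user protection.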
@ -7,13 +7,17 @@ mod linux {
    use std::os::unix::ffi::OsStringExt;

    #[inline]
    pub fn chown<P: AsRef<Path>>(path: P, uid: libc::uid_t, gid: libc::gid_t) -> Result<(), io::Error> {
    pub fn chown<P: AsRef<Path>>(
        path: P,
        uid: libc::uid_t,
        gid: libc::gid_t,
    ) -> Result<(), io::Error> {
        let path = CString::new(path.as_ref().to_path_buf().into_os_string().into_vec()).unwrap();
        let result = unsafe { libc::lchown((&path).as_ptr(), uid, gid) };
        match result {
            0 => Ok(()),
            -1 => Err(io::Error::last_os_error()),
            _ => unreachable!()
            _ => unreachable!(),
        }
    }
}
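Worth noting: the wrapper calls lchown, so when the path is a symlink the link itself changes owner, not its target. An illustrative call (Unix only; the path and the uid/gid 1000 are example values):

fn example() -> Result<(), std::io::Error> {
    linux::chown("/tmp/example-file", 1000, 1000)
}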
@ -27,7 +27,7 @@ impl Hash {

    #[inline]
    pub fn empty() -> Self {
        Hash{high: 0, low: 0}
        Hash { high: 0, low: 0 }
    }

    #[inline]

@ -45,14 +45,20 @@ impl Hash {
    pub fn read_from(src: &mut Read) -> Result<Self, io::Error> {
        let high = try!(src.read_u64::<LittleEndian>());
        let low = try!(src.read_u64::<LittleEndian>());
        Ok(Hash { high: high, low: low })
        Ok(Hash {
            high: high,
            low: low
        })
    }

    #[inline]
    pub fn from_string(val: &str) -> Result<Self, ()> {
        let high = try!(u64::from_str_radix(&val[..16], 16).map_err(|_| ()));
        let low = try!(u64::from_str_radix(&val[16..], 16).map_err(|_| ()));
        Ok(Self { high: high, low: low })
        Ok(Self {
            high: high,
            low: low
        })
    }
}

@ -72,7 +78,10 @@ impl fmt::Debug for Hash {


impl Serialize for Hash {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut dat = [0u8; 16];
        LittleEndian::write_u64(&mut dat[..8], self.high);
        LittleEndian::write_u64(&mut dat[8..], self.low);

@ -81,12 +90,15 @@ impl Serialize for Hash {
}

impl<'a> Deserialize<'a> for Hash {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'a> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'a>,
    {
        let dat: Vec<u8> = try!(ByteBuf::deserialize(deserializer)).into();
        if dat.len() != 16 {
            return Err(D::Error::custom("Invalid key length"));
        }
        Ok(Hash{
        Ok(Hash {
            high: LittleEndian::read_u64(&dat[..8]),
            low: LittleEndian::read_u64(&dat[8..])
        })

@ -111,9 +123,13 @@ impl HashMethod {
        match *self {
            HashMethod::Blake2 => {
                let hash = blake2b(16, &[], data);
                let hash = unsafe { &*mem::transmute::<_, *const (u64, u64)>(hash.as_bytes().as_ptr()) };
                Hash { high: u64::from_be(hash.0), low: u64::from_be(hash.1) }
            },
                let hash =
                    unsafe { &*mem::transmute::<_, *const (u64, u64)>(hash.as_bytes().as_ptr()) };
                Hash {
                    high: u64::from_be(hash.0),
                    low: u64::from_be(hash.1)
                }
            }
            HashMethod::Murmur3 => {
                let (a, b) = murmurhash3_x64_128(data, 0);
                Hash { high: a, low: b }

@ -126,7 +142,7 @@ impl HashMethod {
        match name {
            "blake2" => Ok(HashMethod::Blake2),
            "murmur3" => Ok(HashMethod::Murmur3),
            _ => Err("Unsupported hash method")
            _ => Err("Unsupported hash method"),
        }
    }

@ -134,10 +150,9 @@ impl HashMethod {
    pub fn name(&self) -> &'static str {
        match *self {
            HashMethod::Blake2 => "blake2",
            HashMethod::Murmur3 => "murmur3"
            HashMethod::Murmur3 => "murmur3",
        }
    }

}


@ -163,12 +178,24 @@ mod tests {

    #[test]
    fn test_blake2() {
        assert_eq!(HashMethod::Blake2.hash(b"abc"), Hash{high: 0xcf4ab791c62b8d2b, low: 0x2109c90275287816});
        assert_eq!(
            HashMethod::Blake2.hash(b"abc"),
            Hash {
                high: 0xcf4ab791c62b8d2b,
                low: 0x2109c90275287816
            }
        );
    }

    #[test]
    fn test_murmur3() {
        assert_eq!(HashMethod::Murmur3.hash(b"123"), Hash{high: 10978418110857903978, low: 4791445053355511657});
        assert_eq!(
            HashMethod::Murmur3.hash(b"123"),
            Hash {
                high: 10978418110857903978,
                low: 4791445053355511657
            }
        );
    }

}

@ -195,14 +222,14 @@ mod benches {

    #[bench]
    fn bench_blake2(b: &mut Bencher) {
        let data = test_data(16*1024);
        let data = test_data(16 * 1024);
        b.bytes = data.len() as u64;
        b.iter(|| HashMethod::Blake2.hash(&data));
    }

    #[bench]
    fn bench_murmur3(b: &mut Bencher) {
        let data = test_data(16*1024);
        let data = test_data(16 * 1024);
        b.bytes = data.len() as u64;
        b.iter(|| HashMethod::Murmur3.hash(&data));
    }
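A Hash is two u64 halves serialized little-endian, high half first, 16 bytes total, matching read_from and the serde impls above. A dependency-free sketch of that layout:

fn hash_to_bytes(high: u64, low: u64) -> [u8; 16] {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&high.to_le_bytes()); // high half first
    out[8..].copy_from_slice(&low.to_le_bytes());
    out
}

fn hash_from_bytes(dat: [u8; 16]) -> (u64, u64) {
    let mut high = [0u8; 8];
    let mut low = [0u8; 8];
    high.copy_from_slice(&dat[..8]);
    low.copy_from_slice(&dat[8..]);
    (u64::from_le_bytes(high), u64::from_le_bytes(low))
}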
@ -1,5 +1,8 @@
pub fn to_hex(data: &[u8]) -> String {
    data.iter().map(|b| format!("{:02x}", b)).collect::<Vec<String>>().join("")
    data.iter()
        .map(|b| format!("{:02x}", b))
        .collect::<Vec<String>>()
        .join("")
}

pub fn parse_hex(hex: &str) -> Result<Vec<u8>, ()> {

@ -12,9 +15,9 @@ pub fn parse_hex(hex: &str) -> Result<Vec<u8>, ()> {
            b'A'...b'F' => buf |= byte - b'A' + 10,
            b'a'...b'f' => buf |= byte - b'a' + 10,
            b'0'...b'9' => buf |= byte - b'0',
            b' '|b'\r'|b'\n'|b'\t' => {
            b' ' | b'\r' | b'\n' | b'\t' => {
                buf >>= 4;
                continue
                continue;
            }
            _ => return Err(()),
        }

@ -45,7 +48,7 @@ mod tests {
        assert_eq!(to_hex(&[15]), "0f");
        assert_eq!(to_hex(&[16]), "10");
        assert_eq!(to_hex(&[255]), "ff");
        assert_eq!(to_hex(&[5,255]), "05ff");
        assert_eq!(to_hex(&[5, 255]), "05ff");
    }

    #[test]

@ -53,9 +56,9 @@ mod tests {
        assert_eq!(parse_hex("00"), Ok(vec![0]));
        assert_eq!(parse_hex("01"), Ok(vec![1]));
        assert_eq!(parse_hex("0f"), Ok(vec![15]));
        assert_eq!(parse_hex("0fff"), Ok(vec![15,255]));
        assert_eq!(parse_hex("0fff"), Ok(vec![15, 255]));
        assert_eq!(parse_hex("0F"), Ok(vec![15]));
        assert_eq!(parse_hex("01 02\n03\t04"), Ok(vec![1,2,3,4]));
        assert_eq!(parse_hex("01 02\n03\t04"), Ok(vec![1, 2, 3, 4]));
    }

}
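Assuming the two helpers above are in scope, they are inverses for any byte slice (to_hex emits lowercase, parse_hex accepts both cases and skips whitespace):

fn roundtrip(data: &[u8]) -> bool {
    parse_hex(&to_hex(data)) == Ok(data.to_vec())
}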
@ -1,14 +1,20 @@
use libc;
use std::ffi;

extern {
extern "C" {
    fn gethostname(name: *mut libc::c_char, size: libc::size_t) -> libc::c_int;
}

pub fn get_hostname() -> Result<String, ()> {
    let mut buf = Vec::with_capacity(255);
    buf.resize(255, 0u8);
    if unsafe { gethostname(buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t) } == 0 {
    if unsafe {
        gethostname(
            buf.as_mut_ptr() as *mut libc::c_char,
            buf.len() as libc::size_t
        )
    } == 0
    {
        buf[254] = 0; // enforce null-termination
        let name = unsafe { ffi::CStr::from_ptr(buf.as_ptr() as *const libc::c_char) };
        name.to_str().map(|s| s.to_string()).map_err(|_| ())
@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;

use serde_yaml;
use chrono::prelude::*;

@ -121,12 +121,14 @@ impl LockFolder {
        for lock in try!(self.get_locks()) {
            if lock.exclusive {
                if level == LockLevel::Exclusive {
                    return Err(LockError::InvalidLockState("multiple exclusive locks"))
                    return Err(LockError::InvalidLockState("multiple exclusive locks"));
                } else {
                    level = LockLevel::Exclusive
                }
            } else if level == LockLevel::Exclusive {
                return Err(LockError::InvalidLockState("exclusive lock and shared locks"))
                return Err(LockError::InvalidLockState(
                    "exclusive lock and shared locks"
                ));
            } else {
                level = LockLevel::Shared
            }

@ -137,7 +139,7 @@ impl LockFolder {
    pub fn lock(&self, exclusive: bool) -> Result<LockHandle, LockError> {
        let level = try!(self.get_lock_level());
        if level == LockLevel::Exclusive || level == LockLevel::Shared && exclusive {
            return Err(LockError::Locked)
            return Err(LockError::Locked);
        }
        let lockfile = LockFile {
            hostname: get_hostname().unwrap(),

@ -145,12 +147,19 @@ impl LockFolder {
            date: Utc::now().timestamp(),
            exclusive: exclusive
        };
        let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
        let path = self.path.join(format!(
            "{}-{}.lock",
            &lockfile.hostname,
            lockfile.processid
        ));
        try!(lockfile.save(&path));
        let handle = LockHandle{lock: lockfile, path: path};
        let handle = LockHandle {
            lock: lockfile,
            path: path
        };
        if self.get_lock_level().is_err() {
            try!(handle.release());
            return Err(LockError::Locked)
            return Err(LockError::Locked);
        }
        Ok(handle)
    }

@ -158,19 +167,23 @@ impl LockFolder {
    pub fn upgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
        let lockfile = &mut lock.lock;
        if lockfile.exclusive {
            return Ok(())
            return Ok(());
        }
        let level = try!(self.get_lock_level());
        if level == LockLevel::Exclusive {
            return Err(LockError::Locked)
            return Err(LockError::Locked);
        }
        lockfile.exclusive = true;
        let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
        let path = self.path.join(format!(
            "{}-{}.lock",
            &lockfile.hostname,
            lockfile.processid
        ));
        try!(lockfile.save(&path));
        if self.get_lock_level().is_err() {
            lockfile.exclusive = false;
            try!(lockfile.save(&path));
            return Err(LockError::Locked)
            return Err(LockError::Locked);
        }
        Ok(())
    }

@ -178,10 +191,14 @@ impl LockFolder {
    pub fn downgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
        let lockfile = &mut lock.lock;
        if !lockfile.exclusive {
            return Ok(())
            return Ok(());
        }
        lockfile.exclusive = false;
        let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
        let path = self.path.join(format!(
            "{}-{}.lock",
            &lockfile.hostname,
            lockfile.processid
        ));
        lockfile.save(&path)
    }
}
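The locking scheme rests on the aggregation rule in get_lock_level: one exclusive lock excludes everything else, while any number of shared locks may coexist. The rule in isolation (LockLevel here mirrors the enum used above; locks are reduced to their exclusive flag):

#[derive(PartialEq, Clone, Copy)]
enum LockLevel { Free, Shared, Exclusive }

fn aggregate(locks: &[bool]) -> Result<LockLevel, &'static str> {
    let mut level = LockLevel::Free;
    for &exclusive in locks {
        if exclusive {
            if level == LockLevel::Exclusive {
                return Err("multiple exclusive locks");
            }
            level = LockLevel::Exclusive;
        } else if level == LockLevel::Exclusive {
            return Err("exclusive lock and shared locks");
        } else {
            level = LockLevel::Shared;
        }
    }
    Ok(level)
}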
@ -10,7 +10,7 @@ pub struct LruCache<K, V> {
|
|||
}
|
||||
|
||||
|
||||
impl<K: Eq+Hash, V> LruCache<K, V> {
|
||||
impl<K: Eq + Hash, V> LruCache<K, V> {
|
||||
#[inline]
|
||||
pub fn new(min_size: usize, max_size: usize) -> Self {
|
||||
LruCache {
|
||||
|
@ -55,9 +55,9 @@ impl<K: Eq+Hash, V> LruCache<K, V> {
|
|||
fn shrink(&mut self) {
|
||||
let mut tags: Vec<u64> = self.items.values().map(|&(_, n)| n).collect();
|
||||
tags.sort();
|
||||
let min = tags[tags.len()-self.min_size];
|
||||
let min = tags[tags.len() - self.min_size];
|
||||
let mut new = HashMap::with_capacity(self.min_size);
|
||||
new.extend(self.items.drain().filter(|&(_,(_, n))| n>=min));
|
||||
new.extend(self.items.drain().filter(|&(_, (_, n))| n >= min));
|
||||
self.items = new;
|
||||
}
|
||||
}
|
||||
|
|