diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77401c2..7788f26 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ This project follows [semantic versioning](http://semver.org).
 * [added] Added support for xattrs in fuse mount
 * [added] Added support for block/char devices
 * [added] Added support for fifo files
+* [modified] Reformatted sources using rustfmt
 * [modified] Also documenting common flags in subcommands
 * [modified] Using repository aliases (**conversion needed**)
 * [modified] Remote path must be absolute
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000..50526f0
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,2 @@
+trailing_semicolon = false
+trailing_comma = "Never"
diff --git a/src/bundledb/cache.rs b/src/bundledb/cache.rs
index 0909959..21cca8b 100644
--- a/src/bundledb/cache.rs
+++ b/src/bundledb/cache.rs
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 
 use std::path::{Path, PathBuf};
 use std::fs::{self, File};
@@ -62,7 +62,11 @@ impl StoredBundle {
         self.info.id.clone()
     }
 
-    pub fn copy_to<P: AsRef<Path>>(&self, base_path: &Path, path: P) -> Result<Self, BundleDbError> {
+    pub fn copy_to<P: AsRef<Path>>(
+        &self,
+        base_path: &Path,
+        path: P,
+    ) -> Result<Self, BundleDbError> {
         let src_path = base_path.join(&self.path);
         let dst_path = path.as_ref();
         try!(fs::copy(&src_path, dst_path).context(dst_path));
@@ -71,7 +75,11 @@ impl StoredBundle {
         Ok(bundle)
     }
 
-    pub fn move_to<P: AsRef<Path>>(&mut self, base_path: &Path, path: P) -> Result<(), BundleDbError> {
+    pub fn move_to<P: AsRef<Path>>(
+        &mut self,
+        base_path: &Path,
+        path: P,
+    ) -> Result<(), BundleDbError> {
         let src_path = base_path.join(&self.path);
         let dst_path = path.as_ref();
         if fs::rename(&src_path, dst_path).is_err() {
@@ -88,11 +96,11 @@ impl StoredBundle {
         let mut header = [0u8; 8];
         try!(file.read_exact(&mut header).map_err(BundleCacheError::Read));
         if header[..CACHE_FILE_STRING.len()] != CACHE_FILE_STRING {
-            return Err(BundleCacheError::WrongHeader)
+            return Err(BundleCacheError::WrongHeader);
         }
         let version = header[CACHE_FILE_STRING.len()];
         if version != CACHE_FILE_VERSION {
-            return Err(BundleCacheError::UnsupportedVersion(version))
+            return Err(BundleCacheError::UnsupportedVersion(version));
         }
         Ok(try!(msgpack::decode_from_stream(&mut file)))
     }
@@ -100,8 +108,12 @@ impl StoredBundle {
     pub fn save_list_to<P: AsRef<Path>>(list: &[Self], path: P) -> Result<(), BundleCacheError> {
         let path = path.as_ref();
         let mut file = BufWriter::new(try!(File::create(path).map_err(BundleCacheError::Write)));
-        try!(file.write_all(&CACHE_FILE_STRING).map_err(BundleCacheError::Write));
-        try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(BundleCacheError::Write));
+        try!(file.write_all(&CACHE_FILE_STRING).map_err(
+            BundleCacheError::Write
+        ));
+        try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(
+            BundleCacheError::Write
+        ));
         try!(msgpack::encode_to_stream(&list, &mut file));
         Ok(())
     }
diff --git a/src/bundledb/db.rs b/src/bundledb/db.rs
index 740f3d6..ab67fde 100644
--- a/src/bundledb/db.rs
+++ b/src/bundledb/db.rs
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -57,7 +57,12 @@ quick_error!{
     }
 }
 
-fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, StoredBundle>, crypto: Arc<Mutex<Crypto>>) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
+fn load_bundles(
+    path: &Path,
+    base: &Path,
+    bundles: &mut HashMap<BundleId, StoredBundle>,
+    crypto: Arc<Mutex<Crypto>>,
+) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
     let mut paths = vec![path.to_path_buf()];
     let mut bundle_paths = HashSet::new();
     while let Some(path) = paths.pop() {
@@ -68,7 +73,7 @@ fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, Store
             Err(err) => {
warn!("Failed to read bundle {:?}\n\tcaused by: {}", path, err); info!("Ignoring unreadable bundle"); - continue + continue; } }; - let bundle = StoredBundle { info: info, path: path }; + let bundle = StoredBundle { + info: info, + path: path + }; let id = bundle.info.id.clone(); if !bundles.contains_key(&id) { new.push(bundle.clone()); @@ -129,7 +137,9 @@ impl BundleDb { } } - fn load_bundle_list(&mut self) -> Result<(Vec, Vec), BundleDbError> { + fn load_bundle_list( + &mut self, + ) -> Result<(Vec, Vec), BundleDbError> { if let Ok(list) = StoredBundle::read_list_from(&self.layout.local_bundle_cache_path()) { for bundle in list { self.local_bundles.insert(bundle.id(), bundle); @@ -145,15 +155,31 @@ impl BundleDb { warn!("Failed to read remote bundle cache, rebuilding cache"); } let base_path = self.layout.base_path(); - let (new, gone) = try!(load_bundles(&self.layout.local_bundles_path(), base_path, &mut self.local_bundles, self.crypto.clone())); + let (new, gone) = try!(load_bundles( + &self.layout.local_bundles_path(), + base_path, + &mut self.local_bundles, + self.crypto.clone() + )); if !new.is_empty() || !gone.is_empty() { let bundles: Vec<_> = self.local_bundles.values().cloned().collect(); - try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path())); + try!(StoredBundle::save_list_to( + &bundles, + &self.layout.local_bundle_cache_path() + )); } - let (new, gone) = try!(load_bundles(&self.layout.remote_bundles_path(), base_path, &mut self.remote_bundles, self.crypto.clone())); + let (new, gone) = try!(load_bundles( + &self.layout.remote_bundles_path(), + base_path, + &mut self.remote_bundles, + self.crypto.clone() + )); if !new.is_empty() || !gone.is_empty() { let bundles: Vec<_> = self.remote_bundles.values().cloned().collect(); - try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path())); + try!(StoredBundle::save_list_to( + &bundles, + &self.layout.remote_bundle_cache_path() + )); } Ok((new, gone)) } @@ -164,9 +190,15 @@ impl BundleDb { fn save_cache(&self) -> Result<(), BundleDbError> { let bundles: Vec<_> = self.local_bundles.values().cloned().collect(); - try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path())); + try!(StoredBundle::save_list_to( + &bundles, + &self.layout.local_bundle_cache_path() + )); let bundles: Vec<_> = self.remote_bundles.values().cloned().collect(); - Ok(try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path()))) + Ok(try!(StoredBundle::save_list_to( + &bundles, + &self.layout.remote_bundle_cache_path() + ))) } fn update_cache(&mut self) -> Result<(), BundleDbError> { @@ -192,13 +224,18 @@ impl BundleDb { let base_path = self.layout.base_path(); for id in remove { if let Some(bundle) = self.local_bundles.remove(&id) { - try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| BundleDbError::Remove(e, id))) + try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| { + BundleDbError::Remove(e, id) + })) } } Ok(()) } - pub fn open(layout: RepositoryLayout, crypto: Arc>) -> Result<(Self, Vec, Vec), BundleDbError> { + pub fn open( + layout: RepositoryLayout, + crypto: Arc>, + ) -> Result<(Self, Vec, Vec), BundleDbError> { let mut self_ = Self::new(layout, crypto); let (new, gone) = try!(self_.load_bundle_list()); try!(self_.update_cache()); @@ -208,21 +245,51 @@ impl BundleDb { } pub fn create(layout: RepositoryLayout) -> Result<(), BundleDbError> { - try!(fs::create_dir_all(layout.remote_bundles_path()).context(&layout.remote_bundles_path() as 
&Path)); - try!(fs::create_dir_all(layout.local_bundles_path()).context(&layout.local_bundles_path() as &Path)); - try!(fs::create_dir_all(layout.temp_bundles_path()).context(&layout.temp_bundles_path() as &Path)); - try!(StoredBundle::save_list_to(&[], layout.local_bundle_cache_path())); - try!(StoredBundle::save_list_to(&[], layout.remote_bundle_cache_path())); + try!(fs::create_dir_all(layout.remote_bundles_path()).context( + &layout.remote_bundles_path() as + &Path + )); + try!(fs::create_dir_all(layout.local_bundles_path()).context( + &layout.local_bundles_path() as + &Path + )); + try!(fs::create_dir_all(layout.temp_bundles_path()).context( + &layout.temp_bundles_path() as + &Path + )); + try!(StoredBundle::save_list_to( + &[], + layout.local_bundle_cache_path() + )); + try!(StoredBundle::save_list_to( + &[], + layout.remote_bundle_cache_path() + )); Ok(()) } #[inline] - pub fn create_bundle(&self, mode: BundleMode, hash_method: HashMethod, compression: Option, encryption: Option) -> Result { - Ok(try!(BundleWriter::new(mode, hash_method, compression, encryption, self.crypto.clone()))) + pub fn create_bundle( + &self, + mode: BundleMode, + hash_method: HashMethod, + compression: Option, + encryption: Option, + ) -> Result { + Ok(try!(BundleWriter::new( + mode, + hash_method, + compression, + encryption, + self.crypto.clone() + ))) } fn get_stored_bundle(&self, bundle_id: &BundleId) -> Result<&StoredBundle, BundleDbError> { - if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| self.remote_bundles.get(bundle_id)) { + if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| { + self.remote_bundles.get(bundle_id) + }) + { Ok(stored) } else { Err(BundleDbError::NoSuchBundle(bundle_id.clone())) @@ -232,21 +299,26 @@ impl BundleDb { #[inline] fn get_bundle(&self, stored: &StoredBundle) -> Result { let base_path = self.layout.base_path(); - Ok(try!(BundleReader::load(base_path.join(&stored.path), self.crypto.clone()))) + Ok(try!(BundleReader::load( + base_path.join(&stored.path), + self.crypto.clone() + ))) } pub fn get_chunk(&mut self, bundle_id: &BundleId, id: usize) -> Result, BundleDbError> { if let Some(&mut (ref mut bundle, ref data)) = self.bundle_cache.get_mut(bundle_id) { let (pos, len) = try!(bundle.get_chunk_position(id)); let mut chunk = Vec::with_capacity(len); - chunk.extend_from_slice(&data[pos..pos+len]); + chunk.extend_from_slice(&data[pos..pos + len]); return Ok(chunk); } - let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then(|s| self.get_bundle(s))); + let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then( + |s| self.get_bundle(s) + )); let (pos, len) = try!(bundle.get_chunk_position(id)); let mut chunk = Vec::with_capacity(len); let data = try!(bundle.load_contents()); - chunk.extend_from_slice(&data[pos..pos+len]); + chunk.extend_from_slice(&data[pos..pos + len]); self.bundle_cache.put(bundle_id.clone(), (bundle, data)); Ok(chunk) } @@ -255,7 +327,10 @@ impl BundleDb { let id = bundle.id(); let (folder, filename) = self.layout.local_bundle_path(&id, self.local_bundles.len()); try!(fs::create_dir_all(&folder).context(&folder as &Path)); - let bundle = try!(bundle.copy_to(self.layout.base_path(), folder.join(filename))); + let bundle = try!(bundle.copy_to( + self.layout.base_path(), + folder.join(filename) + )); self.local_bundles.insert(id, bundle); Ok(()) } @@ -268,7 +343,10 @@ impl BundleDb { let (folder, filename) = self.layout.remote_bundle_path(self.remote_bundles.len()); let dst_path = folder.join(filename); let 
-        bundle.path = dst_path.strip_prefix(self.layout.base_path()).unwrap().to_path_buf();
+        bundle.path = dst_path
+            .strip_prefix(self.layout.base_path())
+            .unwrap()
+            .to_path_buf();
        if self.uploader.is_none() {
            self.uploader = Some(BundleUploader::new(5));
        }
@@ -288,7 +366,9 @@ impl BundleDb {
    }
 
    pub fn get_chunk_list(&self, bundle: &BundleId) -> Result<ChunkList, BundleDbError> {
-        let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| self.get_bundle(stored)));
+        let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| {
+            self.get_bundle(stored)
+        }));
        Ok(try!(bundle.get_chunk_list()).clone())
    }
 
@@ -305,7 +385,9 @@ impl BundleDb {
    pub fn delete_local_bundle(&mut self, bundle: &BundleId) -> Result<(), BundleDbError> {
        if let Some(bundle) = self.local_bundles.remove(bundle) {
            let path = self.layout.base_path().join(&bundle.path);
-            try!(fs::remove_file(path).map_err(|e| BundleDbError::Remove(e, bundle.id())))
+            try!(fs::remove_file(path).map_err(|e| {
+                BundleDbError::Remove(e, bundle.id())
+            }))
        }
        Ok(())
    }
@@ -322,24 +404,29 @@ impl BundleDb {
    pub fn check(&mut self, full: bool, repair: bool) -> Result<bool, BundleDbError> {
        let mut to_repair = vec![];
-        for (id, stored) in ProgressIter::new("checking bundles", self.remote_bundles.len(), self.remote_bundles.iter()) {
+        for (id, stored) in ProgressIter::new(
+            "checking bundles",
+            self.remote_bundles.len(),
+            self.remote_bundles.iter()
+        )
+        {
            let mut bundle = match self.get_bundle(stored) {
                Ok(bundle) => bundle,
                Err(err) => {
                    if repair {
                        to_repair.push(id.clone());
-                        continue
+                        continue;
                    } else {
-                        return Err(err)
+                        return Err(err);
                    }
                }
            };
            if let Err(err) = bundle.check(full) {
                if repair {
                    to_repair.push(id.clone());
-                    continue
+                    continue;
                } else {
-                    return Err(err.into())
+                    return Err(err.into());
                }
            }
        }
@@ -371,35 +458,52 @@ impl BundleDb {
        let mut bundle = match self.get_bundle(&stored) {
            Ok(bundle) => bundle,
            Err(err) => {
-                warn!("Problem detected: failed to read bundle header: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle header: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                return self.evacuate_broken_bundle(stored);
            }
        };
        let chunks = match bundle.get_chunk_list() {
            Ok(chunks) => chunks.clone(),
            Err(err) => {
-                warn!("Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                return self.evacuate_broken_bundle(stored);
            }
        };
        let data = match bundle.load_contents() {
            Ok(data) => data,
            Err(err) => {
-                warn!("Problem detected: failed to read bundle data: {}\n\tcaused by: {}", id, err);
+                warn!(
+                    "Problem detected: failed to read bundle data: {}\n\tcaused by: {}",
+                    id,
+                    err
+                );
                return self.evacuate_broken_bundle(stored);
            }
        };
        warn!("Problem detected: bundle data was truncated: {}", id);
        info!("Copying readable data into new bundle");
        let info = stored.info.clone();
-        let mut new_bundle = try!(self.create_bundle(info.mode, info.hash_method, info.compression, info.encryption));
+        let mut new_bundle = try!(self.create_bundle(
+            info.mode,
+            info.hash_method,
+            info.compression,
+            info.encryption
+        ));
        let mut pos = 0;
        for (hash, mut len) in chunks.into_inner() {
            if pos >= data.len() {
-                break
+                break;
            }
            len = min(len, (data.len() - pos) as u32);
-            try!(new_bundle.add(&data[pos..pos+len as usize], hash));
+            try!(new_bundle.add(&data[pos..pos + len as usize], hash));
            pos += len as usize;
        }
        let bundle = try!(self.add_bundle(new_bundle));
@@ -411,5 +515,4 @@ impl BundleDb {
    pub fn len(&self) -> usize {
        self.remote_bundles.len()
    }
-
 }
diff --git a/src/bundledb/mod.rs b/src/bundledb/mod.rs
index 0c210fa..281d90f 100644
--- a/src/bundledb/mod.rs
+++ b/src/bundledb/mod.rs
@@ -10,7 +10,7 @@ pub use self::reader::{BundleReader, BundleReaderError};
 pub use self::db::*;
 pub use self::uploader::BundleUploader;
 
-use ::prelude::*;
+use prelude::*;
 
 use std::fmt;
 use serde;
@@ -47,7 +47,10 @@ impl BundleId {
 
    #[inline]
    pub fn random() -> Self {
-        BundleId(Hash{high: rand::random(), low: rand::random()})
+        BundleId(Hash {
+            high: rand::random(),
+            low: rand::random()
+        })
    }
 }
 
@@ -68,7 +71,8 @@ impl fmt::Debug for BundleId {
 
 #[derive(Eq, Debug, PartialEq, Clone, Copy)]
 pub enum BundleMode {
-    Data, Meta
+    Data,
+    Meta
 }
 serde_impl!(BundleMode(u8) {
    Data => 0,
diff --git a/src/bundledb/reader.rs b/src/bundledb/reader.rs
index a0baab7..0b9aac6 100644
--- a/src/bundledb/reader.rs
+++ b/src/bundledb/reader.rs
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -67,7 +67,13 @@ pub struct BundleReader {
 }
 
 impl BundleReader {
-    pub fn new(path: PathBuf, version: u8, content_start: usize, crypto: Arc<Mutex<Crypto>>, info: BundleInfo) -> Self {
+    pub fn new(
+        path: PathBuf,
+        version: u8,
+        content_start: usize,
+        crypto: Arc<Mutex<Crypto>>,
+        info: BundleInfo,
+    ) -> Self {
        BundleReader {
            info: info,
            chunks: None,
@@ -84,54 +90,90 @@ impl BundleReader {
        self.info.id.clone()
    }
 
-    fn load_header<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
+    fn load_header<P: AsRef<Path>>(
+        path: P,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
        let path = path.as_ref();
        let mut file = BufReader::new(try!(File::open(path).context(path)));
        let mut header = [0u8; 8];
        try!(file.read_exact(&mut header).context(path));
        if header[..HEADER_STRING.len()] != HEADER_STRING {
-            return Err(BundleReaderError::WrongHeader(path.to_path_buf()))
+            return Err(BundleReaderError::WrongHeader(path.to_path_buf()));
        }
        let version = header[HEADER_STRING.len()];
        if version != HEADER_VERSION {
-            return Err(BundleReaderError::UnsupportedVersion(path.to_path_buf(), version))
+            return Err(BundleReaderError::UnsupportedVersion(
+                path.to_path_buf(),
+                version
+            ));
        }
        let header: BundleHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
        let mut info_data = Vec::with_capacity(header.info_size);
        info_data.resize(header.info_size, 0);
        try!(file.read_exact(&mut info_data).context(path));
        if let Some(ref encryption) = header.encryption {
-            info_data = try!(crypto.lock().unwrap().decrypt(encryption, &info_data).context(path));
+            info_data = try!(
+                crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &info_data)
+                    .context(path)
+            );
        }
        let mut info: BundleInfo = try!(msgpack::decode(&info_data).context(path));
        info.encryption = header.encryption;
        debug!("Load bundle {}", info.id);
-        let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize + info.chunk_list_size;
+        let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize +
+            info.chunk_list_size;
        Ok((info, version, content_start))
    }
 
    #[inline]
-    pub fn load_info<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<BundleInfo, BundleReaderError> {
+    pub fn load_info<P: AsRef<Path>>(
+        path: P,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<BundleInfo, BundleReaderError> {
        Self::load_header(path, crypto).map(|b| b.0)
    }
 
    #[inline]
    pub fn load(path: PathBuf, crypto: Arc<Mutex<Crypto>>) -> Result<Self, BundleReaderError> {
        let (header, version, content_start) = try!(Self::load_header(&path, crypto.clone()));
-        Ok(BundleReader::new(path, version, content_start, crypto, header))
+        Ok(BundleReader::new(
+            path,
+            version,
+            content_start,
+            crypto,
+            header
+        ))
    }
 
    fn load_chunklist(&mut self) -> Result<(), BundleReaderError> {
-        debug!("Load bundle chunklist {} ({:?})", self.info.id, self.info.mode);
+        debug!(
+            "Load bundle chunklist {} ({:?})",
+            self.info.id,
+            self.info.mode
+        );
        let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
        let len = self.info.chunk_list_size;
        let start = self.content_start - len;
-        try!(file.seek(SeekFrom::Start(start as u64)).context(&self.path as &Path));
+        try!(file.seek(SeekFrom::Start(start as u64)).context(
+            &self.path as &Path
+        ));
        let mut chunk_data = Vec::with_capacity(len);
        chunk_data.resize(self.info.chunk_list_size, 0);
-        try!(file.read_exact(&mut chunk_data).context(&self.path as &Path));
+        try!(file.read_exact(&mut chunk_data).context(
+            &self.path as &Path
+        ));
        if let Some(ref encryption) = self.info.encryption {
-            chunk_data = try!(self.crypto.lock().unwrap().decrypt(encryption, &chunk_data).context(&self.path as &Path));
+            chunk_data = try!(
+                self.crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &chunk_data)
+                    .context(&self.path as &Path)
+            );
        }
        let chunks = ChunkList::read_from(&chunk_data);
        let mut chunk_positions = Vec::with_capacity(chunks.len());
@@ -156,20 +198,31 @@ impl BundleReader {
 
    fn load_encoded_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
        debug!("Load bundle data {} ({:?})", self.info.id, self.info.mode);
        let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
-        try!(file.seek(SeekFrom::Start(self.content_start as u64)).context(&self.path as &Path));
-        let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size)+1024);
+        try!(
+            file.seek(SeekFrom::Start(self.content_start as u64))
+                .context(&self.path as &Path)
+        );
+        let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size) + 1024);
        try!(file.read_to_end(&mut data).context(&self.path as &Path));
        Ok(data)
    }
 
    fn decode_contents(&self, mut data: Vec<u8>) -> Result<Vec<u8>, BundleReaderError> {
        if let Some(ref encryption) = self.info.encryption {
-            data = try!(self.crypto.lock().unwrap().decrypt(encryption, &data).context(&self.path as &Path));
+            data = try!(
+                self.crypto
+                    .lock()
+                    .unwrap()
+                    .decrypt(encryption, &data)
+                    .context(&self.path as &Path)
+            );
        }
        if let Some(ref compression) = self.info.compression {
            let mut stream = try!(compression.decompress_stream().context(&self.path as &Path));
            let mut buffer = Vec::with_capacity(self.info.raw_size);
-            try!(stream.process(&data, &mut buffer).context(&self.path as &Path));
+            try!(stream.process(&data, &mut buffer).context(
+                &self.path as &Path
+            ));
            try!(stream.finish(&mut buffer).context(&self.path as &Path));
            data = buffer;
        }
@@ -178,12 +231,14 @@ impl BundleReader {
 
    #[inline]
    pub fn load_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
-        self.load_encoded_contents().and_then(|data| self.decode_contents(data))
+        self.load_encoded_contents().and_then(|data| {
+            self.decode_contents(data)
+        })
    }
 
    pub fn get_chunk_position(&mut self, id: usize) -> Result<(usize, usize), BundleReaderError> {
        if id >= self.info.chunk_count {
-            return Err(BundleReaderError::NoSuchChunk(self.id(), id))
+            return Err(BundleReaderError::NoSuchChunk(self.id(), id));
        }
        if self.chunks.is_none() || self.chunk_positions.is_none() {
            try!(self.load_chunklist());
@@ -198,30 +253,46 @@ impl BundleReader {
            try!(self.load_chunklist());
        }
        if self.info.chunk_count != self.chunks.as_ref().unwrap().len() {
-            return Err(BundleReaderError::Integrity(self.id(),
"Chunk list size does not match chunk count")) + return Err(BundleReaderError::Integrity( + self.id(), + "Chunk list size does not match chunk count" + )); } - if self.chunks.as_ref().unwrap().iter().map(|c| c.1 as usize).sum::() != self.info.raw_size { - return Err(BundleReaderError::Integrity(self.id(), - "Individual chunk sizes do not add up to total size")) + if self.chunks + .as_ref() + .unwrap() + .iter() + .map(|c| c.1 as usize) + .sum::() != self.info.raw_size + { + return Err(BundleReaderError::Integrity( + self.id(), + "Individual chunk sizes do not add up to total size" + )); } if !full { let size = try!(fs::metadata(&self.path).context(&self.path as &Path)).len(); if size as usize != self.info.encoded_size + self.content_start { - return Err(BundleReaderError::Integrity(self.id(), - "File size does not match size in header, truncated file")) + return Err(BundleReaderError::Integrity( + self.id(), + "File size does not match size in header, truncated file" + )); } - return Ok(()) + return Ok(()); } let encoded_contents = try!(self.load_encoded_contents()); if self.info.encoded_size != encoded_contents.len() { - return Err(BundleReaderError::Integrity(self.id(), - "Encoded data size does not match size in header, truncated bundle")) + return Err(BundleReaderError::Integrity( + self.id(), + "Encoded data size does not match size in header, truncated bundle" + )); } let contents = try!(self.decode_contents(encoded_contents)); if self.info.raw_size != contents.len() { - return Err(BundleReaderError::Integrity(self.id(), - "Raw data size does not match size in header, truncated bundle")) + return Err(BundleReaderError::Integrity( + self.id(), + "Raw data size does not match size in header, truncated bundle" + )); } //TODO: verify checksum Ok(()) @@ -230,8 +301,15 @@ impl BundleReader { impl Debug for BundleReader { fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!(fmt, "Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)", - self.info.id.to_string(), self.path, self.info.chunk_count, self.info.raw_size, - self.info.encoded_size, self.info.compression) + write!( + fmt, + "Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)", + self.info.id.to_string(), + self.path, + self.info.chunk_count, + self.info.raw_size, + self.info.encoded_size, + self.info.compression + ) } } diff --git a/src/bundledb/uploader.rs b/src/bundledb/uploader.rs index 6c9479b..3428f09 100644 --- a/src/bundledb/uploader.rs +++ b/src/bundledb/uploader.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::sync::atomic::{Ordering, AtomicBool, AtomicUsize}; use std::sync::{Mutex, Condvar, Arc}; @@ -28,7 +28,10 @@ impl BundleUploader { wait: (Condvar::new(), Mutex::new(())) }); let self2 = self_.clone(); - thread::Builder::new().name("uploader".to_string()).spawn(move || self2.worker_thread()).unwrap(); + thread::Builder::new() + .name("uploader".to_string()) + .spawn(move || self2.worker_thread()) + .unwrap(); self_ } diff --git a/src/bundledb/writer.rs b/src/bundledb/writer.rs index 9dc767b..0dc7b68 100644 --- a/src/bundledb/writer.rs +++ b/src/bundledb/writer.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use super::*; use std::path::{Path, PathBuf}; @@ -54,14 +54,22 @@ pub struct BundleWriter { crypto: Arc>, raw_size: usize, chunk_count: usize, - chunks: ChunkList, + chunks: ChunkList } impl BundleWriter { - pub fn new(mode: BundleMode, hash_method: HashMethod, compression: Option, 
+    pub fn new(
+        mode: BundleMode,
+        hash_method: HashMethod,
+        compression: Option<Compression>,
+        encryption: Option<Encryption>,
+        crypto: Arc<Mutex<Crypto>>,
+    ) -> Result<Self, BundleWriterError> {
        let compression_stream = match compression {
-            Some(ref compression) => Some(try!(compression.compress_stream().map_err(BundleWriterError::CompressionSetup))),
-            None => None
+            Some(ref compression) => Some(try!(compression.compress_stream().map_err(
+                BundleWriterError::CompressionSetup
+            ))),
+            None => None,
        };
        Ok(BundleWriter {
            mode: mode,
@@ -79,19 +87,23 @@ impl BundleWriter {
 
    pub fn add(&mut self, chunk: &[u8], hash: Hash) -> Result<usize, BundleWriterError> {
        if let Some(ref mut stream) = self.compression_stream {
-            try!(stream.process(chunk, &mut self.data).map_err(BundleWriterError::Compression))
+            try!(stream.process(chunk, &mut self.data).map_err(
+                BundleWriterError::Compression
+            ))
        } else {
            self.data.extend_from_slice(chunk)
        }
        self.raw_size += chunk.len();
        self.chunk_count += 1;
        self.chunks.push((hash, chunk.len() as u32));
-        Ok(self.chunk_count-1)
+        Ok(self.chunk_count - 1)
    }
 
    pub fn finish(mut self, db: &BundleDb) -> Result<StoredBundle, BundleWriterError> {
        if let Some(stream) = self.compression_stream {
-            try!(stream.finish(&mut self.data).map_err(BundleWriterError::Compression))
+            try!(stream.finish(&mut self.data).map_err(
+                BundleWriterError::Compression
+            ))
        }
        if let Some(ref encryption) = self.encryption {
            self.data = try!(self.crypto.lock().unwrap().encrypt(encryption, &self.data));
@@ -127,12 +139,19 @@ impl BundleWriter {
            encryption: self.encryption,
            info_size: info_data.len()
        };
-        try!(msgpack::encode_to_stream(&header, &mut file).context(&path as &Path));
+        try!(msgpack::encode_to_stream(&header, &mut file).context(
+            &path as &Path
+        ));
        try!(file.write_all(&info_data).context(&path as &Path));
        try!(file.write_all(&chunk_data).context(&path as &Path));
        try!(file.write_all(&self.data).context(&path as &Path));
-        path = path.strip_prefix(db.layout.base_path()).unwrap().to_path_buf();
-        Ok(StoredBundle { path: path, info: info })
+        path = path.strip_prefix(db.layout.base_path())
+            .unwrap()
+            .to_path_buf();
+        Ok(StoredBundle {
+            path: path,
+            info: info
+        })
    }
 
    #[inline]
diff --git a/src/chunker.rs b/src/chunker.rs
index 575ec47..337c311 100644
--- a/src/chunker.rs
+++ b/src/chunker.rs
@@ -25,13 +25,15 @@ impl ChunkerType {
            "rabin" => Ok(ChunkerType::Rabin((avg_size, seed as u32))),
            "fastcdc" => Ok(ChunkerType::FastCdc((avg_size, seed))),
            "fixed" => Ok(ChunkerType::Fixed(avg_size)),
-            _ => Err("Unsupported chunker type")
+            _ => Err("Unsupported chunker type"),
        }
    }
 
    pub fn from_string(name: &str) -> Result<Self, &'static str> {
        let (name, size) = if let Some(pos) = name.find('/') {
-            let size = try!(usize::from_str(&name[pos+1..]).map_err(|_| "Chunk size must be a number"));
+            let size = try!(usize::from_str(&name[pos + 1..]).map_err(
+                |_| "Chunk size must be a number"
+            ));
            let name = &name[..pos];
            (name, size)
        } else {
@@ -62,21 +64,23 @@ impl ChunkerType {
 
    pub fn avg_size(&self) -> usize {
        match *self {
-            ChunkerType::Ae(size) | ChunkerType::Fixed(size) => size,
+            ChunkerType::Ae(size) |
+            ChunkerType::Fixed(size) => size,
            ChunkerType::Rabin((size, _seed)) => size,
-            ChunkerType::FastCdc((size, _seed)) => size
+            ChunkerType::FastCdc((size, _seed)) => size,
        }
    }
 
    pub fn to_string(&self) -> String {
-        format!("{}/{}", self.name(), self.avg_size()/1024)
+        format!("{}/{}", self.name(), self.avg_size() / 1024)
    }
 
    pub fn seed(&self) -> u64 {
        match *self {
-            ChunkerType::Ae(_size) | ChunkerType::Fixed(_size) => 0,
+            ChunkerType::Ae(_size) |
+            ChunkerType::Fixed(_size) => 0,
            ChunkerType::Rabin((_size, seed)) => seed as u64,
-            ChunkerType::FastCdc((_size, seed)) => seed
+            ChunkerType::FastCdc((_size, seed)) => seed,
        }
    }
 }
diff --git a/src/cli/algotest.rs b/src/cli/algotest.rs
index 69c87b1..0ede62a 100644
--- a/src/cli/algotest.rs
+++ b/src/cli/algotest.rs
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 
 use std::io::{self, Cursor, Read, Write};
 use std::fs::File;
@@ -41,7 +41,14 @@ fn chunk(data: &[u8], mut chunker: Box<Chunker>, sink: &mut ChunkSink) {
 }
 
 #[allow(dead_code)]
-pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Option<Compression>, encrypt: bool,hash: HashMethod) {
+pub fn run(
+    path: &str,
+    bundle_size: usize,
+    chunker: ChunkerType,
+    compression: Option<Compression>,
+    encrypt: bool,
+    hash: HashMethod,
+) {
    let mut total_write_time = 0.0;
    let mut total_read_time = 0.0;
 
@@ -50,42 +57,64 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
    let total_size = file.metadata().unwrap().len();
    let mut size = total_size;
    let mut data = Vec::with_capacity(size as usize);
-    let read_time = Duration::span(|| {
-        file.read_to_end(&mut data).unwrap();
-    }).num_milliseconds() as f32 / 1_000.0;
-    println!("- {}, {}", to_duration(read_time), to_speed(size, read_time));
+    let read_time = Duration::span(|| { file.read_to_end(&mut data).unwrap(); })
+        .num_milliseconds() as f32 / 1_000.0;
+    println!(
+        "- {}, {}",
+        to_duration(read_time),
+        to_speed(size, read_time)
+    );
 
    println!();
-    println!("Chunking data with {}, avg chunk size {} ...", chunker.name(), to_file_size(chunker.avg_size() as u64));
+    println!(
+        "Chunking data with {}, avg chunk size {} ...",
+        chunker.name(),
+        to_file_size(chunker.avg_size() as u64)
+    );
    let mut chunk_sink = ChunkSink {
-        chunks: Vec::with_capacity(2*size as usize/chunker.avg_size()),
+        chunks: Vec::with_capacity(2 * size as usize / chunker.avg_size()),
        written: 0,
        pos: 0
    };
    let chunker = chunker.create();
-    let chunk_time = Duration::span(|| {
-        chunk(&data, chunker, &mut chunk_sink)
-    }).num_milliseconds() as f32 / 1_000.0;
+    let chunk_time = Duration::span(|| chunk(&data, chunker, &mut chunk_sink))
+        .num_milliseconds() as f32 / 1_000.0;
    total_write_time += chunk_time;
-    println!("- {}, {}", to_duration(chunk_time), to_speed(size, chunk_time));
+    println!(
+        "- {}, {}",
+        to_duration(chunk_time),
+        to_speed(size, chunk_time)
+    );
    let mut chunks = chunk_sink.chunks;
    assert_eq!(chunks.iter().map(|c| c.1).sum::<usize>(), size as usize);
    let chunk_size_avg = size as f32 / chunks.len() as f32;
-    let chunk_size_stddev = (chunks.iter().map(|c| (c.1 as f32 - chunk_size_avg).powi(2)).sum::<f32>() / (chunks.len() as f32 - 1.0)).sqrt();
-    println!("- {} chunks, avg size: {} ±{}", chunks.len(), to_file_size(chunk_size_avg as u64), to_file_size(chunk_size_stddev as u64));
+    let chunk_size_stddev = (chunks
+        .iter()
+        .map(|c| (c.1 as f32 - chunk_size_avg).powi(2))
+        .sum::<f32>() /
+        (chunks.len() as f32 - 1.0))
+        .sqrt();
+    println!(
+        "- {} chunks, avg size: {} ±{}",
+        chunks.len(),
+        to_file_size(chunk_size_avg as u64),
+        to_file_size(chunk_size_stddev as u64)
+    );
 
    println!();
    println!("Hashing chunks with {} ...", hash.name());
    let mut hashes = Vec::with_capacity(chunks.len());
-    let hash_time = Duration::span(|| {
-        for &(pos, len) in &chunks {
-            hashes.push(hash.hash(&data[pos..pos+len]))
-        }
+    let hash_time = Duration::span(|| for &(pos, len) in &chunks {
+        hashes.push(hash.hash(&data[pos..pos + len]))
    }).num_milliseconds() as f32 / 1_000.0;
    total_write_time += hash_time;
-    println!("- {}, {}", to_duration(hash_time), to_speed(size, hash_time));
+    println!(
+        "- {}, {}",
+        to_duration(hash_time),
+        to_speed(size, hash_time)
+    );
    let mut seen_hashes = HashSet::with_capacity(hashes.len());
    let mut dups = Vec::new();
    for (i, hash) in hashes.into_iter().enumerate() {
@@ -99,7 +128,12 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
        let (_, len) = chunks.remove(*i);
        dup_size += len;
    }
-    println!("- {} duplicate chunks, {}, {:.1}% saved", dups.len(), to_file_size(dup_size as u64), dup_size as f32 / size as f32*100.0);
+    println!(
+        "- {} duplicate chunks, {}, {:.1}% saved",
+        dups.len(),
+        to_file_size(dup_size as u64),
+        dup_size as f32 / size as f32 * 100.0
+    );
    size -= dup_size as u64;
 
    let mut bundles = Vec::new();
@@ -109,14 +143,14 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
        println!("Compressing chunks with {} ...", compression.to_string());
        let compress_time = Duration::span(|| {
-            let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+            let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
            let mut c = compression.compress_stream().unwrap();
            for &(pos, len) in &chunks {
-                c.process(&data[pos..pos+len], &mut bundle).unwrap();
+                c.process(&data[pos..pos + len], &mut bundle).unwrap();
                if bundle.len() >= bundle_size {
                    c.finish(&mut bundle).unwrap();
                    bundles.push(bundle);
-                    bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+                    bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
                    c = compression.compress_stream().unwrap();
                }
            }
@@ -124,17 +158,26 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
            bundles.push(bundle);
        }).num_milliseconds() as f32 / 1_000.0;
        total_write_time += compress_time;
-        println!("- {}, {}", to_duration(compress_time), to_speed(size, compress_time));
+        println!(
+            "- {}, {}",
+            to_duration(compress_time),
+            to_speed(size, compress_time)
+        );
        let compressed_size = bundles.iter().map(|b| b.len()).sum::<usize>();
-        println!("- {} bundles, {}, {:.1}% saved", bundles.len(), to_file_size(compressed_size as u64), (size as f32 - compressed_size as f32)/size as f32*100.0);
+        println!(
+            "- {} bundles, {}, {:.1}% saved",
+            bundles.len(),
+            to_file_size(compressed_size as u64),
+            (size as f32 - compressed_size as f32) / size as f32 * 100.0
+        );
        size = compressed_size as u64;
    } else {
-        let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+        let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
        for &(pos, len) in &chunks {
-            bundle.extend_from_slice(&data[pos..pos+len]);
+            bundle.extend_from_slice(&data[pos..pos + len]);
            if bundle.len() >= bundle_size {
                bundles.push(bundle);
-                bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
+                bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
            }
        }
        bundles.push(bundle);
@@ -151,24 +194,28 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
 
        println!("Encrypting bundles...");
        let mut encrypted_bundles = Vec::with_capacity(bundles.len());
-        let encrypt_time = Duration::span(|| {
-            for bundle in bundles {
-                encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
-            }
+        let encrypt_time = Duration::span(|| for bundle in bundles {
+            encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
        }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(encrypt_time), to_speed(size, encrypt_time));
+        println!(
+            "- {}, {}",
+            to_duration(encrypt_time),
+            to_speed(size, encrypt_time)
+        );
        total_write_time += encrypt_time;
 
        println!();
        println!("Decrypting bundles...");
        bundles = Vec::with_capacity(encrypted_bundles.len());
-        let decrypt_time = Duration::span(|| {
-            for bundle in encrypted_bundles {
-                bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
-            }
+        let decrypt_time = Duration::span(|| for bundle in encrypted_bundles {
+            bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
        }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(decrypt_time), to_speed(size, decrypt_time));
+        println!(
+            "- {}, {}",
+            to_duration(decrypt_time),
+            to_speed(size, decrypt_time)
+        );
        total_read_time += decrypt_time;
    }
 
@@ -176,21 +223,38 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
        println!();
        println!("Decompressing bundles with {} ...", compression.to_string());
-        let mut dummy = ChunkSink { chunks: vec![], written: 0, pos: 0 };
-        let decompress_time = Duration::span(|| {
-            for bundle in &bundles {
-                let mut c = compression.decompress_stream().unwrap();
-                c.process(bundle, &mut dummy).unwrap();
-                c.finish(&mut dummy).unwrap();
-            }
+        let mut dummy = ChunkSink {
+            chunks: vec![],
+            written: 0,
+            pos: 0
+        };
+        let decompress_time = Duration::span(|| for bundle in &bundles {
+            let mut c = compression.decompress_stream().unwrap();
+            c.process(bundle, &mut dummy).unwrap();
+            c.finish(&mut dummy).unwrap();
        }).num_milliseconds() as f32 / 1_000.0;
-        println!("- {}, {}", to_duration(decompress_time), to_speed(total_size - dup_size as u64, decompress_time));
+        println!(
+            "- {}, {}",
+            to_duration(decompress_time),
+            to_speed(total_size - dup_size as u64, decompress_time)
+        );
        total_read_time += decompress_time;
    }
 
    println!();
-    println!("Total storage size: {} / {}, ratio: {:.1}%", to_file_size(size as u64), to_file_size(total_size as u64), size as f32/total_size as f32*100.0);
-    println!("Total processing speed: {}", to_speed(total_size, total_write_time));
-    println!("Total read speed: {}", to_speed(total_size, total_read_time));
+    println!(
+        "Total storage size: {} / {}, ratio: {:.1}%",
+        to_file_size(size as u64),
+        to_file_size(total_size as u64),
+        size as f32 / total_size as f32 * 100.0
+    );
+    println!(
+        "Total processing speed: {}",
+        to_speed(total_size, total_write_time)
+    );
+    println!(
+        "Total read speed: {}",
+        to_speed(total_size, total_read_time)
+    );
 }
diff --git a/src/cli/args.rs b/src/cli/args.rs
index b769415..fa74515 100644
--- a/src/cli/args.rs
+++ b/src/cli/args.rs
@@ -1,4 +1,4 @@
-use ::prelude::*;
+use prelude::*;
 use super::*;
 
 use std::path::{Path, PathBuf};
@@ -78,7 +78,7 @@ pub enum Arguments {
        repo_path_src: PathBuf,
        backup_name_src: String,
        repo_path_dst: PathBuf,
-        backup_name_dst: String,
+        backup_name_dst: String
    },
    Mount {
        repo_path: PathBuf,
@@ -86,10 +86,7 @@ pub enum Arguments {
        inode: Option<String>,
        mount_point: String
    },
-    Versions {
-        repo_path: PathBuf,
-        path: String
-    },
+    Versions { repo_path: PathBuf, path: String },
    Diff {
        repo_path_old: PathBuf,
        backup_name_old: String,
@@ -98,12 +95,8 @@ pub enum Arguments {
        backup_name_new: String,
        inode_new: Option<String>
    },
-    Analyze {
-        repo_path: PathBuf
-    },
-    BundleList {
-        repo_path: PathBuf
-    },
+    Analyze { repo_path: PathBuf },
+    BundleList { repo_path: PathBuf },
    BundleInfo {
        repo_path: PathBuf,
        bundle_id: BundleId
@@ -154,7 +147,12 @@ fn convert_repo_path(mut path_str: &str) -> PathBuf {
    }
 }
 
-fn parse_repo_path(repo_path: &str, existing: bool, backup_restr: Option<bool>, path_restr: Option<bool>) -> Result<(PathBuf, Option<&str>, Option<&str>), String> {
+fn parse_repo_path(
+    repo_path: &str,
+    existing: bool,
+    backup_restr: Option<bool>,
+    path_restr: Option<bool>,
+) -> Result<(PathBuf, Option<&str>, Option<&str>), String> {
    let mut parts = repo_path.splitn(3, "::");
    let repo = convert_repo_path(parts.next().unwrap_or(""));
    if existing && !repo.join("config.yaml").exists() {
@@ -194,8 +192,13 @@ fn parse_repo_path(repo_path: &str, existing: bool, backup_restr: Option<bool>,
    Ok((repo, backup, path))
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
-fn validate_repo_path(repo_path: String, existing: bool, backup_restr: Option<bool>, path_restr: Option<bool>) -> Result<(), String> {
+#[allow(unknown_lints, needless_pass_by_value)]
+fn validate_repo_path(
+    repo_path: String,
+    existing: bool,
+    backup_restr: Option<bool>,
+    path_restr: Option<bool>,
+) -> Result<(), String> {
    parse_repo_path(&repo_path, existing, backup_restr, path_restr).map(|_| ())
 }
 
@@ -207,7 +210,7 @@ fn parse_num(num: &str) -> Result<u64, String> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_num(val: String) -> Result<(), String> {
    parse_num(&val).map(|_| ())
 }
@@ -220,14 +223,14 @@ fn parse_chunker(val: &str) -> Result<ChunkerType, String> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_chunker(val: String) -> Result<(), String> {
    parse_chunker(&val).map(|_| ())
 }
 
 fn parse_compression(val: &str) -> Result<Option<Compression>, String> {
    if val == "none" {
-        return Ok(None)
+        return Ok(None);
    }
    if let Ok(compression) = Compression::from_string(val) {
        Ok(Some(compression))
@@ -236,7 +239,7 @@ fn parse_compression(val: &str) -> Result<Option<Compression>, String> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_compression(val: String) -> Result<(), String> {
    parse_compression(&val).map(|_| ())
 }
@@ -254,11 +257,11 @@ fn parse_public_key(val: &str) -> Result<Option<PublicKey>, String> {
    if let Some(key) = PublicKey::from_slice(&bytes) {
        Ok(Some(key))
    } else {
-        return Err("Invalid key".to_string())
+        return Err("Invalid key".to_string());
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_public_key(val: String) -> Result<(), String> {
    parse_public_key(&val).map(|_| ())
 }
@@ -271,7 +274,7 @@ fn parse_hash(val: &str) -> Result<HashMethod, String> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_hash(val: String) -> Result<(), String> {
    parse_hash(&val).map(|_| ())
 }
@@ -285,7 +288,7 @@ fn parse_bundle_id(val: &str) -> Result<BundleId, ErrorCode> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_existing_path(val: String) -> Result<(), String> {
    if !Path::new(&val).exists() {
        Err("Path does not exist".to_string())
@@ -294,7 +297,7 @@ fn validate_existing_path(val: String) -> Result<(), String> {
    }
 }
 
-#[allow(unknown_lints,needless_pass_by_value)]
+#[allow(unknown_lints, needless_pass_by_value)]
 fn validate_existing_path_or_stdio(val: String) -> Result<(), String> {
    if val != "-" && !Path::new(&val).exists() {
        Err("Path does not exist".to_string())
@@ -304,7 +307,7 @@ fn validate_existing_path_or_stdio(val: String) -> Result<(), String> {
 }
 
 
-#[allow(unknown_lints,cyclomatic_complexity)]
+#[allow(unknown_lints, cyclomatic_complexity)]
 pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
App::new("zvault").version(crate_version!()).author(crate_authors!(",\n")).about(crate_description!()) .settings(&[AppSettings::VersionlessSubcommands, AppSettings::SubcommandRequiredElseHelp]) @@ -454,19 +457,31 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> { .default_value(DEFAULT_HASH).validator(validate_hash)) .arg(Arg::from_usage(" 'File with test data'") .validator(validate_existing_path))).get_matches(); - let verbose_count = args.subcommand().1.map(|m| m.occurrences_of("verbose")).unwrap_or(0) + args.occurrences_of("verbose"); - let quiet_count= args.subcommand().1.map(|m| m.occurrences_of("quiet")).unwrap_or(0) + args.occurrences_of("quiet"); + let verbose_count = args.subcommand() + .1 + .map(|m| m.occurrences_of("verbose")) + .unwrap_or(0) + args.occurrences_of("verbose"); + let quiet_count = args.subcommand() + .1 + .map(|m| m.occurrences_of("quiet")) + .unwrap_or(0) + args.occurrences_of("quiet"); let log_level = match 1 + verbose_count - quiet_count { 0 => LogLevel::Warn, 1 => LogLevel::Info, 2 => LogLevel::Debug, - _ => LogLevel::Trace + _ => LogLevel::Trace, }; let args = match args.subcommand() { ("init", Some(args)) => { - let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap(); + let (repository, _backup, _inode) = parse_repo_path( + args.value_of("REPO").unwrap(), + false, + Some(false), + Some(false) + ).unwrap(); Arguments::Init { - bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize, + bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * + 1024 * 1024) as usize, chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(), compression: parse_compression(args.value_of("compression").unwrap()).unwrap(), encryption: args.is_present("encrypt"), @@ -474,24 +489,32 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> { repo_path: repository, remote_path: args.value_of("remote").unwrap().to_string() } - }, + } ("backup", Some(args)) => { - let (repository, backup, _inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), Some(false)).unwrap(); + let (repository, backup, _inode) = parse_repo_path( + args.value_of("BACKUP").unwrap(), + true, + Some(true), + Some(false) + ).unwrap(); Arguments::Backup { repo_path: repository, backup_name: backup.unwrap().to_string(), full: args.is_present("full"), same_device: !args.is_present("cross_device"), - excludes: args.values_of("exclude").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![]), + excludes: args.values_of("exclude") + .map(|v| v.map(|k| k.to_string()).collect()) + .unwrap_or_else(|| vec![]), excludes_from: args.value_of("excludes_from").map(|v| v.to_string()), src_path: args.value_of("SRC").unwrap().to_string(), reference: args.value_of("reference").map(|v| v.to_string()), no_default_excludes: args.is_present("no_default_excludes"), tar: args.is_present("tar") } - }, + } ("restore", Some(args)) => { - let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap(); + let (repository, backup, inode) = + parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap(); Arguments::Restore { repo_path: repository, backup_name: backup.unwrap().to_string(), @@ -499,18 +522,24 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> { dst_path: args.value_of("DST").unwrap().to_string(), tar: args.is_present("tar") } - }, + } ("remove", Some(args)) => { - 
-            let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
            Arguments::Remove {
                repo_path: repository,
                backup_name: backup.unwrap().to_string(),
                inode: inode.map(|v| v.to_string()),
                force: args.is_present("force")
            }
-        },
+        }
        ("prune", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
            Arguments::Prune {
                repo_path: repository,
                prefix: args.value_of("prefix").unwrap_or("").to_string(),
@@ -520,18 +549,24 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                monthly: parse_num(args.value_of("monthly").unwrap()).unwrap() as usize,
                yearly: parse_num(args.value_of("yearly").unwrap()).unwrap() as usize
            }
-        },
+        }
        ("vacuum", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
            Arguments::Vacuum {
                repo_path: repository,
                force: args.is_present("force"),
                combine: args.is_present("combine"),
                ratio: parse_num(args.value_of("ratio").unwrap()).unwrap() as f32 / 100.0
            }
-        },
+        }
        ("check", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
            Arguments::Check {
                repo_path: repository,
                backup_name: backup.map(|v| v.to_string()),
@@ -541,127 +576,176 @@ pub fn parse() -> Result<(LogLevel, Arguments), ErrorCode> {
                index: args.is_present("index"),
                repair: args.is_present("repair")
            }
-        },
+        }
        ("list", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
            Arguments::List {
                repo_path: repository,
                backup_name: backup.map(|v| v.to_string()),
                inode: inode.map(|v| v.to_string())
            }
-        },
+        }
        ("bundlelist", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
-            Arguments::BundleList {
-                repo_path: repository,
-            }
-        },
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
+            Arguments::BundleList { repo_path: repository }
+        }
        ("bundleinfo", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
            Arguments::BundleInfo {
                repo_path: repository,
                bundle_id: try!(parse_bundle_id(args.value_of("BUNDLE").unwrap()))
            }
-        },
+        }
        ("info", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
            Arguments::Info {
                repo_path: repository,
                backup_name: backup.map(|v| v.to_string()),
                inode: inode.map(|v| v.to_string())
            }
-        },
+        }
        ("copy", Some(args)) => {
-            let (repository_src, backup_src, _inode) = parse_repo_path(args.value_of("SRC").unwrap(), true, Some(true), Some(false)).unwrap();
-            let (repository_dst, backup_dst, _inode) = parse_repo_path(args.value_of("DST").unwrap(), true, Some(true), Some(false)).unwrap();
+            let (repository_src, backup_src, _inode) =
+                parse_repo_path(args.value_of("SRC").unwrap(), true, Some(true), Some(false))
+                    .unwrap();
+            let (repository_dst, backup_dst, _inode) =
+                parse_repo_path(args.value_of("DST").unwrap(), true, Some(true), Some(false))
+                    .unwrap();
            Arguments::Copy {
                repo_path_src: repository_src,
                backup_name_src: backup_src.unwrap().to_string(),
                repo_path_dst: repository_dst,
-                backup_name_dst: backup_dst.unwrap().to_string(),
+                backup_name_dst: backup_dst.unwrap().to_string()
            }
-        },
+        }
        ("mount", Some(args)) => {
-            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            let (repository, backup, inode) =
+                parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
            Arguments::Mount {
                repo_path: repository,
                backup_name: backup.map(|v| v.to_string()),
                inode: inode.map(|v| v.to_string()),
                mount_point: args.value_of("MOUNTPOINT").unwrap().to_string()
            }
-        },
+        }
        ("versions", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
            Arguments::Versions {
                repo_path: repository,
                path: args.value_of("PATH").unwrap().to_string()
            }
-        },
+        }
        ("diff", Some(args)) => {
-            let (repository_old, backup_old, inode_old) = parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
-            let (repository_new, backup_new, inode_new) = parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
+            let (repository_old, backup_old, inode_old) =
+                parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
+            let (repository_new, backup_new, inode_new) =
+                parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
            Arguments::Diff {
                repo_path_old: repository_old,
                backup_name_old: backup_old.unwrap().to_string(),
                inode_old: inode_old.map(|v| v.to_string()),
                repo_path_new: repository_new,
                backup_name_new: backup_new.unwrap().to_string(),
-                inode_new: inode_new.map(|v| v.to_string()),
+                inode_new: inode_new.map(|v| v.to_string())
            }
-        },
+        }
        ("analyze", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
-            Arguments::Analyze {
-                repo_path: repository
-            }
-        },
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                true,
+                Some(false),
+                Some(false)
+            ).unwrap();
+            Arguments::Analyze { repo_path: repository }
+        }
        ("import", Some(args)) => {
-            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap();
+            let (repository, _backup, _inode) = parse_repo_path(
+                args.value_of("REPO").unwrap(),
+                false,
+                Some(false),
+                Some(false)
+            ).unwrap();
            Arguments::Import {
                repo_path: repository,
                remote_path: args.value_of("REMOTE").unwrap().to_string(),
-                key_files: args.values_of("key").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![])
args.values_of("key") + .map(|v| v.map(|k| k.to_string()).collect()) + .unwrap_or_else(|| vec![]) } - }, + } ("config", Some(args)) => { - let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap(); + let (repository, _backup, _inode) = parse_repo_path( + args.value_of("REPO").unwrap(), + true, + Some(false), + Some(false) + ).unwrap(); Arguments::Config { - bundle_size: args.value_of("bundle_size").map(|v| parse_num(v).unwrap() as usize * 1024 * 1024), + bundle_size: args.value_of("bundle_size").map(|v| { + parse_num(v).unwrap() as usize * 1024 * 1024 + }), chunker: args.value_of("chunker").map(|v| parse_chunker(v).unwrap()), - compression: args.value_of("compression").map(|v| parse_compression(v).unwrap()), - encryption: args.value_of("encryption").map(|v| parse_public_key(v).unwrap()), + compression: args.value_of("compression").map(|v| { + parse_compression(v).unwrap() + }), + encryption: args.value_of("encryption").map( + |v| parse_public_key(v).unwrap() + ), hash: args.value_of("hash").map(|v| parse_hash(v).unwrap()), - repo_path: repository, + repo_path: repository } - }, + } ("genkey", Some(args)) => { Arguments::GenKey { file: args.value_of("FILE").map(|v| v.to_string()), password: args.value_of("password").map(|v| v.to_string()) } - }, + } ("addkey", Some(args)) => { - let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap(); + let (repository, _backup, _inode) = parse_repo_path( + args.value_of("REPO").unwrap(), + true, + Some(false), + Some(false) + ).unwrap(); Arguments::AddKey { repo_path: repository, set_default: args.is_present("set_default"), password: args.value_of("password").map(|v| v.to_string()), file: args.value_of("FILE").map(|v| v.to_string()) } - }, + } ("algotest", Some(args)) => { Arguments::AlgoTest { - bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize, + bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * + 1024 * 1024) as usize, chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(), compression: parse_compression(args.value_of("compression").unwrap()).unwrap(), encrypt: args.is_present("encrypt"), hash: parse_hash(args.value_of("hash").unwrap()).unwrap(), - file: args.value_of("FILE").unwrap().to_string(), + file: args.value_of("FILE").unwrap().to_string() } - }, + } _ => { error!("No subcommand given"); - return Err(ErrorCode::InvalidArgs) + return Err(ErrorCode::InvalidArgs); } }; Ok((log_level, args)) diff --git a/src/cli/logger.rs b/src/cli/logger.rs index 26e09fa..532bfb4 100644 --- a/src/cli/logger.rs +++ b/src/cli/logger.rs @@ -22,11 +22,23 @@ impl log::Log for Logger { fn log(&self, record: &LogRecord) { if self.enabled(record.metadata()) { match record.level() { - LogLevel::Error => println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args()), - LogLevel::Warn => println_stderr!("{}: {}", Color::Yellow.bold().paint("warning"), record.args()), - LogLevel::Info => println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args()), - LogLevel::Debug => println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args()), - LogLevel::Trace => println_stderr!("{}: {}", "trace", record.args()) + LogLevel::Error => { + println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args()) + } + LogLevel::Warn => { + println_stderr!( + "{}: {}", + Color::Yellow.bold().paint("warning"), + record.args() + ) + } + 
+                LogLevel::Info => {
+                    println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args())
+                }
+                LogLevel::Debug => {
+                    println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args())
+                }
+                LogLevel::Trace => println_stderr!("{}: {}", "trace", record.args()),
            }
        }
    }
diff --git a/src/cli/mod.rs b/src/cli/mod.rs
index 37ce4eb..314ad28 100644
--- a/src/cli/mod.rs
+++ b/src/cli/mod.rs
@@ -2,7 +2,7 @@ mod args;
 mod logger;
 mod algotest;
 
-use ::prelude::*;
+use prelude::*;
 
 use chrono::prelude::*;
 use regex::{self, RegexSet};
@@ -18,22 +18,41 @@ use self::args::Arguments;
 
 
 pub enum ErrorCode {
-    UnsafeArgs, InvalidArgs,
+    UnsafeArgs,
+    InvalidArgs,
    InitializeLogger,
    CreateRepository,
-    LoadRepository, SaveBackup, LoadBackup, LoadInode, LoadBundle,
-    NoSuchBackup, BackupAlreadyExists,
-    AddKey, LoadKey, SaveKey,
+    LoadRepository,
+    SaveBackup,
+    LoadBackup,
+    LoadInode,
+    LoadBundle,
+    NoSuchBackup,
+    BackupAlreadyExists,
+    AddKey,
+    LoadKey,
+    SaveKey,
    SaveConfig,
-    LoadExcludes, InvalidExcludes,
-    BackupRun, RestoreRun, RemoveRun, PruneRun, VacuumRun, CheckRun, AnalyzeRun, DiffRun,
-    VersionsRun, ImportRun, FuseMount
+    LoadExcludes,
+    InvalidExcludes,
+    BackupRun,
+    RestoreRun,
+    RemoveRun,
+    PruneRun,
+    VacuumRun,
+    CheckRun,
+    AnalyzeRun,
+    DiffRun,
+    VersionsRun,
+    ImportRun,
+    FuseMount
 }
 impl ErrorCode {
    pub fn code(&self) -> i32 {
        match *self {
            // Crazy stuff
-            ErrorCode::InitializeLogger | ErrorCode::InvalidExcludes => -1,
+            ErrorCode::InitializeLogger |
+            ErrorCode::InvalidExcludes => -1,
            // Arguments
            ErrorCode::InvalidArgs => 1,
            ErrorCode::UnsafeArgs => 2,
@@ -64,7 +83,7 @@ impl ErrorCode {
            ErrorCode::FuseMount => 24,
            //
            ErrorCode::NoSuchBackup => 25,
-            ErrorCode::BackupAlreadyExists => 26
+            ErrorCode::BackupAlreadyExists => 26,
        }
    }
 }
@@ -94,32 +113,44 @@ macro_rules! checked {
checked { } fn open_repository(path: &Path) -> Result { - Ok(checked!(Repository::open(path), "load repository", ErrorCode::LoadRepository)) + Ok(checked!( + Repository::open(path), + "load repository", + ErrorCode::LoadRepository + )) } fn get_backup(repo: &Repository, backup_name: &str) -> Result { if !repo.has_backup(backup_name) { error!("A backup with that name does not exist"); - return Err(ErrorCode::NoSuchBackup) + return Err(ErrorCode::NoSuchBackup); } - Ok(checked!(repo.get_backup(backup_name), "load backup", ErrorCode::LoadBackup)) + Ok(checked!( + repo.get_backup(backup_name), + "load backup", + ErrorCode::LoadBackup + )) } -fn find_reference_backup(repo: &Repository, path: &str) -> Result, ErrorCode> { +fn find_reference_backup( + repo: &Repository, + path: &str, +) -> Result, ErrorCode> { let mut matching = Vec::new(); let hostname = match get_hostname() { Ok(hostname) => hostname, - Err(_) => return Ok(None) + Err(_) => return Ok(None), }; let backup_map = match repo.get_all_backups() { Ok(backup_map) => backup_map, - Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => { + Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, + _failed))) => { warn!("Some backups could not be read, ignoring them"); backup_map - }, + } Err(err) => { error!("Failed to load backup files: {}", err); - return Err(ErrorCode::LoadBackup) + return Err(ErrorCode::LoadBackup); } }; for (name, backup) in backup_map { @@ -135,29 +166,82 @@ fn print_backup(backup: &Backup) { if backup.modified { warn!("This backup has been modified"); } - println!("Date: {}", Local.timestamp(backup.timestamp, 0).to_rfc2822()); + println!( + "Date: {}", + Local.timestamp(backup.timestamp, 0).to_rfc2822() + ); println!("Source: {}:{}", backup.host, backup.path); println!("Duration: {}", to_duration(backup.duration)); - println!("Entries: {} files, {} dirs", backup.file_count, backup.dir_count); - println!("Total backup size: {}", to_file_size(backup.total_data_size)); - println!("Modified data size: {}", to_file_size(backup.changed_data_size)); + println!( + "Entries: {} files, {} dirs", + backup.file_count, + backup.dir_count + ); + println!( + "Total backup size: {}", + to_file_size(backup.total_data_size) + ); + println!( + "Modified data size: {}", + to_file_size(backup.changed_data_size) + ); let dedup_ratio = backup.deduplicated_data_size as f32 / backup.changed_data_size as f32; - println!("Deduplicated size: {}, {:.1}% saved", to_file_size(backup.deduplicated_data_size), (1.0 - dedup_ratio)*100.0); + println!( + "Deduplicated size: {}, {:.1}% saved", + to_file_size(backup.deduplicated_data_size), + (1.0 - dedup_ratio) * 100.0 + ); let compress_ratio = backup.encoded_data_size as f32 / backup.deduplicated_data_size as f32; - println!("Compressed size: {} in {} bundles, {:.1}% saved", to_file_size(backup.encoded_data_size), backup.bundle_count, (1.0 - compress_ratio)*100.0); - println!("Chunk count: {}, avg size: {}", backup.chunk_count, to_file_size(backup.avg_chunk_size as u64)); + println!( + "Compressed size: {} in {} bundles, {:.1}% saved", + to_file_size(backup.encoded_data_size), + backup.bundle_count, + (1.0 - compress_ratio) * 100.0 + ); + println!( + "Chunk count: {}, avg size: {}", + backup.chunk_count, + to_file_size(backup.avg_chunk_size as u64) + ); } pub fn format_inode_one_line(inode: &Inode) -> String { match inode.file_type { - FileType::Directory => format!("{:25}\t{} entries", format!("{}/", inode.name), 
inode.children.as_ref().map(|c| c.len()).unwrap_or(0)), - FileType::File => format!("{:25}\t{:>10}\t{}", inode.name, to_file_size(inode.size), Local.timestamp(inode.timestamp, 0).to_rfc2822()), - FileType::Symlink => format!("{:25}\t -> {}", inode.name, inode.symlink_target.as_ref().map(|s| s as &str).unwrap_or("?")), + FileType::Directory => { + format!( + "{:25}\t{} entries", + format!("{}/", inode.name), + inode.children.as_ref().map(|c| c.len()).unwrap_or(0) + ) + } + FileType::File => { + format!( + "{:25}\t{:>10}\t{}", + inode.name, + to_file_size(inode.size), + Local.timestamp(inode.timestamp, 0).to_rfc2822() + ) + } + FileType::Symlink => { + format!( + "{:25}\t -> {}", + inode.name, + inode.symlink_target.as_ref().map(|s| s as &str).unwrap_or( + "?" + ) + ) + } FileType::BlockDevice | FileType::CharDevice => { let device = inode.device.unwrap_or((0, 0)); - format!("{:25}\t{:12}\t{}:{}", inode.name, inode.file_type, device.0, device.1) - }, - FileType::NamedPipe => format!("{:25}\t fifo", inode.name) + format!( + "{:25}\t{:12}\t{}:{}", + inode.name, + inode.file_type, + device.0, + device.1 + ) + } + FileType::NamedPipe => format!("{:25}\t fifo", inode.name), } } @@ -168,7 +252,10 @@ fn print_inode(inode: &Inode) { println!("Permissions: {:3o}", inode.mode); println!("User: {}", inode.user); println!("Group: {}", inode.group); - println!("Timestamp: {}", Local.timestamp(inode.timestamp, 0).to_rfc2822()); + println!( + "Timestamp: {}", + Local.timestamp(inode.timestamp, 0).to_rfc2822() + ); if let Some(ref target) = inode.symlink_target { println!("Symlink target: {}", target); } @@ -197,9 +284,14 @@ fn print_backups(backup_map: &HashMap) { let mut backups: Vec<_> = backup_map.into_iter().collect(); backups.sort_by_key(|b| b.0); for (name, backup) in backups { - println!("{:40} {:>32} {:7} files, {:6} dirs, {:>10}", - name, Local.timestamp(backup.timestamp, 0).to_rfc2822(), backup.file_count, - backup.dir_count, to_file_size(backup.total_data_size)); + println!( + "{:40} {:>32} {:7} files, {:6} dirs, {:>10}", + name, + Local.timestamp(backup.timestamp, 0).to_rfc2822(), + backup.file_count, + backup.dir_count, + to_file_size(backup.total_data_size) + ); } } @@ -209,16 +301,26 @@ fn print_repoinfo(info: &RepositoryInfo) { println!("Uncompressed size: {}", to_file_size(info.raw_data_size)); println!("Compression ratio: {:.1}%", info.compression_ratio * 100.0); println!("Chunk count: {}", info.chunk_count); - println!("Average chunk size: {}", to_file_size(info.avg_chunk_size as u64)); + println!( + "Average chunk size: {}", + to_file_size(info.avg_chunk_size as u64) + ); let index_usage = info.index_entries as f32 / info.index_capacity as f32; - println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0); + println!( + "Index: {}, {:.0}% full", + to_file_size(info.index_size as u64), + index_usage * 100.0 + ); } fn print_bundle(bundle: &StoredBundle) { println!("Bundle {}", bundle.info.id); println!(" - Mode: {:?}", bundle.info.mode); println!(" - Path: {:?}", bundle.path); - println!(" - Date: {}", Local.timestamp(bundle.info.timestamp, 0).to_rfc2822()); + println!( + " - Date: {}", + Local.timestamp(bundle.info.timestamp, 0).to_rfc2822() + ); println!(" - Hash method: {:?}", bundle.info.hash_method); let encryption = if let Some((_, ref key)) = bundle.info.encryption { to_hex(key) @@ -227,19 +329,35 @@ fn print_bundle(bundle: &StoredBundle) { }; println!(" - Encryption: {}", encryption); println!(" - Chunks: {}", bundle.info.chunk_count); - 
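`format_inode_one_line` leans entirely on `format!` width specifiers: `{:25}` left-pads the name column and `{:>10}` right-aligns sizes. A simplified re-creation over a stripped-down `Inode`, with fields reduced to what the formatting needs:

```rust
enum FileType { Directory, File, Symlink }

struct Inode {
    name: String,
    file_type: FileType,
    size: u64,
    children: Option<usize>,
    symlink_target: Option<String>,
}

fn format_one_line(inode: &Inode) -> String {
    match inode.file_type {
        FileType::Directory => format!(
            "{:25}\t{} entries",
            format!("{}/", inode.name), // directories get a trailing slash
            inode.children.unwrap_or(0)
        ),
        FileType::File => format!("{:25}\t{:>10}", inode.name, inode.size),
        FileType::Symlink => format!(
            "{:25}\t -> {}",
            inode.name,
            inode.symlink_target.as_deref().unwrap_or("?")
        ),
    }
}

fn main() {
    let dir = Inode {
        name: "docs".into(),
        file_type: FileType::Directory,
        size: 0,
        children: Some(3),
        symlink_target: None,
    };
    let link = Inode {
        name: "latest".into(),
        file_type: FileType::Symlink,
        size: 0,
        children: None,
        symlink_target: Some("docs/v2".into()),
    };
    println!("{}", format_one_line(&dir));
    println!("{}", format_one_line(&link));
}
```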
println!(" - Size: {}", to_file_size(bundle.info.encoded_size as u64)); - println!(" - Data size: {}", to_file_size(bundle.info.raw_size as u64)); + println!( + " - Size: {}", + to_file_size(bundle.info.encoded_size as u64) + ); + println!( + " - Data size: {}", + to_file_size(bundle.info.raw_size as u64) + ); let ratio = bundle.info.encoded_size as f32 / bundle.info.raw_size as f32; let compression = if let Some(ref c) = bundle.info.compression { c.to_string() } else { "none".to_string() }; - println!(" - Compression: {}, ratio: {:.1}%", compression, ratio * 100.0); + println!( + " - Compression: {}, ratio: {:.1}%", + compression, + ratio * 100.0 + ); } fn print_bundle_one_line(bundle: &BundleInfo) { - println!("{}: {:8?}, {:5} chunks, {:8}", bundle.id, bundle.mode, bundle.chunk_count, to_file_size(bundle.encoded_size as u64)) + println!( + "{}: {:8?}, {:5} chunks, {:8}", + bundle.id, + bundle.mode, + bundle.chunk_count, + to_file_size(bundle.encoded_size as u64) + ) } fn print_config(config: &Config) { @@ -264,7 +382,7 @@ fn print_analysis(analysis: &HashMap) { let mut data_total = 0; for bundle in analysis.values() { data_total += bundle.info.encoded_size; - #[allow(unknown_lints,needless_range_loop)] + #[allow(unknown_lints, needless_range_loop)] for i in 0..11 { if bundle.get_usage_ratio() <= i as f32 * 0.1 { reclaim_space[i] += bundle.get_unused_size(); @@ -274,57 +392,100 @@ fn print_analysis(analysis: &HashMap) { } println!("Total bundle size: {}", to_file_size(data_total as u64)); let used = data_total - reclaim_space[10]; - println!("Space used: {}, {:.1} %", to_file_size(used as u64), used as f32 / data_total as f32 * 100.0); + println!( + "Space used: {}, {:.1} %", + to_file_size(used as u64), + used as f32 / data_total as f32 * 100.0 + ); println!("Reclaimable space (depending on vacuum ratio)"); - #[allow(unknown_lints,needless_range_loop)] + #[allow(unknown_lints, needless_range_loop)] for i in 0..11 { - println!(" - ratio={:3}: {:>10}, {:4.1} %, rewriting {:>10}", i*10, to_file_size(reclaim_space[i] as u64), reclaim_space[i] as f32 / data_total as f32 * 100.0, to_file_size(rewrite_size[i] as u64)); + println!( + " - ratio={:3}: {:>10}, {:4.1} %, rewriting {:>10}", + i * 10, + to_file_size(reclaim_space[i] as u64), + reclaim_space[i] as f32 / data_total as f32 * 100.0, + to_file_size(rewrite_size[i] as u64) + ); } } -#[allow(unknown_lints,cyclomatic_complexity)] +#[allow(unknown_lints, cyclomatic_complexity)] pub fn run() -> Result<(), ErrorCode> { let (log_level, args) = try!(args::parse()); if let Err(err) = logger::init(log_level) { println!("Failed to initialize the logger: {}", err); - return Err(ErrorCode::InitializeLogger) + return Err(ErrorCode::InitializeLogger); } match args { - Arguments::Init{repo_path, bundle_size, chunker, compression, encryption, hash, remote_path} => { - if ! 
Path::new(&remote_path).is_absolute() { + Arguments::Init { + repo_path, + bundle_size, + chunker, + compression, + encryption, + hash, + remote_path + } => { + if !Path::new(&remote_path).is_absolute() { error!("The remote path of a repository must be absolute."); - return Err(ErrorCode::InvalidArgs) + return Err(ErrorCode::InvalidArgs); } - let mut repo = checked!(Repository::create(repo_path, Config { - bundle_size: bundle_size, - chunker: chunker, - compression: compression, - encryption: None, - hash: hash - }, remote_path), "create repository", ErrorCode::CreateRepository); + let mut repo = checked!( + Repository::create( + repo_path, + Config { + bundle_size: bundle_size, + chunker: chunker, + compression: compression, + encryption: None, + hash: hash + }, + remote_path + ), + "create repository", + ErrorCode::CreateRepository + ); if encryption { let (public, secret) = Crypto::gen_keypair(); info!("Created the following key pair"); println!("public: {}", to_hex(&public[..])); println!("secret: {}", to_hex(&secret[..])); repo.set_encryption(Some(&public)); - checked!(repo.register_key(public, secret), "add key", ErrorCode::AddKey); + checked!( + repo.register_key(public, secret), + "add key", + ErrorCode::AddKey + ); checked!(repo.save_config(), "save config", ErrorCode::SaveConfig); - warn!("Please store this key pair in a secure location before using the repository"); + warn!( + "Please store this key pair in a secure location before using the repository" + ); println!(); } print_config(&repo.config); - }, - Arguments::Backup{repo_path, backup_name, src_path, full, reference, same_device, mut excludes, excludes_from, no_default_excludes, tar} => { + } + Arguments::Backup { + repo_path, + backup_name, + src_path, + full, + reference, + same_device, + mut excludes, + excludes_from, + no_default_excludes, + tar + } => { let mut repo = try!(open_repository(&repo_path)); if repo.has_backup(&backup_name) { error!("A backup with that name already exists"); - return Err(ErrorCode::BackupAlreadyExists) + return Err(ErrorCode::BackupAlreadyExists); } if src_path == "-" && !tar { error!("Reading from stdin requires --tar"); - return Err(ErrorCode::InvalidArgs) + return Err(ErrorCode::InvalidArgs); } let mut reference_backup = None; if !full && !tar { @@ -332,8 +493,8 @@ pub fn run() -> Result<(), ErrorCode> { Some(r) => { let b = try!(get_backup(&repo, &r)); Some((r, b)) - }, - None => None + } + None => None, }; if reference_backup.is_none() { reference_backup = try!(find_reference_backup(&repo, &src_path)); @@ -346,31 +507,56 @@ pub fn run() -> Result<(), ErrorCode> { } let reference_backup = reference_backup.map(|(_, backup)| backup); if !no_default_excludes && !tar { - for line in BufReader::new(checked!(File::open(&repo.layout.excludes_path()), "open default excludes file", ErrorCode::LoadExcludes)).lines() { - excludes.push(checked!(line, "read default excludes file", ErrorCode::LoadExcludes)); + for line in BufReader::new(checked!( + File::open(&repo.layout.excludes_path()), + "open default excludes file", + ErrorCode::LoadExcludes + )).lines() + { + excludes.push(checked!( + line, + "read default excludes file", + ErrorCode::LoadExcludes + )); } } if let Some(excludes_from) = excludes_from { - for line in BufReader::new(checked!(File::open(excludes_from), "open excludes file", ErrorCode::LoadExcludes)).lines() { - excludes.push(checked!(line, "read excludes file", ErrorCode::LoadExcludes)); + for line in BufReader::new(checked!( + File::open(excludes_from), + "open excludes file", + 
ErrorCode::LoadExcludes + )).lines() + { + excludes.push(checked!( + line, + "read excludes file", + ErrorCode::LoadExcludes + )); } } let mut excludes_parsed = Vec::with_capacity(excludes.len()); for mut exclude in excludes { if exclude.starts_with('#') || exclude.is_empty() { - continue + continue; } - exclude = regex::escape(&exclude).replace('?', ".").replace(r"\*\*", ".*").replace(r"\*", "[^/]*"); + exclude = regex::escape(&exclude) + .replace('?', ".") + .replace(r"\*\*", ".*") + .replace(r"\*", "[^/]*"); excludes_parsed.push(if exclude.starts_with('/') { format!(r"^{}($|/)", exclude) } else { format!(r"/{}($|/)", exclude) }); - }; + } let excludes = if excludes_parsed.is_empty() { None } else { - Some(checked!(RegexSet::new(excludes_parsed), "parse exclude patterns", ErrorCode::InvalidExcludes)) + Some(checked!( + RegexSet::new(excludes_parsed), + "parse exclude patterns", + ErrorCode::InvalidExcludes + )) }; let options = BackupOptions { same_device: same_device, @@ -385,59 +571,115 @@ pub fn run() -> Result<(), ErrorCode> { Ok(backup) => { info!("Backup finished"); backup - }, + } Err(RepositoryError::Backup(BackupError::FailedPaths(backup, _failed_paths))) => { warn!("Some files are missing from the backup"); backup - }, + } Err(err) => { error!("Backup failed: {}", err); - return Err(ErrorCode::BackupRun) + return Err(ErrorCode::BackupRun); } }; - checked!(repo.save_backup(&backup, &backup_name), "save backup file", ErrorCode::SaveBackup); + checked!( + repo.save_backup(&backup, &backup_name), + "save backup file", + ErrorCode::SaveBackup + ); print_backup(&backup); - }, - Arguments::Restore{repo_path, backup_name, inode, dst_path, tar} => { + } + Arguments::Restore { + repo_path, + backup_name, + inode, + dst_path, + tar + } => { let mut repo = try!(open_repository(&repo_path)); let backup = try!(get_backup(&repo, &backup_name)); let inode = if let Some(inode) = inode { - checked!(repo.get_backup_inode(&backup, &inode), "load subpath inode", ErrorCode::LoadInode) + checked!( + repo.get_backup_inode(&backup, &inode), + "load subpath inode", + ErrorCode::LoadInode + ) } else { - checked!(repo.get_inode(&backup.root), "load root inode", ErrorCode::LoadInode) + checked!( + repo.get_inode(&backup.root), + "load root inode", + ErrorCode::LoadInode + ) }; if tar { - checked!(repo.export_tarfile(&backup, inode, &dst_path), "restore backup", ErrorCode::RestoreRun); + checked!( + repo.export_tarfile(&backup, inode, &dst_path), + "restore backup", + ErrorCode::RestoreRun + ); } else { - checked!(repo.restore_inode_tree(&backup, inode, &dst_path), "restore backup", ErrorCode::RestoreRun); + checked!( + repo.restore_inode_tree(&backup, inode, &dst_path), + "restore backup", + ErrorCode::RestoreRun + ); } info!("Restore finished"); - }, - Arguments::Copy{repo_path_src, backup_name_src, repo_path_dst, backup_name_dst} => { + } + Arguments::Copy { + repo_path_src, + backup_name_src, + repo_path_dst, + backup_name_dst + } => { if repo_path_src != repo_path_dst { error!("Can only run copy on same repository"); - return Err(ErrorCode::InvalidArgs) + return Err(ErrorCode::InvalidArgs); } let mut repo = try!(open_repository(&repo_path_src)); if repo.has_backup(&backup_name_dst) { error!("A backup with that name already exists"); - return Err(ErrorCode::BackupAlreadyExists) + return Err(ErrorCode::BackupAlreadyExists); } let backup = try!(get_backup(&repo, &backup_name_src)); - checked!(repo.save_backup(&backup, &backup_name_dst), "save backup file", ErrorCode::SaveBackup); - }, - 
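The exclude handling above turns shell-style globs into a single `RegexSet`: escape the pattern, map the glob wildcards back to regex syntax, and anchor patterns that start with `/` at the beginning of the path. The sketch below replaces the escaped sequences (`\?`, `\*\*`, `\*`) rather than the bare `?`, since `regex::escape` escapes `?` as well; replacing the bare character after escaping would turn a glob `?` into a literal-dot match:

```rust
use regex::RegexSet;

fn glob_to_regex(exclude: &str) -> String {
    let pattern = regex::escape(exclude)
        .replace(r"\?", ".")      // glob ? -> any single character
        .replace(r"\*\*", ".*")   // glob ** -> anything, across slashes
        .replace(r"\*", "[^/]*"); // glob *  -> anything within one component
    if exclude.starts_with('/') {
        format!(r"^{}($|/)", pattern) // absolute: anchor at path start
    } else {
        format!(r"/{}($|/)", pattern) // relative: match any path component
    }
}

fn main() -> Result<(), regex::Error> {
    let set = RegexSet::new(["*.tmp", "/var/cache"].iter().map(|p| glob_to_regex(p)))?;
    assert!(set.is_match("/home/user/build.tmp"));
    assert!(set.is_match("/var/cache/apt"));
    assert!(!set.is_match("/home/var/cache")); // anchored pattern misses this
    Ok(())
}
```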
Arguments::Remove{repo_path, backup_name, inode, force} => { + checked!( + repo.save_backup(&backup, &backup_name_dst), + "save backup file", + ErrorCode::SaveBackup + ); + } + Arguments::Remove { + repo_path, + backup_name, + inode, + force + } => { let mut repo = try!(open_repository(&repo_path)); if let Some(inode) = inode { let mut backup = try!(get_backup(&repo, &backup_name)); - checked!(repo.remove_backup_path(&mut backup, inode), "remove backup subpath", ErrorCode::RemoveRun); - checked!(repo.save_backup(&backup, &backup_name), "save backup file", ErrorCode::SaveBackup); + checked!( + repo.remove_backup_path(&mut backup, inode), + "remove backup subpath", + ErrorCode::RemoveRun + ); + checked!( + repo.save_backup(&backup, &backup_name), + "save backup file", + ErrorCode::SaveBackup + ); info!("The backup subpath has been deleted, run vacuum to reclaim space"); } else if repo.layout.backups_path().join(&backup_name).is_dir() { - let backups = checked!(repo.get_backups(&backup_name), "retrieve backups", ErrorCode::RemoveRun); + let backups = checked!( + repo.get_backups(&backup_name), + "retrieve backups", + ErrorCode::RemoveRun + ); if force { for name in backups.keys() { - checked!(repo.delete_backup(&format!("{}/{}", &backup_name, name)), "delete backup", ErrorCode::RemoveRun); + checked!( + repo.delete_backup(&format!("{}/{}", &backup_name, name)), + "delete backup", + ErrorCode::RemoveRun + ); } } else { error!("Denying to remove multiple backups (use --force):"); @@ -446,37 +688,81 @@ pub fn run() -> Result<(), ErrorCode> { } } } else { - checked!(repo.delete_backup(&backup_name), "delete backup", ErrorCode::RemoveRun); + checked!( + repo.delete_backup(&backup_name), + "delete backup", + ErrorCode::RemoveRun + ); info!("The backup has been deleted, run vacuum to reclaim space"); } - }, - Arguments::Prune{repo_path, prefix, daily, weekly, monthly, yearly, force} => { + } + Arguments::Prune { + repo_path, + prefix, + daily, + weekly, + monthly, + yearly, + force + } => { let mut repo = try!(open_repository(&repo_path)); if daily + weekly + monthly + yearly == 0 { error!("This would remove all those backups"); - return Err(ErrorCode::UnsafeArgs) + return Err(ErrorCode::UnsafeArgs); } - checked!(repo.prune_backups(&prefix, daily, weekly, monthly, yearly, force), "prune backups", ErrorCode::PruneRun); + checked!( + repo.prune_backups(&prefix, daily, weekly, monthly, yearly, force), + "prune backups", + ErrorCode::PruneRun + ); if !force { info!("Run with --force to actually execute this command"); } - }, - Arguments::Vacuum{repo_path, ratio, force, combine} => { + } + Arguments::Vacuum { + repo_path, + ratio, + force, + combine + } => { let mut repo = try!(open_repository(&repo_path)); let info_before = repo.info(); - checked!(repo.vacuum(ratio, combine, force), "vacuum", ErrorCode::VacuumRun); + checked!( + repo.vacuum(ratio, combine, force), + "vacuum", + ErrorCode::VacuumRun + ); if !force { info!("Run with --force to actually execute this command"); } else { let info_after = repo.info(); - info!("Reclaimed {}", to_file_size(info_before.encoded_data_size - info_after.encoded_data_size)); + info!( + "Reclaimed {}", + to_file_size(info_before.encoded_data_size - info_after.encoded_data_size) + ); } - }, - Arguments::Check{repo_path, backup_name, inode, bundles, index, bundle_data, repair} => { + } + Arguments::Check { + repo_path, + backup_name, + inode, + bundles, + index, + bundle_data, + repair + } => { let mut repo = try!(open_repository(&repo_path)); - 
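`to_file_size` shows up in nearly every info/vacuum message above. The real helper lives in `src/util` and is not part of this diff, so the following is only a plausible stand-in that matches how its output reads:

```rust
// Hypothetical equivalent of util::to_file_size; the real helper may
// differ in rounding and unit names.
fn to_file_size(size: u64) -> String {
    let units = ["Bytes", "KiB", "MiB", "GiB", "TiB"];
    let mut size = size as f64;
    let mut unit = 0;
    while size >= 1024.0 && unit + 1 < units.len() {
        size /= 1024.0;
        unit += 1;
    }
    if unit == 0 {
        format!("{} {}", size as u64, units[unit])
    } else {
        format!("{:.1} {}", size, units[unit])
    }
}

fn main() {
    assert_eq!(to_file_size(512), "512 Bytes");
    assert_eq!(to_file_size(1536), "1.5 KiB");
    assert_eq!(to_file_size(5 * 1024 * 1024), "5.0 MiB");
}
```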
checked!(repo.check_repository(repair), "check repository", ErrorCode::CheckRun); + checked!( + repo.check_repository(repair), + "check repository", + ErrorCode::CheckRun + ); if bundles { - checked!(repo.check_bundles(bundle_data, repair), "check bundles", ErrorCode::CheckRun); + checked!( + repo.check_bundles(bundle_data, repair), + "check bundles", + ErrorCode::CheckRun + ); } if index { checked!(repo.check_index(repair), "check index", ErrorCode::CheckRun); @@ -484,32 +770,59 @@ pub fn run() -> Result<(), ErrorCode> { if let Some(backup_name) = backup_name { let mut backup = try!(get_backup(&repo, &backup_name)); if let Some(path) = inode { - checked!(repo.check_backup_inode(&backup_name, &mut backup, Path::new(&path), repair), "check inode", ErrorCode::CheckRun) + checked!( + repo.check_backup_inode(&backup_name, &mut backup, Path::new(&path), repair), + "check inode", + ErrorCode::CheckRun + ) } else { - checked!(repo.check_backup(&backup_name, &mut backup, repair), "check backup", ErrorCode::CheckRun) + checked!( + repo.check_backup(&backup_name, &mut backup, repair), + "check backup", + ErrorCode::CheckRun + ) } } else { - checked!(repo.check_backups(repair), "check repository", ErrorCode::CheckRun) + checked!( + repo.check_backups(repair), + "check repository", + ErrorCode::CheckRun + ) } repo.set_clean(); info!("Integrity verified") - }, - Arguments::List{repo_path, backup_name, inode} => { + } + Arguments::List { + repo_path, + backup_name, + inode + } => { let mut repo = try!(open_repository(&repo_path)); let backup_map = if let Some(backup_name) = backup_name { if repo.layout.backups_path().join(&backup_name).is_dir() { repo.get_backups(&backup_name) } else { let backup = try!(get_backup(&repo, &backup_name)); - let inode = checked!(repo.get_backup_inode(&backup, inode.as_ref().map(|v| v as &str).unwrap_or("/")), "load subpath inode", ErrorCode::LoadInode); + let inode = checked!( + repo.get_backup_inode( + &backup, + inode.as_ref().map(|v| v as &str).unwrap_or("/") + ), + "load subpath inode", + ErrorCode::LoadInode + ); println!("{}", format_inode_one_line(&inode)); if let Some(children) = inode.children { for chunks in children.values() { - let inode = checked!(repo.get_inode(chunks), "load child inode", ErrorCode::LoadInode); + let inode = checked!( + repo.get_inode(chunks), + "load child inode", + ErrorCode::LoadInode + ); println!("- {}", format_inode_one_line(&inode)); } } - return Ok(()) + return Ok(()); } } else { repo.get_all_backups() @@ -519,20 +832,28 @@ pub fn run() -> Result<(), ErrorCode> { Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => { warn!("Some backups could not be read, ignoring them"); backup_map - }, + } Err(err) => { error!("Failed to load backup files: {}", err); - return Err(ErrorCode::LoadBackup) + return Err(ErrorCode::LoadBackup); } }; print_backups(&backup_map); - }, - Arguments::Info{repo_path, backup_name, inode} => { + } + Arguments::Info { + repo_path, + backup_name, + inode + } => { let mut repo = try!(open_repository(&repo_path)); if let Some(backup_name) = backup_name { let backup = try!(get_backup(&repo, &backup_name)); if let Some(inode) = inode { - let inode = checked!(repo.get_backup_inode(&backup, inode), "load subpath inode", ErrorCode::LoadInode); + let inode = checked!( + repo.get_backup_inode(&backup, inode), + "load subpath inode", + ErrorCode::LoadInode + ); print_inode(&inode); } else { print_backup(&backup); @@ -540,55 +861,108 @@ pub fn run() -> Result<(), ErrorCode> { } else { 
print_repoinfo(&repo.info()); } - }, - Arguments::Mount{repo_path, backup_name, inode, mount_point} => { + } + Arguments::Mount { + repo_path, + backup_name, + inode, + mount_point + } => { let mut repo = try!(open_repository(&repo_path)); let fs = if let Some(backup_name) = backup_name { if repo.layout.backups_path().join(&backup_name).is_dir() { - checked!(FuseFilesystem::from_repository(&mut repo, Some(&backup_name)), "create fuse filesystem", ErrorCode::FuseMount) + checked!( + FuseFilesystem::from_repository(&mut repo, Some(&backup_name)), + "create fuse filesystem", + ErrorCode::FuseMount + ) } else { let backup = try!(get_backup(&repo, &backup_name)); if let Some(inode) = inode { - let inode = checked!(repo.get_backup_inode(&backup, inode), "load subpath inode", ErrorCode::LoadInode); - checked!(FuseFilesystem::from_inode(&mut repo, backup, inode), "create fuse filesystem", ErrorCode::FuseMount) + let inode = checked!( + repo.get_backup_inode(&backup, inode), + "load subpath inode", + ErrorCode::LoadInode + ); + checked!( + FuseFilesystem::from_inode(&mut repo, backup, inode), + "create fuse filesystem", + ErrorCode::FuseMount + ) } else { - checked!(FuseFilesystem::from_backup(&mut repo, backup), "create fuse filesystem", ErrorCode::FuseMount) + checked!( + FuseFilesystem::from_backup(&mut repo, backup), + "create fuse filesystem", + ErrorCode::FuseMount + ) } } } else { - checked!(FuseFilesystem::from_repository(&mut repo, None), "create fuse filesystem", ErrorCode::FuseMount) + checked!( + FuseFilesystem::from_repository(&mut repo, None), + "create fuse filesystem", + ErrorCode::FuseMount + ) }; info!("Mounting the filesystem..."); - info!("Please unmount the filesystem via 'fusermount -u {}' when done.", mount_point); - checked!(fs.mount(&mount_point), "mount filesystem", ErrorCode::FuseMount); - }, - Arguments::Analyze{repo_path} => { + info!( + "Please unmount the filesystem via 'fusermount -u {}' when done.", + mount_point + ); + checked!( + fs.mount(&mount_point), + "mount filesystem", + ErrorCode::FuseMount + ); + } + Arguments::Analyze { repo_path } => { let mut repo = try!(open_repository(&repo_path)); - print_analysis(&checked!(repo.analyze_usage(), "analyze repository", ErrorCode::AnalyzeRun)); - }, - Arguments::BundleList{repo_path} => { + print_analysis(&checked!( + repo.analyze_usage(), + "analyze repository", + ErrorCode::AnalyzeRun + )); + } + Arguments::BundleList { repo_path } => { let repo = try!(open_repository(&repo_path)); for bundle in repo.list_bundles() { print_bundle_one_line(bundle); } - }, - Arguments::BundleInfo{repo_path, bundle_id} => { + } + Arguments::BundleInfo { + repo_path, + bundle_id + } => { let repo = try!(open_repository(&repo_path)); if let Some(bundle) = repo.get_bundle(&bundle_id) { print_bundle(bundle); } else { error!("No such bundle"); - return Err(ErrorCode::LoadBundle) + return Err(ErrorCode::LoadBundle); } - }, - Arguments::Import{repo_path, remote_path, key_files} => { - checked!(Repository::import(repo_path, remote_path, key_files), "import repository", ErrorCode::ImportRun); + } + Arguments::Import { + repo_path, + remote_path, + key_files + } => { + checked!( + Repository::import(repo_path, remote_path, key_files), + "import repository", + ErrorCode::ImportRun + ); info!("Import finished"); - }, - Arguments::Versions{repo_path, path} => { + } + Arguments::Versions { repo_path, path } => { let mut repo = try!(open_repository(&repo_path)); let mut found = false; - for (name, mut inode) in checked!(repo.find_versions(&path), "find 
versions", ErrorCode::VersionsRun) { + for (name, mut inode) in + checked!( + repo.find_versions(&path), + "find versions", + ErrorCode::VersionsRun + ) + { inode.name = format!("{}::{}", name, &path); println!("{}", format_inode_one_line(&inode)); found = true; @@ -596,30 +970,62 @@ pub fn run() -> Result<(), ErrorCode> { if !found { info!("No versions of that file were found."); } - }, - Arguments::Diff{repo_path_old, backup_name_old, inode_old, repo_path_new, backup_name_new, inode_new} => { + } + Arguments::Diff { + repo_path_old, + backup_name_old, + inode_old, + repo_path_new, + backup_name_new, + inode_new + } => { if repo_path_old != repo_path_new { error!("Can only run diff on same repository"); - return Err(ErrorCode::InvalidArgs) + return Err(ErrorCode::InvalidArgs); } let mut repo = try!(open_repository(&repo_path_old)); let backup_old = try!(get_backup(&repo, &backup_name_old)); let backup_new = try!(get_backup(&repo, &backup_name_new)); - let inode1 = checked!(repo.get_backup_inode(&backup_old, inode_old.unwrap_or_else(|| "/".to_string())), "load subpath inode", ErrorCode::LoadInode); - let inode2 = checked!(repo.get_backup_inode(&backup_new, inode_new.unwrap_or_else(|| "/".to_string())), "load subpath inode", ErrorCode::LoadInode); - let diffs = checked!(repo.find_differences(&inode1, &inode2), "find differences", ErrorCode::DiffRun); + let inode1 = + checked!( + repo.get_backup_inode(&backup_old, inode_old.unwrap_or_else(|| "/".to_string())), + "load subpath inode", + ErrorCode::LoadInode + ); + let inode2 = + checked!( + repo.get_backup_inode(&backup_new, inode_new.unwrap_or_else(|| "/".to_string())), + "load subpath inode", + ErrorCode::LoadInode + ); + let diffs = checked!( + repo.find_differences(&inode1, &inode2), + "find differences", + ErrorCode::DiffRun + ); for diff in &diffs { - println!("{} {:?}", match diff.0 { - DiffType::Add => "add", - DiffType::Mod => "mod", - DiffType::Del => "del" - }, diff.1); + println!( + "{} {:?}", + match diff.0 { + DiffType::Add => "add", + DiffType::Mod => "mod", + DiffType::Del => "del", + }, + diff.1 + ); } if diffs.is_empty() { info!("No differences found"); } - }, - Arguments::Config{repo_path, bundle_size, chunker, compression, encryption, hash} => { + } + Arguments::Config { + repo_path, + bundle_size, + chunker, + compression, + encryption, + hash + } => { let mut repo = try!(open_repository(&repo_path)); let mut changed = false; if let Some(bundle_size) = bundle_size { @@ -627,7 +1033,9 @@ pub fn run() -> Result<(), ErrorCode> { changed = true; } if let Some(chunker) = chunker { - warn!("Changing the chunker makes it impossible to use existing data for deduplication"); + warn!( + "Changing the chunker makes it impossible to use existing data for deduplication" + ); repo.config.chunker = chunker; changed = true; } @@ -640,7 +1048,9 @@ pub fn run() -> Result<(), ErrorCode> { changed = true; } if let Some(hash) = hash { - warn!("Changing the hash makes it impossible to use existing data for deduplication"); + warn!( + "Changing the hash makes it impossible to use existing data for deduplication" + ); repo.config.hash = hash; changed = true; } @@ -650,41 +1060,67 @@ pub fn run() -> Result<(), ErrorCode> { } else { print_config(&repo.config); } - }, - Arguments::GenKey{file, password} => { + } + Arguments::GenKey { file, password } => { let (public, secret) = match password { None => Crypto::gen_keypair(), - Some(ref password) => Crypto::keypair_from_password(password) + Some(ref password) => 
Crypto::keypair_from_password(password), }; info!("Created the following key pair"); println!("public: {}", to_hex(&public[..])); println!("secret: {}", to_hex(&secret[..])); if let Some(file) = file { - checked!(Crypto::save_keypair_to_file(&public, &secret, file), "save key pair", ErrorCode::SaveKey); + checked!( + Crypto::save_keypair_to_file(&public, &secret, file), + "save key pair", + ErrorCode::SaveKey + ); } - }, - Arguments::AddKey{repo_path, set_default, password, file} => { + } + Arguments::AddKey { + repo_path, + set_default, + password, + file + } => { let mut repo = try!(open_repository(&repo_path)); let (public, secret) = if let Some(file) = file { - checked!(Crypto::load_keypair_from_file(file), "load key pair", ErrorCode::LoadKey) + checked!( + Crypto::load_keypair_from_file(file), + "load key pair", + ErrorCode::LoadKey + ) } else { info!("Created the following key pair"); let (public, secret) = match password { None => Crypto::gen_keypair(), - Some(ref password) => Crypto::keypair_from_password(password) + Some(ref password) => Crypto::keypair_from_password(password), }; println!("public: {}", to_hex(&public[..])); println!("secret: {}", to_hex(&secret[..])); (public, secret) }; - checked!(repo.register_key(public, secret), "add key pair", ErrorCode::AddKey); + checked!( + repo.register_key(public, secret), + "add key pair", + ErrorCode::AddKey + ); if set_default { repo.set_encryption(Some(&public)); checked!(repo.save_config(), "save config", ErrorCode::SaveConfig); - warn!("Please store this key pair in a secure location before using the repository"); + warn!( + "Please store this key pair in a secure location before using the repository" + ); } - }, - Arguments::AlgoTest{bundle_size, chunker, compression, encrypt, hash, file} => { + } + Arguments::AlgoTest { + bundle_size, + chunker, + compression, + encrypt, + hash, + file + } => { algotest::run(&file, bundle_size, chunker, compression, encrypt, hash); } } diff --git a/src/main.rs b/src/main.rs index 2ac0a83..491b8b1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,26 +1,32 @@ #![recursion_limit="128"] #![allow(unknown_lints, float_cmp)] #![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] extern crate test; +#[cfg(feature = "bench")] +extern crate test; extern crate serde; extern crate serde_bytes; extern crate rmp_serde; -#[macro_use] extern crate serde_utils; +#[macro_use] +extern crate serde_utils; extern crate squash_sys as squash; extern crate blake2_rfc as blake2; extern crate murmurhash3; extern crate serde_yaml; -#[macro_use] extern crate quick_error; +#[macro_use] +extern crate quick_error; extern crate chrono; -#[macro_use] extern crate clap; -#[macro_use] extern crate log; +#[macro_use] +extern crate clap; +#[macro_use] +extern crate log; extern crate byteorder; extern crate sodiumoxide; extern crate libsodium_sys; extern crate ansi_term; extern crate filetime; extern crate regex; -#[macro_use] extern crate lazy_static; +#[macro_use] +extern crate lazy_static; extern crate fuse; extern crate rand; extern crate time; @@ -46,6 +52,6 @@ use std::process::exit; fn main() { match cli::run() { Ok(()) => exit(0), - Err(code) => exit(code.code()) + Err(code) => exit(code.code()), } } diff --git a/src/mount.rs b/src/mount.rs index 2ef5072..4435c1b 100644 --- a/src/mount.rs +++ b/src/mount.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::path::Path; use std::ffi::OsStr; @@ -71,7 +71,7 @@ fn convert_file_type(kind: FileType) -> fuse::FileType { FileType::Symlink => 
fuse::FileType::Symlink, FileType::BlockDevice => fuse::FileType::BlockDevice, FileType::CharDevice => fuse::FileType::CharDevice, - FileType::NamedPipe => fuse::FileType::NamedPipe + FileType::NamedPipe => fuse::FileType::NamedPipe, } } @@ -115,16 +115,19 @@ impl FuseInode { nlink: 1, uid: uid, gid: gid, - rdev: self.inode.device.map_or(0, |(major, minor)| (major << 8) + minor), + rdev: self.inode.device.map_or( + 0, + |(major, minor)| (major << 8) + minor + ), flags: 0 } } pub fn dir_list(&self) -> Option> { if self.inode.file_type != FileType::Directory { - return None + return None; } - let mut list = Vec::with_capacity(self.children.len()+2); + let mut list = Vec::with_capacity(self.children.len() + 2); list.push((self.num, fuse::FileType::Directory, ".".to_string())); if let Some(ref parent) = self.parent { let parent = parent.borrow(); @@ -134,7 +137,11 @@ impl FuseInode { } for ch in self.children.values() { let child = ch.borrow(); - list.push((child.num, convert_file_type(child.inode.file_type), child.inode.name.clone())); + list.push(( + child.num, + convert_file_type(child.inode.file_type), + child.inode.name.clone() + )); } Some(list) } @@ -156,11 +163,14 @@ impl<'a> FuseFilesystem<'a> { }) } - pub fn from_repository(repository: &'a mut Repository, path: Option<&str>) -> Result { + pub fn from_repository( + repository: &'a mut Repository, + path: Option<&str>, + ) -> Result { let mut backups = vec![]; let backup_map = match path { Some(path) => try!(repository.get_backups(path)), - None => try!(repository.get_all_backups()) + None => try!(repository.get_all_backups()), }; for (name, backup) in backup_map { let inode = try!(repository.get_inode(&backup.root)); @@ -173,7 +183,7 @@ impl<'a> FuseFilesystem<'a> { for part in name.split('/') { parent = match fs.get_child(&parent, part).unwrap() { Some(child) => child, - None => fs.add_virtual_directory(part.to_string(), Some(parent)) + None => fs.add_virtual_directory(part.to_string(), Some(parent)), }; } let mut parent_mut = parent.borrow_mut(); @@ -185,28 +195,50 @@ impl<'a> FuseFilesystem<'a> { Ok(fs) } - pub fn from_backup(repository: &'a mut Repository, backup: Backup) -> Result { + pub fn from_backup( + repository: &'a mut Repository, + backup: Backup, + ) -> Result { let inode = try!(repository.get_inode(&backup.root)); let mut fs = try!(FuseFilesystem::new(repository)); fs.add_inode(inode, None, backup.user_names, backup.group_names); Ok(fs) } - pub fn from_inode(repository: &'a mut Repository, backup: Backup, inode: Inode) -> Result { + pub fn from_inode( + repository: &'a mut Repository, + backup: Backup, + inode: Inode, + ) -> Result { let mut fs = try!(FuseFilesystem::new(repository)); fs.add_inode(inode, None, backup.user_names, backup.group_names); Ok(fs) } - pub fn add_virtual_directory(&mut self, name: String, parent: Option) -> FuseInodeRef { - self.add_inode(Inode { - name: name, - file_type: FileType::Directory, - ..Default::default() - }, parent, HashMap::default(), HashMap::default()) + pub fn add_virtual_directory( + &mut self, + name: String, + parent: Option, + ) -> FuseInodeRef { + self.add_inode( + Inode { + name: name, + file_type: FileType::Directory, + ..Default::default() + }, + parent, + HashMap::default(), + HashMap::default() + ) } - pub fn add_inode(&mut self, inode: Inode, parent: Option, user_names: HashMap, group_names: HashMap) -> FuseInodeRef { + pub fn add_inode( + &mut self, + inode: Inode, + parent: Option, + user_names: HashMap, + group_names: HashMap, + ) -> FuseInodeRef { let inode = 
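`FuseInode::to_attrs` has to collapse zvault's `(major, minor)` device pair into the single `rdev` integer FUSE expects; the diff packs it as `(major << 8) + minor`. That packing in isolation:

```rust
// None means the inode is not a device node, so rdev is 0.
fn pack_rdev(device: Option<(u32, u32)>) -> u32 {
    device.map_or(0, |(major, minor)| (major << 8) + minor)
}

fn main() {
    assert_eq!(pack_rdev(None), 0);
    assert_eq!(pack_rdev(Some((8, 1))), 2049); // e.g. /dev/sda1 is 8:1
}
```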
FuseInode { inode: inode, num: self.next_id, @@ -228,22 +260,30 @@ impl<'a> FuseFilesystem<'a> { } pub fn mount>(self, mountpoint: P) -> Result<(), RepositoryError> { - Ok(try!(fuse::mount(self, &mountpoint, &[ - OsStr::new("default_permissions"), - OsStr::new("kernel_cache"), - OsStr::new("auto_cache"), - OsStr::new("readonly") - ]))) + Ok(try!(fuse::mount( + self, + &mountpoint, + &[ + OsStr::new("default_permissions"), + OsStr::new("kernel_cache"), + OsStr::new("auto_cache"), + OsStr::new("readonly"), + ] + ))) } pub fn get_inode(&mut self, num: u64) -> Option { self.inodes.get(&num).cloned() } - pub fn get_child(&mut self, parent: &FuseInodeRef, name: &str) -> Result, RepositoryError> { + pub fn get_child( + &mut self, + parent: &FuseInodeRef, + name: &str, + ) -> Result, RepositoryError> { let mut parent_mut = parent.borrow_mut(); if let Some(child) = parent_mut.children.get(name) { - return Ok(Some(child.clone())) + return Ok(Some(child.clone())); } let child; if let Some(chunks) = parent_mut.inode.children.as_ref().and_then(|c| c.get(name)) { @@ -258,9 +298,9 @@ impl<'a> FuseFilesystem<'a> { name_cache: parent_mut.name_cache.clone() })); self.inodes.insert(self.next_id, child.clone()); - self.next_id +=1; + self.next_id += 1; } else { - return Ok(None) + return Ok(None); } parent_mut.children.insert(name.to_string(), child.clone()); Ok(Some(child)) @@ -284,7 +324,7 @@ impl<'a> FuseFilesystem<'a> { name_cache: parent_mut.name_cache.clone() })); self.inodes.insert(self.next_id, child.clone()); - self.next_id +=1; + self.next_id += 1; parent_children.insert(name.clone(), child); } } @@ -297,10 +337,11 @@ impl<'a> FuseFilesystem<'a> { let mut inode = inode.borrow_mut(); let mut chunks = None; match inode.inode.data { - None | Some(FileData::Inline(_)) => (), + None | + Some(FileData::Inline(_)) => (), Some(FileData::ChunkedDirect(ref c)) => { chunks = Some(c.clone()); - }, + } Some(FileData::ChunkedIndirect(ref c)) => { let chunk_data = try!(self.repository.get_data(c)); chunks = Some(ChunkList::read_from(&chunk_data)); @@ -313,9 +354,8 @@ impl<'a> FuseFilesystem<'a> { impl<'a> fuse::Filesystem for FuseFilesystem<'a> { - /// Look up a directory entry by name and get its attributes. - fn lookup (&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) { + fn lookup(&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) { let sname = str!(name, reply); let parent = inode!(self, parent, reply); let child = lookup!(self, &parent, sname, reply); @@ -324,7 +364,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { reply.entry(&ttl, &attrs, 0) } - fn destroy (&mut self, _req: &fuse::Request) { + fn destroy(&mut self, _req: &fuse::Request) { info!("destroy"); } @@ -335,66 +375,131 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { /// each forget. The filesystem may ignore forget calls, if the inodes don't need to /// have a limited lifetime. On unmount it is not guaranteed, that all referenced /// inodes will receive a forget message. 
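`get_child` is the heart of the lazy FUSE tree: children are decoded from the repository only on first lookup and then cached in the parent through `Rc<RefCell<...>>` handles, so later lookups never touch the repository. A simplified model of that caching, with the repository decoding replaced by plain node construction and the names `Node`/`Fs` standing in for `FuseInode`/`FuseFilesystem`:

```rust
use std::cell::RefCell;
use std::collections::HashMap;
use std::rc::Rc;

type NodeRef = Rc<RefCell<Node>>;

struct Node {
    num: u64,
    name: String,
    children: HashMap<String, NodeRef>,
}

struct Fs {
    inodes: HashMap<u64, NodeRef>,
    next_id: u64,
}

impl Fs {
    fn get_child(&mut self, parent: &NodeRef, name: &str) -> Option<NodeRef> {
        let mut parent = parent.borrow_mut();
        if let Some(child) = parent.children.get(name) {
            return Some(child.clone()); // cache hit: no decoding needed
        }
        // Cache miss: in zvault this is where the child inode would be
        // decoded from its chunks via repository.get_inode(); it may also
        // return None if no such entry exists, which is elided here.
        let child = Rc::new(RefCell::new(Node {
            num: self.next_id,
            name: name.to_string(),
            children: HashMap::new(),
        }));
        self.inodes.insert(self.next_id, child.clone());
        self.next_id += 1;
        parent.children.insert(name.to_string(), child.clone());
        Some(child)
    }
}

fn main() {
    let root = Rc::new(RefCell::new(Node {
        num: 1,
        name: "/".into(),
        children: HashMap::new(),
    }));
    let mut fs = Fs { inodes: HashMap::new(), next_id: 2 };
    fs.inodes.insert(1, root.clone());
    let a = fs.get_child(&root, "etc").unwrap();
    let b = fs.get_child(&root, "etc").unwrap();
    assert!(Rc::ptr_eq(&a, &b)); // second lookup hits the cache
    assert_eq!(a.borrow().num, 2);
}
```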
- fn forget (&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) { + fn forget(&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) { info!("forget {:?}", ino); //self.fs.forget(ino).unwrap(); } /// Get file attributes - fn getattr (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) { + fn getattr(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) { let inode = inode!(self, ino, reply); let ttl = Timespec::new(60, 0); reply.attr(&ttl, &inode.borrow().to_attrs()); } /// Set file attributes - fn setattr (&mut self, _req: &fuse::Request, _ino: u64, _mode: Option, _uid: Option, _gid: Option, _size: Option, _atime: Option, _mtime: Option, _fh: Option, _crtime: Option, _chgtime: Option, _bkuptime: Option, _flags: Option, reply: fuse::ReplyAttr) { + fn setattr( + &mut self, + _req: &fuse::Request, + _ino: u64, + _mode: Option, + _uid: Option, + _gid: Option, + _size: Option, + _atime: Option, + _mtime: Option, + _fh: Option, + _crtime: Option, + _chgtime: Option, + _bkuptime: Option, + _flags: Option, + reply: fuse::ReplyAttr, + ) { reply.error(libc::EROFS) } /// Read symbolic link - fn readlink (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) { + fn readlink(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) { let inode = inode!(self, ino, reply); let inode = inode.borrow(); match inode.inode.symlink_target { None => reply.error(libc::EINVAL), - Some(ref link) => reply.data(link.as_bytes()) + Some(ref link) => reply.data(link.as_bytes()), } } /// Create a hard link - fn link (&mut self, _req: &fuse::Request, _ino: u64, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEntry) { + fn link( + &mut self, + _req: &fuse::Request, + _ino: u64, + _newparent: u64, + _newname: &OsStr, + reply: fuse::ReplyEntry, + ) { reply.error(libc::EROFS) } /// Create file node /// Create a regular file, character device, block device, fifo or socket node. 
- fn mknod (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _rdev: u32, reply: fuse::ReplyEntry) { + fn mknod( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + _mode: u32, + _rdev: u32, + reply: fuse::ReplyEntry, + ) { reply.error(libc::EROFS) } /// Create a directory - fn mkdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, reply: fuse::ReplyEntry) { + fn mkdir( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + _mode: u32, + reply: fuse::ReplyEntry, + ) { reply.error(libc::EROFS) } /// Remove a file - fn unlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) { + fn unlink( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::EROFS) } /// Remove a directory - fn rmdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) { + fn rmdir( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::EROFS) } /// Create a symbolic link - fn symlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _link: &Path, reply: fuse::ReplyEntry) { + fn symlink( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + _link: &Path, + reply: fuse::ReplyEntry, + ) { reply.error(libc::EROFS) } /// Rename a file - fn rename (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEmpty) { + fn rename( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + _newparent: u64, + _newname: &OsStr, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::EROFS) } @@ -406,7 +511,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { /// anything in fh. There are also some flags (direct_io, keep_cache) which the /// filesystem may set, to change the way the file is opened. See fuse_file_info /// structure in for more details. - fn open (&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) { + fn open(&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) { if (flags & (libc::O_WRONLY | libc::O_RDWR | libc::O_TRUNC) as u32) != 0 { return reply.error(libc::EROFS); } @@ -422,29 +527,44 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { /// return value of the read system call will reflect the return value of this /// operation. fh will contain the value set by the open method, or will be undefined /// if the open method didn't set any value. - fn read (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, mut offset: u64, mut size: u32, reply: fuse::ReplyData) { + fn read( + &mut self, + _req: &fuse::Request, + ino: u64, + _fh: u64, + mut offset: u64, + mut size: u32, + reply: fuse::ReplyData, + ) { let inode = inode!(self, ino, reply); let inode = inode.borrow(); match inode.inode.data { None => return reply.data(&[]), - Some(FileData::Inline(ref data)) => return reply.data(&data[min(offset as usize, data.len())..min(offset as usize+size as usize, data.len())]), - _ => () + Some(FileData::Inline(ref data)) => { + return reply.data( + &data[min(offset as usize, data.len()).. 
+ min(offset as usize + size as usize, data.len())] + ) + } + _ => (), } if let Some(ref chunks) = inode.chunks { let mut data = Vec::with_capacity(size as usize); for &(hash, len) in chunks.iter() { if len as u64 <= offset { offset -= len as u64; - continue + continue; } let chunk = match fuse_try!(self.repository.get_chunk(hash), reply) { Some(chunk) => chunk, - None => return reply.error(libc::EIO) + None => return reply.error(libc::EIO), }; assert_eq!(chunk.len() as u32, len); - data.extend_from_slice(&chunk[offset as usize..min(offset as usize + size as usize, len as usize)]); + data.extend_from_slice( + &chunk[offset as usize..min(offset as usize + size as usize, len as usize)] + ); if len - offset as u32 >= size { - break + break; } size -= len - offset as u32; offset = 0; @@ -456,12 +576,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// Write data - fn write (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _offset: u64, _data: &[u8], _flags: u32, reply: fuse::ReplyWrite) { + fn write( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _offset: u64, + _data: &[u8], + _flags: u32, + reply: fuse::ReplyWrite, + ) { reply.error(libc::EROFS) } /// Flush method - fn flush (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: fuse::ReplyEmpty) { + fn flush( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _lock_owner: u64, + reply: fuse::ReplyEmpty, + ) { reply.ok() } @@ -473,7 +609,16 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { /// the release. fh will contain the value set by the open method, or will be undefined /// if the open method didn't set any value. flags will contain the same flags as for /// open. - fn release (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, _lock_owner: u64, _flush: bool, reply: fuse::ReplyEmpty) { + fn release( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _flags: u32, + _lock_owner: u64, + _flush: bool, + reply: fuse::ReplyEmpty, + ) { /*if self.read_fds.remove(&fh).is_some() || self.write_fds.remove(&fh).is_some() { reply.ok(); } else { @@ -483,28 +628,42 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// Synchronize file contents - fn fsync (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) { + fn fsync( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _datasync: bool, + reply: fuse::ReplyEmpty, + ) { reply.ok() } /// Open a directory, finished - fn opendir (&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) { + fn opendir(&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) { let dir = inode!(self, ino, reply); fuse_try!(self.fetch_children(&dir), reply); reply.opened(ino, 0); } /// Read directory, finished - fn readdir (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, offset: u64, mut reply: fuse::ReplyDirectory) { + fn readdir( + &mut self, + _req: &fuse::Request, + ino: u64, + _fh: u64, + offset: u64, + mut reply: fuse::ReplyDirectory, + ) { let dir = inode!(self, ino, reply); let dir = dir.borrow(); if let Some(entries) = dir.dir_list() { for (i, (num, file_type, name)) in entries.into_iter().enumerate() { if i < offset as usize { - continue + continue; } - if reply.add(num, i as u64 +1, file_type, &Path::new(&name)) { - break + if reply.add(num, i as u64 + 1, file_type, &Path::new(&name)) { + break; } } reply.ok() @@ -514,20 +673,34 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// Release an 
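The `read` implementation above walks the chunk list manually: whole chunks lying before the requested offset are skipped, then slices are copied until `size` bytes are gathered. The same loop over in-memory chunks, with the offset/size bookkeeping kept intact (the real code fetches each chunk from the repository by hash instead):

```rust
use std::cmp::min;

fn read_range(chunks: &[Vec<u8>], mut offset: u64, mut size: u32) -> Vec<u8> {
    let mut data = Vec::with_capacity(size as usize);
    for chunk in chunks {
        let len = chunk.len() as u32;
        if len as u64 <= offset {
            offset -= len as u64; // chunk lies entirely before the window
            continue;
        }
        let end = min(offset as usize + size as usize, len as usize);
        data.extend_from_slice(&chunk[offset as usize..end]);
        if len - offset as u32 >= size {
            break; // read window satisfied within this chunk
        }
        size -= len - offset as u32;
        offset = 0; // subsequent chunks are read from their start
    }
    data
}

fn main() {
    let chunks = vec![b"hello ".to_vec(), b"chunked ".to_vec(), b"world".to_vec()];
    assert_eq!(read_range(&chunks, 6, 7), b"chunked".to_vec());
    assert_eq!(read_range(&chunks, 3, 8), b"lo chunk".to_vec());
}
```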
open directory, finished - fn releasedir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, reply: fuse::ReplyEmpty) { + fn releasedir( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _flags: u32, + reply: fuse::ReplyEmpty, + ) { reply.ok() } /// Synchronize directory contents, finished - fn fsyncdir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) { + fn fsyncdir( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _datasync: bool, + reply: fuse::ReplyEmpty, + ) { reply.ok() } /// Get file system statistics - fn statfs (&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) { + fn statfs(&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) { let info = self.repository.info(); reply.statfs( - info.raw_data_size/512 as u64, //total blocks + info.raw_data_size / 512 as u64, //total blocks 0, //free blocks for admin 0, //free blocks for users 0, @@ -539,12 +712,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// Set an extended attribute - fn setxattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, _value: &[u8], _flags: u32, _position: u32, reply: fuse::ReplyEmpty) { + fn setxattr( + &mut self, + _req: &fuse::Request, + _ino: u64, + _name: &OsStr, + _value: &[u8], + _flags: u32, + _position: u32, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::EROFS) } /// Get an extended attribute - fn getxattr (&mut self, _req: &fuse::Request, ino: u64, name: &OsStr, size: u32, reply: fuse::ReplyXattr) { + fn getxattr( + &mut self, + _req: &fuse::Request, + ino: u64, + name: &OsStr, + size: u32, + reply: fuse::ReplyXattr, + ) { let inode = inode!(self, ino, reply); let inode = inode.borrow(); if let Some(val) = inode.inode.xattrs.get(&name.to_string_lossy() as &str) { @@ -561,7 +750,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// List extended attribute names - fn listxattr (&mut self, _req: &fuse::Request, ino: u64, size: u32, reply: fuse::ReplyXattr) { + fn listxattr(&mut self, _req: &fuse::Request, ino: u64, size: u32, reply: fuse::ReplyXattr) { let inode = inode!(self, ino, reply); let inode = inode.borrow(); let mut names_str = String::new(); @@ -579,7 +768,13 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { } /// Remove an extended attribute - fn removexattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, reply: fuse::ReplyEmpty) { + fn removexattr( + &mut self, + _req: &fuse::Request, + _ino: u64, + _name: &OsStr, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::EROFS) } @@ -587,28 +782,65 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> { /// This will be called for the access() system call. If the 'default_permissions' /// mount option is given, this method is not called. 
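`getxattr`/`listxattr` follow the usual FUSE size-probe protocol: a call with `size == 0` asks only for the value's length, a later call supplies a buffer size, and a too-small buffer is answered with `ERANGE`. The diff truncates the xattr bodies here, so the sketch below shows the conventional shape of that protocol rather than zvault's exact branches:

```rust
// Stand-in for fuse::ReplyXattr, which has size(), data() and error().
enum XattrReply {
    Size(u32),
    Data(Vec<u8>),
    Error(i32),
}

fn getxattr(value: &[u8], size: u32) -> XattrReply {
    if size == 0 {
        XattrReply::Size(value.len() as u32) // probe: report length only
    } else if (value.len() as u32) <= size {
        XattrReply::Data(value.to_vec())
    } else {
        XattrReply::Error(34) // ERANGE on Linux: caller's buffer too small
    }
}

fn main() {
    let val = b"0o644";
    assert!(matches!(getxattr(val, 0), XattrReply::Size(5)));
    assert!(matches!(getxattr(val, 16), XattrReply::Data(_)));
    assert!(matches!(getxattr(val, 2), XattrReply::Error(34)));
}
```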
This method is not called /// under Linux kernel versions 2.4.x - fn access (&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) { + fn access(&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) { reply.error(libc::ENOSYS); } /// Create and open a file - fn create (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _flags: u32, reply: fuse::ReplyCreate) { + fn create( + &mut self, + _req: &fuse::Request, + _parent: u64, + _name: &OsStr, + _mode: u32, + _flags: u32, + reply: fuse::ReplyCreate, + ) { reply.error(libc::EROFS) } /// Test for a POSIX file lock - fn getlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, reply: fuse::ReplyLock) { + fn getlk( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _lock_owner: u64, + _start: u64, + _end: u64, + _typ: u32, + _pid: u32, + reply: fuse::ReplyLock, + ) { reply.error(libc::ENOSYS); } /// Acquire, modify or release a POSIX file lock - fn setlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, _sleep: bool, reply: fuse::ReplyEmpty) { + fn setlk( + &mut self, + _req: &fuse::Request, + _ino: u64, + _fh: u64, + _lock_owner: u64, + _start: u64, + _end: u64, + _typ: u32, + _pid: u32, + _sleep: bool, + reply: fuse::ReplyEmpty, + ) { reply.error(libc::ENOSYS); } /// Map block index within file to block index within device - fn bmap (&mut self, _req: &fuse::Request, _ino: u64, _blocksize: u32, _idx: u64, reply: fuse::ReplyBmap) { + fn bmap( + &mut self, + _req: &fuse::Request, + _ino: u64, + _blocksize: u32, + _idx: u64, + reply: fuse::ReplyBmap, + ) { reply.error(libc::ENOSYS); } - } diff --git a/src/prelude.rs b/src/prelude.rs index 0637ac6..149faa5 100644 --- a/src/prelude.rs +++ b/src/prelude.rs @@ -1,9 +1,12 @@ -pub use ::util::*; -pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError, StoredBundle}; -pub use ::chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError}; -pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis, FileData, DiffType, InodeError, RepositoryLayout, Location}; -pub use ::index::{Index, IndexError}; -pub use ::mount::FuseFilesystem; +pub use util::*; +pub use bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, + BundleDb, BundleWriterError, StoredBundle}; +pub use chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError}; +pub use repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, + IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis, + FileData, DiffType, InodeError, RepositoryLayout, Location}; +pub use index::{Index, IndexError}; +pub use mount::FuseFilesystem; pub use serde::{Serialize, Deserialize}; diff --git a/src/repository/backup.rs b/src/repository/backup.rs index 1a4e27b..4715f87 100644 --- a/src/repository/backup.rs +++ b/src/repository/backup.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::fs; use std::path::{self, Path, PathBuf}; @@ -33,17 +33,28 @@ pub struct BackupOptions { pub enum DiffType { - Add, Mod, Del + Add, + Mod, + Del } impl Repository { pub fn get_all_backups(&self) -> Result, RepositoryError> { - Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), 
self.layout.backups_path()))) + Ok(try!(Backup::get_all_from( + &self.crypto.lock().unwrap(), + self.layout.backups_path() + ))) } - pub fn get_backups>(&self, path: P) -> Result, RepositoryError> { - Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.layout.backups_path().join(path)))) + pub fn get_backups>( + &self, + path: P, + ) -> Result, RepositoryError> { + Ok(try!(Backup::get_all_from( + &self.crypto.lock().unwrap(), + self.layout.backups_path().join(path) + ))) } #[inline] @@ -52,14 +63,21 @@ impl Repository { } pub fn get_backup(&self, name: &str) -> Result { - Ok(try!(Backup::read_from(&self.crypto.lock().unwrap(), self.layout.backup_path(name)))) + Ok(try!(Backup::read_from( + &self.crypto.lock().unwrap(), + self.layout.backup_path(name) + ))) } pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> { try!(self.write_mode()); let path = self.layout.backup_path(name); try!(fs::create_dir_all(path.parent().unwrap())); - Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path))) + Ok(try!(backup.save_to( + &self.crypto.lock().unwrap(), + self.config.encryption.clone(), + path + ))) } pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> { @@ -69,23 +87,32 @@ impl Repository { loop { path = path.parent().unwrap().to_owned(); if path == self.layout.backups_path() || fs::remove_dir(&path).is_err() { - break + break; } } Ok(()) } - pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> { + pub fn prune_backups( + &mut self, + prefix: &str, + daily: usize, + weekly: usize, + monthly: usize, + yearly: usize, + force: bool, + ) -> Result<(), RepositoryError> { try!(self.write_mode()); let mut backups = Vec::new(); let backup_map = match self.get_all_backups() { Ok(backup_map) => backup_map, - Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => { + Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, + _failed))) => { warn!("Some backups could not be read, ignoring them"); backup_map - }, - Err(err) => return Err(err) + } + Err(err) => return Err(err), }; for (name, backup) in backup_map { if name.starts_with(prefix) { @@ -96,7 +123,12 @@ impl Repository { backups.sort_by_key(|backup| -backup.2.timestamp); let mut keep = Bitmap::new(backups.len()); - fn mark_needed) -> K>(backups: &[(String, DateTime, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) { + fn mark_needed) -> K>( + backups: &[(String, DateTime, Backup)], + keep: &mut Bitmap, + max: usize, + keyfn: F, + ) { let mut kept = 0; let mut last = None; for (i, backup) in backups.iter().enumerate() { @@ -104,7 +136,7 @@ impl Repository { let cur = Some(val); if cur != last { if kept >= max { - break + break; } last = cur; keep.set(i); @@ -125,7 +157,12 @@ impl Repository { }); } if daily > 0 { - mark_needed(&backups, &mut keep, daily, |d| (d.year(), d.month(), d.day())); + mark_needed( + &backups, + &mut keep, + daily, + |d| (d.year(), d.month(), d.day()) + ); } let mut remove = Vec::new(); println!("Removing the following backups"); @@ -143,7 +180,12 @@ impl Repository { Ok(()) } - pub fn restore_inode_tree>(&mut self, backup: &Backup, inode: Inode, path: P) -> Result<(), RepositoryError> { + pub fn restore_inode_tree>( + &mut self, + backup: &Backup, + inode: Inode, + path: P, + ) -> Result<(), RepositoryError> { let _lock = try!(self.lock(false)); let mut queue = 
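`prune_backups` sorts backups newest-first and calls `mark_needed` once per retention period with a key function (day, week, month, year); the first backup seen for each distinct key is kept until `max` periods are covered. The same marking logic, with precomputed keys standing in for the `chrono` key closure of the diff:

```rust
fn mark_needed<K: Eq + Clone>(keys: &[K], keep: &mut [bool], max: usize) {
    let mut kept = 0;
    let mut last: Option<K> = None;
    for (i, key) in keys.iter().enumerate() {
        let cur = Some(key.clone());
        if cur != last {
            if kept >= max {
                break; // enough periods covered
            }
            last = cur;
            keep[i] = true; // newest backup of this period is kept
            kept += 1;
        }
    }
}

fn main() {
    // (year, month, day) per backup, newest first; keep 2 dailies.
    let days = [(2017, 4, 2), (2017, 4, 2), (2017, 4, 1), (2017, 3, 30)];
    let mut keep = [false; 4];
    mark_needed(&days, &mut keep, 2);
    assert_eq!(keep, [true, false, true, false]);
}
```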
VecDeque::new(); queue.push_back((path.as_ref().to_owned(), inode)); @@ -164,7 +206,11 @@ impl Repository { try!(self.save_inode_at(&inode, &path)); } if inode.file_type == FileType::Directory { - let path = if is_root { path.to_path_buf() } else { path.join(inode.name) }; + let path = if is_root { + path.to_path_buf() + } else { + path.join(inode.name) + }; for chunks in inode.children.unwrap().values() { let inode = try!(self.get_inode(chunks)); queue.push_back((path.clone(), inode)); @@ -181,20 +227,26 @@ impl Repository { reference: Option<&Inode>, options: &BackupOptions, backup: &mut Backup, - failed_paths: &mut Vec + failed_paths: &mut Vec, ) -> Result { let path = path.as_ref(); let mut inode = try!(self.create_inode(path, reference)); if !backup.user_names.contains_key(&inode.user) { if let Some(user) = users::get_user_by_uid(inode.user) { - backup.user_names.insert(inode.user, user.name().to_string()); + backup.user_names.insert( + inode.user, + user.name().to_string() + ); } else { warn!("Failed to retrieve name of user {}", inode.user); } } if !backup.group_names.contains_key(&inode.group) { if let Some(group) = users::get_group_by_gid(inode.group) { - backup.group_names.insert(inode.group, group.name().to_string()); + backup.group_names.insert( + inode.group, + group.name().to_string() + ); } else { warn!("Failed to retrieve name of group {}", inode.group); } @@ -211,28 +263,37 @@ impl Repository { if options.same_device { let child_dev = try!(child.metadata()).st_dev(); if child_dev != parent_dev { - continue + continue; } } if let Some(ref excludes) = options.excludes { let child_path_str = child_path.to_string_lossy(); if excludes.is_match(&child_path_str) { - continue + continue; } } let name = child.file_name().to_string_lossy().to_string(); - let ref_child = reference.as_ref() + let ref_child = reference + .as_ref() .and_then(|inode| inode.children.as_ref()) .and_then(|map| map.get(&name)) .and_then(|chunks| self.get_inode(chunks).ok()); - let child_inode = match self.create_backup_recurse(&child_path, ref_child.as_ref(), options, backup, failed_paths) { + let child_inode = match self.create_backup_recurse( + &child_path, + ref_child.as_ref(), + options, + backup, + failed_paths + ) { Ok(inode) => inode, - Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => { + Err(RepositoryError::Inode(_)) | + Err(RepositoryError::Chunker(_)) | + Err(RepositoryError::Io(_)) => { info!("Failed to backup {:?}", child_path); failed_paths.push(child_path); - continue - }, - Err(err) => return Err(err) + continue; + } + Err(err) => return Err(err), }; let chunks = try!(self.put_inode(&child_inode)); inode.cum_size += child_inode.cum_size; @@ -263,11 +324,16 @@ impl Repository { Ok(inode) } - pub fn create_backup_recursively>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result { + pub fn create_backup_recursively>( + &mut self, + path: P, + reference: Option<&Backup>, + options: &BackupOptions, + ) -> Result { try!(self.write_mode()); let _lock = try!(self.lock(false)); if self.dirty { - return Err(RepositoryError::Dirty) + return Err(RepositoryError::Dirty); } try!(self.set_dirty()); let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok()); @@ -278,7 +344,13 @@ impl Repository { let info_before = self.info(); let start = Local::now(); let mut failed_paths = vec![]; - let root_inode = try!(self.create_backup_recurse(path, reference_inode.as_ref(), options, &mut backup, &mut failed_paths)); + let 
root_inode = try!(self.create_backup_recurse( + path, + reference_inode.as_ref(), + options, + &mut backup, + &mut failed_paths + )); backup.root = try!(self.put_inode(&root_inode)); try!(self.flush()); let elapsed = Local::now().signed_duration_since(start); @@ -304,20 +376,29 @@ impl Repository { } } - pub fn remove_backup_path>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> { + pub fn remove_backup_path>( + &mut self, + backup: &mut Backup, + path: P, + ) -> Result<(), RepositoryError> { try!(self.write_mode()); let _lock = try!(self.lock(false)); let mut inodes = try!(self.get_backup_path(backup, path)); let to_remove = inodes.pop().unwrap(); let mut remove_from = match inodes.pop() { Some(inode) => inode, - None => return Err(BackupError::RemoveRoot.into()) + None => return Err(BackupError::RemoveRoot.into()), }; - remove_from.children.as_mut().unwrap().remove(&to_remove.name); + remove_from.children.as_mut().unwrap().remove( + &to_remove.name + ); let mut last_inode_chunks = try!(self.put_inode(&remove_from)); let mut last_inode_name = remove_from.name; while let Some(mut inode) = inodes.pop() { - inode.children.as_mut().unwrap().insert(last_inode_name, last_inode_chunks); + inode.children.as_mut().unwrap().insert( + last_inode_name, + last_inode_chunks + ); last_inode_chunks = try!(self.put_inode(&inode)); last_inode_name = inode.name; } @@ -326,20 +407,32 @@ impl Repository { Ok(()) } - pub fn get_backup_path>(&mut self, backup: &Backup, path: P) -> Result, RepositoryError> { + pub fn get_backup_path>( + &mut self, + backup: &Backup, + path: P, + ) -> Result, RepositoryError> { let mut inodes = vec![]; let mut inode = try!(self.get_inode(&backup.root)); for c in path.as_ref().components() { if let path::Component::Normal(name) = c { let name = name.to_string_lossy(); - if inodes.is_empty() && inode.file_type != FileType::Directory && inode.name == name { + if inodes.is_empty() && inode.file_type != FileType::Directory && + inode.name == name + { return Ok(vec![inode]); } - if let Some(chunks) = inode.children.as_mut().and_then(|c| c.remove(&name as &str)) { + if let Some(chunks) = inode.children.as_mut().and_then( + |c| c.remove(&name as &str) + ) + { inodes.push(inode); inode = try!(self.get_inode(&chunks)); } else { - return Err(RepositoryError::NoSuchFileInBackup(backup.clone(), path.as_ref().to_owned())); + return Err(RepositoryError::NoSuchFileInBackup( + backup.clone(), + path.as_ref().to_owned() + )); } } } @@ -348,20 +441,32 @@ impl Repository { } #[inline] - pub fn get_backup_inode>(&mut self, backup: &Backup, path: P) -> Result { - self.get_backup_path(backup, path).map(|mut inodes| inodes.pop().unwrap()) + pub fn get_backup_inode>( + &mut self, + backup: &Backup, + path: P, + ) -> Result { + self.get_backup_path(backup, path).map(|mut inodes| { + inodes.pop().unwrap() + }) } - pub fn find_versions>(&mut self, path: P) -> Result, RepositoryError> { + pub fn find_versions>( + &mut self, + path: P, + ) -> Result, RepositoryError> { let path = path.as_ref(); let mut versions = HashMap::new(); for (name, backup) in try!(self.get_all_backups()) { match self.get_backup_inode(&backup, path) { Ok(inode) => { - versions.insert((inode.file_type, inode.timestamp, inode.size), (name, inode)); - }, + versions.insert( + (inode.file_type, inode.timestamp, inode.size), + (name, inode) + ); + } Err(RepositoryError::NoSuchFileInBackup(..)) => continue, - Err(err) => return Err(err) + Err(err) => return Err(err), } } let mut versions: Vec<_> = 
versions.into_iter().map(|(_, v)| v).collect(); @@ -369,7 +474,13 @@ impl Repository { Ok(versions) } - fn find_differences_recurse(&mut self, inode1: &Inode, inode2: &Inode, path: PathBuf, diffs: &mut Vec<(DiffType, PathBuf)>) -> Result<(), RepositoryError> { + fn find_differences_recurse( + &mut self, + inode1: &Inode, + inode2: &Inode, + path: PathBuf, + diffs: &mut Vec<(DiffType, PathBuf)>, + ) -> Result<(), RepositoryError> { if !inode1.is_same_meta(inode2) || inode1.data != inode2.data { diffs.push((DiffType::Mod, path.clone())); } @@ -393,7 +504,12 @@ impl Repository { if chunks1 != chunks2 { let inode1 = try!(self.get_inode(chunks1)); let inode2 = try!(self.get_inode(chunks2)); - try!(self.find_differences_recurse(&inode1, &inode2, path.join(name), diffs)); + try!(self.find_differences_recurse( + &inode1, + &inode2, + path.join(name), + diffs + )); } } else { diffs.push((DiffType::Add, path.join(name))); @@ -409,10 +525,19 @@ impl Repository { } #[inline] - pub fn find_differences(&mut self, inode1: &Inode, inode2: &Inode) -> Result, RepositoryError> { + pub fn find_differences( + &mut self, + inode1: &Inode, + inode2: &Inode, + ) -> Result, RepositoryError> { let mut diffs = vec![]; let path = PathBuf::from("/"); - try!(self.find_differences_recurse(inode1, inode2, path, &mut diffs)); + try!(self.find_differences_recurse( + inode1, + inode2, + path, + &mut diffs + )); Ok(diffs) } } diff --git a/src/repository/backup_file.rs b/src/repository/backup_file.rs index 3212554..fce7fdf 100644 --- a/src/repository/backup_file.rs +++ b/src/repository/backup_file.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::io::{self, BufReader, BufWriter, Read, Write}; use std::fs::{self, File}; @@ -116,41 +116,66 @@ serde_impl!(Backup(u8?) { impl Backup { pub fn read_from>(crypto: &Crypto, path: P) -> Result { let path = path.as_ref(); - let mut file = BufReader::new(try!(File::open(path).map_err(|err| BackupFileError::Read(err, path.to_path_buf())))); + let mut file = BufReader::new(try!(File::open(path).map_err(|err| { + BackupFileError::Read(err, path.to_path_buf()) + }))); let mut header = [0u8; 8]; - try!(file.read_exact(&mut header).map_err(|err| BackupFileError::Read(err, path.to_path_buf()))); + try!(file.read_exact(&mut header).map_err(|err| { + BackupFileError::Read(err, path.to_path_buf()) + })); if header[..HEADER_STRING.len()] != HEADER_STRING { - return Err(BackupFileError::WrongHeader(path.to_path_buf())) + return Err(BackupFileError::WrongHeader(path.to_path_buf())); } let version = header[HEADER_STRING.len()]; if version != HEADER_VERSION { - return Err(BackupFileError::UnsupportedVersion(path.to_path_buf(), version)) + return Err(BackupFileError::UnsupportedVersion( + path.to_path_buf(), + version + )); } let header: BackupHeader = try!(msgpack::decode_from_stream(&mut file).context(path)); let mut data = Vec::new(); - try!(file.read_to_end(&mut data).map_err(|err| BackupFileError::Read(err, path.to_path_buf()))); + try!(file.read_to_end(&mut data).map_err(|err| { + BackupFileError::Read(err, path.to_path_buf()) + })); if let Some(ref encryption) = header.encryption { data = try!(crypto.decrypt(encryption, &data)); } Ok(try!(msgpack::decode(&data).context(path))) } - pub fn save_to>(&self, crypto: &Crypto, encryption: Option, path: P) -> Result<(), BackupFileError> { + pub fn save_to>( + &self, + crypto: &Crypto, + encryption: Option, + path: P, + ) -> Result<(), BackupFileError> { let path = path.as_ref(); let mut data = try!(msgpack::encode(self).context(path)); if 
let Some(ref encryption) = encryption { data = try!(crypto.encrypt(encryption, &data)); } - let mut file = BufWriter::new(try!(File::create(path).map_err(|err| BackupFileError::Write(err, path.to_path_buf())))); - try!(file.write_all(&HEADER_STRING).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))); - try!(file.write_all(&[HEADER_VERSION]).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))); + let mut file = BufWriter::new(try!(File::create(path).map_err(|err| { + BackupFileError::Write(err, path.to_path_buf()) + }))); + try!(file.write_all(&HEADER_STRING).map_err(|err| { + BackupFileError::Write(err, path.to_path_buf()) + })); + try!(file.write_all(&[HEADER_VERSION]).map_err(|err| { + BackupFileError::Write(err, path.to_path_buf()) + })); let header = BackupHeader { encryption: encryption }; try!(msgpack::encode_to_stream(&header, &mut file).context(path)); - try!(file.write_all(&data).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))); + try!(file.write_all(&data).map_err(|err| { + BackupFileError::Write(err, path.to_path_buf()) + })); Ok(()) } - pub fn get_all_from>(crypto: &Crypto, path: P) -> Result, BackupFileError> { + pub fn get_all_from>( + crypto: &Crypto, + path: P, + ) -> Result, BackupFileError> { let mut backups = HashMap::new(); let base_path = path.as_ref(); let path = path.as_ref(); @@ -161,7 +186,10 @@ impl Backup { let mut paths = vec![path.to_path_buf()]; let mut failed_paths = vec![]; while let Some(path) = paths.pop() { - for entry in try!(fs::read_dir(&path).map_err(|e| BackupFileError::Read(e, path.clone()))) { + for entry in try!(fs::read_dir(&path).map_err(|e| { + BackupFileError::Read(e, path.clone()) + })) + { let entry = try!(entry.map_err(|e| BackupFileError::Read(e, path.clone()))); let path = entry.path(); if path.is_dir() { @@ -169,9 +197,12 @@ impl Backup { } else { let relpath = path.strip_prefix(&base_path).unwrap(); if relpath.extension() != Some("backup".as_ref()) { - continue + continue; } - let name = relpath.with_file_name(relpath.file_stem().unwrap()).to_string_lossy().to_string(); + let name = relpath + .with_file_name(relpath.file_stem().unwrap()) + .to_string_lossy() + .to_string(); if let Ok(backup) = Backup::read_from(crypto, &path) { backups.insert(name, backup); } else { diff --git a/src/repository/basic_io.rs b/src/repository/basic_io.rs index 7f0ec30..16e112f 100644 --- a/src/repository/basic_io.rs +++ b/src/repository/basic_io.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::mem; use std::cmp::min; @@ -29,22 +29,27 @@ impl<'a> Read for ChunkReader<'a> { let mut bpos = 0; loop { if buf.len() == bpos { - break + break; } if self.data.len() == self.pos { if let Some(chunk) = self.chunks.pop_front() { self.data = match self.repo.get_chunk(chunk.0) { Ok(Some(data)) => data, - Ok(None) => return Err(io::Error::new(io::ErrorKind::Other, IntegrityError::MissingChunk(chunk.0))), - Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err)) + Ok(None) => { + return Err(io::Error::new( + io::ErrorKind::Other, + IntegrityError::MissingChunk(chunk.0) + )) + } + Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err)), }; self.pos = 0; } else { - break + break; } } - let l = min(self.data.len()-self.pos, buf.len() - bpos); - buf[bpos..bpos+l].copy_from_slice(&self.data[self.pos..self.pos+l]); + let l = min(self.data.len() - self.pos, buf.len() - bpos); + buf[bpos..bpos + l].copy_from_slice(&self.data[self.pos..self.pos + l]); bpos += l; self.pos += l; } @@ -56,7 +61,9 @@ impl<'a> Read 
for ChunkReader<'a> { impl Repository { #[inline] pub fn get_bundle_id(&self, id: u32) -> Result { - self.bundle_map.get(id).ok_or_else(|| IntegrityError::MissingBundleId(id).into()) + self.bundle_map.get(id).ok_or_else(|| { + IntegrityError::MissingBundleId(id).into() + }) } pub fn get_chunk(&mut self, hash: Hash) -> Result>, RepositoryError> { @@ -64,27 +71,39 @@ impl Repository { let found = if let Some(found) = self.index.get(&hash) { found } else { - return Ok(None) + return Ok(None); }; // Lookup bundle id from map let bundle_id = try!(self.get_bundle_id(found.bundle)); // Get chunk from bundle - Ok(Some(try!(self.bundles.get_chunk(&bundle_id, found.chunk as usize)))) + Ok(Some(try!( + self.bundles.get_chunk(&bundle_id, found.chunk as usize) + ))) } #[inline] - pub fn put_chunk(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> { + pub fn put_chunk( + &mut self, + mode: BundleMode, + hash: Hash, + data: &[u8], + ) -> Result<(), RepositoryError> { // If this chunk is in the index, ignore it if self.index.contains(&hash) { - return Ok(()) + return Ok(()); } self.put_chunk_override(mode, hash, data) } - fn write_chunk_to_bundle_and_index(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> { + fn write_chunk_to_bundle_and_index( + &mut self, + mode: BundleMode, + hash: Hash, + data: &[u8], + ) -> Result<(), RepositoryError> { let writer = match mode { BundleMode::Data => &mut self.data_bundle, - BundleMode::Meta => &mut self.meta_bundle + BundleMode::Meta => &mut self.meta_bundle, }; // ...alocate one if needed if writer.is_none() { @@ -101,10 +120,13 @@ impl Repository { let chunk_id = try!(writer_obj.add(data, hash)); let bundle_id = match mode { BundleMode::Data => self.next_data_bundle, - BundleMode::Meta => self.next_meta_bundle + BundleMode::Meta => self.next_meta_bundle, }; // Add location to the index - try!(self.index.set(&hash, &Location::new(bundle_id, chunk_id as u32))); + try!(self.index.set( + &hash, + &Location::new(bundle_id, chunk_id as u32) + )); Ok(()) } @@ -113,14 +135,14 @@ impl Repository { let next_free_bundle_id = self.next_free_bundle_id(); let writer = match mode { BundleMode::Data => &mut self.data_bundle, - BundleMode::Meta => &mut self.meta_bundle + BundleMode::Meta => &mut self.meta_bundle, }; if writer.is_none() { - return Ok(()) + return Ok(()); } let bundle_id = match mode { BundleMode::Data => self.next_data_bundle, - BundleMode::Meta => self.next_meta_bundle + BundleMode::Meta => self.next_meta_bundle, }; let mut finished = None; mem::swap(writer, &mut finished); @@ -139,12 +161,12 @@ impl Repository { let (size, raw_size) = { let writer = match mode { BundleMode::Data => &mut self.data_bundle, - BundleMode::Meta => &mut self.meta_bundle + BundleMode::Meta => &mut self.meta_bundle, }; if let Some(ref writer) = *writer { (writer.estimate_final_size(), writer.raw_size()) } else { - return Ok(()) + return Ok(()); } }; if size >= self.config.bundle_size || raw_size >= 4 * self.config.bundle_size { @@ -158,18 +180,31 @@ impl Repository { } #[inline] - pub fn put_chunk_override(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> { + pub fn put_chunk_override( + &mut self, + mode: BundleMode, + hash: Hash, + data: &[u8], + ) -> Result<(), RepositoryError> { try!(self.write_chunk_to_bundle_and_index(mode, hash, data)); self.finish_bundle_if_needed(mode) } #[inline] - pub fn put_data(&mut self, mode: BundleMode, data: &[u8]) -> Result { + pub fn put_data( + &mut 
self, + mode: BundleMode, + data: &[u8], + ) -> Result { let mut input = Cursor::new(data); self.put_stream(mode, &mut input) } - pub fn put_stream(&mut self, mode: BundleMode, data: &mut R) -> Result { + pub fn put_stream( + &mut self, + mode: BundleMode, + data: &mut R, + ) -> Result { let avg_size = self.config.chunker.avg_size(); let mut chunks = Vec::new(); let mut chunk = Vec::with_capacity(avg_size * 2); @@ -182,14 +217,15 @@ impl Repository { try!(self.put_chunk(mode, hash, &chunk)); chunks.push((hash, chunk.len() as u32)); if res == ChunkerStatus::Finished { - break + break; } } Ok(chunks.into()) } pub fn get_data(&mut self, chunks: &[Chunk]) -> Result, RepositoryError> { - let mut data = Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::() as usize); + let mut data = + Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::() as usize); try!(self.get_stream(chunks, &mut data)); Ok(data) } @@ -199,9 +235,15 @@ impl Repository { ChunkReader::new(self, chunks) } - pub fn get_stream(&mut self, chunks: &[Chunk], w: &mut W) -> Result<(), RepositoryError> { + pub fn get_stream( + &mut self, + chunks: &[Chunk], + w: &mut W, + ) -> Result<(), RepositoryError> { for &(ref hash, len) in chunks { - let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| IntegrityError::MissingChunk(*hash))); + let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| { + IntegrityError::MissingChunk(*hash) + })); debug_assert_eq!(data.len() as u32, len); try!(w.write_all(&data)); } diff --git a/src/repository/bundle_map.rs b/src/repository/bundle_map.rs index fa2c9e0..c6a2553 100644 --- a/src/repository/bundle_map.rs +++ b/src/repository/bundle_map.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::collections::HashMap; use std::path::Path; @@ -51,11 +51,11 @@ impl BundleMap { let mut header = [0u8; 8]; try!(file.read_exact(&mut header)); if header[..HEADER_STRING.len()] != HEADER_STRING { - return Err(BundleMapError::WrongHeader) + return Err(BundleMapError::WrongHeader); } let version = header[HEADER_STRING.len()]; if version != HEADER_VERSION { - return Err(BundleMapError::WrongVersion(version)) + return Err(BundleMapError::WrongVersion(version)); } Ok(BundleMap(try!(msgpack::decode_from_stream(&mut file)))) } @@ -80,7 +80,7 @@ impl BundleMap { pub fn find(&self, bundle: &BundleId) -> Option { for (id, bundle_id) in &self.0 { if bundle == bundle_id { - return Some(*id) + return Some(*id); } } None @@ -92,7 +92,10 @@ impl BundleMap { } pub fn bundles(&self) -> Vec<(u32, BundleId)> { - self.0.iter().map(|(id, bundle)| (*id, bundle.clone())).collect() + self.0 + .iter() + .map(|(id, bundle)| (*id, bundle.clone())) + .collect() } #[inline] diff --git a/src/repository/config.rs b/src/repository/config.rs index 3e62567..c31e246 100644 --- a/src/repository/config.rs +++ b/src/repository/config.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use serde_yaml; @@ -49,7 +49,7 @@ impl Default for ChunkerYaml { fn default() -> Self { ChunkerYaml { method: "fastcdc".to_string(), - avg_size: 16*1024, + avg_size: 16 * 1024, seed: 0 } } @@ -126,14 +126,14 @@ struct ConfigYaml { encryption: Option, bundle_size: usize, chunker: ChunkerYaml, - hash: String, + hash: String } impl Default for ConfigYaml { fn default() -> Self { ConfigYaml { compression: Some("brotli/5".to_string()), encryption: None, - bundle_size: 25*1024*1024, + bundle_size: 25 * 1024 * 1024, chunker: ChunkerYaml::default(), hash: "blake2".to_string() } @@ -162,7 +162,7 @@ impl Default for Config { Config { compression: 
Some(Compression::from_string("brotli/3").unwrap()), encryption: None, - bundle_size: 25*1024*1024, + bundle_size: 25 * 1024 * 1024, chunker: ChunkerType::from_string("fastcdc/16").unwrap(), hash: HashMethod::Blake2 } @@ -185,12 +185,14 @@ impl Config { }; let encryption = if let Some(e) = yaml.encryption { let method = try!(EncryptionMethod::from_yaml(e.method)); - let key = try!(parse_hex(&e.key).map_err(|_| ConfigError::Parse("Invalid public key"))); + let key = try!(parse_hex(&e.key).map_err(|_| { + ConfigError::Parse("Invalid public key") + })); Some((method, key.into())) } else { None }; - Ok(Config{ + Ok(Config { compression: compression, encryption: encryption, bundle_size: yaml.bundle_size, @@ -202,7 +204,12 @@ impl Config { fn to_yaml(&self) -> ConfigYaml { ConfigYaml { compression: self.compression.as_ref().map(|c| c.to_yaml()), - encryption: self.encryption.as_ref().map(|e| EncryptionYaml{method: e.0.to_yaml(), key: to_hex(&e.1[..])}), + encryption: self.encryption.as_ref().map(|e| { + EncryptionYaml { + method: e.0.to_yaml(), + key: to_hex(&e.1[..]) + } + }), bundle_size: self.bundle_size, chunker: self.chunker.to_yaml(), hash: self.hash.to_yaml() diff --git a/src/repository/error.rs b/src/repository/error.rs index 6a3e7d7..9bab880 100644 --- a/src/repository/error.rs +++ b/src/repository/error.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::io; use std::path::PathBuf; diff --git a/src/repository/info.rs b/src/repository/info.rs index 8001456..42321ae 100644 --- a/src/repository/info.rs +++ b/src/repository/info.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::collections::{HashMap, VecDeque}; @@ -40,7 +40,11 @@ pub struct RepositoryInfo { impl Repository { - fn mark_used(&self, bundles: &mut HashMap, chunks: &[Chunk]) -> Result { + fn mark_used( + &self, + bundles: &mut HashMap, + chunks: &[Chunk], + ) -> Result { let mut new = false; for &(hash, len) in chunks { if let Some(pos) = self.index.get(&hash) { @@ -62,17 +66,22 @@ impl Repository { pub fn analyze_usage(&mut self) -> Result, RepositoryError> { if self.dirty { - return Err(RepositoryError::Dirty) + return Err(RepositoryError::Dirty); } try!(self.set_dirty()); let mut usage = HashMap::new(); for (id, bundle) in self.bundle_map.bundles() { - let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| IntegrityError::MissingBundle(bundle))); - usage.insert(id, BundleAnalysis { - chunk_usage: Bitmap::new(bundle.info.chunk_count), - info: bundle.info.clone(), - used_raw_size: 0 - }); + let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| { + IntegrityError::MissingBundle(bundle) + })); + usage.insert( + id, + BundleAnalysis { + chunk_usage: Bitmap::new(bundle.info.chunk_count), + info: bundle.info.clone(), + used_raw_size: 0 + } + ); } let backups = try!(self.get_all_backups()); let mut todo = VecDeque::new(); @@ -81,15 +90,16 @@ impl Repository { } while let Some(chunks) = todo.pop_back() { if !try!(self.mark_used(&mut usage, &chunks)) { - continue + continue; } let inode = try!(self.get_inode(&chunks)); // Mark the content chunks as used match inode.data { - None | Some(FileData::Inline(_)) => (), + None | + Some(FileData::Inline(_)) => (), Some(FileData::ChunkedDirect(chunks)) => { try!(self.mark_used(&mut usage, &chunks)); - }, + } Some(FileData::ChunkedIndirect(chunks)) => { if try!(self.mark_used(&mut usage, &chunks)) { let chunk_data = try!(self.get_data(&chunks)); diff --git a/src/repository/integrity.rs b/src/repository/integrity.rs index 8a5d563..b0ddb83 
100644 --- a/src/repository/integrity.rs +++ b/src/repository/integrity.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use super::*; @@ -51,7 +51,7 @@ impl Repository { let mut progress = ProgressBar::new(self.index.len() as u64); progress.message("checking index: "); progress.set_max_refresh_rate(Some(Duration::from_millis(100))); - for (count,(_hash, location)) in self.index.iter().enumerate() { + for (count, (_hash, location)) in self.index.iter().enumerate() { // Lookup bundle id from map let bundle_id = try!(self.get_bundle_id(location.bundle)); // Get bundle object from bundledb @@ -59,12 +59,14 @@ impl Repository { bundle } else { progress.finish_print("checking index: done."); - return Err(IntegrityError::MissingBundle(bundle_id.clone()).into()) + return Err(IntegrityError::MissingBundle(bundle_id.clone()).into()); }; // Get chunk from bundle if bundle.info.chunk_count <= location.chunk as usize { progress.finish_print("checking index: done."); - return Err(IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into()) + return Err( + IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into() + ); } if count % 1000 == 0 { progress.set(count as u64); @@ -74,7 +76,12 @@ impl Repository { Ok(()) } - fn check_chunks(&self, checked: &mut Bitmap, chunks: &[Chunk], mark: bool) -> Result { + fn check_chunks( + &self, + checked: &mut Bitmap, + chunks: &[Chunk], + mark: bool, + ) -> Result { let mut new = false; for &(hash, _len) in chunks { if let Some(pos) = self.index.pos(&hash) { @@ -83,18 +90,23 @@ impl Repository { checked.set(pos); } } else { - return Err(IntegrityError::MissingChunk(hash).into()) + return Err(IntegrityError::MissingChunk(hash).into()); } } Ok(new) } - fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap) -> Result<(), RepositoryError> { + fn check_inode_contents( + &mut self, + inode: &Inode, + checked: &mut Bitmap, + ) -> Result<(), RepositoryError> { match inode.data { - None | Some(FileData::Inline(_)) => (), + None | + Some(FileData::Inline(_)) => (), Some(FileData::ChunkedDirect(ref chunks)) => { try!(self.check_chunks(checked, chunks, true)); - }, + } Some(FileData::ChunkedIndirect(ref chunks)) => { if try!(self.check_chunks(checked, chunks, true)) { let chunk_data = try!(self.get_data(chunks)); @@ -106,24 +118,34 @@ impl Repository { Ok(()) } - fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap, repair: bool) -> Result, RepositoryError> { + fn check_subtree( + &mut self, + path: PathBuf, + chunks: &[Chunk], + checked: &mut Bitmap, + repair: bool, + ) -> Result, RepositoryError> { let mut modified = false; match self.check_chunks(checked, chunks, false) { Ok(false) => return Ok(None), Ok(true) => (), - Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into()) + Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into()), } let mut inode = try!(self.get_inode(chunks)); // Mark the content chunks as used if let Err(err) = self.check_inode_contents(&inode, checked) { if repair { - warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err); + warn!( + "Problem detected: data of {:?} is corrupt\n\tcaused by: {}", + path, + err + ); info!("Removing inode data"); inode.data = Some(FileData::Inline(vec![].into())); inode.size = 0; modified = true; } else { - return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into()) + return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into()); } } // Put children in todo 
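
The repair flow in `check_subtree` above, continued in the next hunk, follows a tri-state contract: `Ok(None)` when nothing changed, `Ok(Some(new_chunks))` when the inode had to be rewritten under a new chunk list, and `Err` when the subtree is broken and `repair` is false. A minimal sketch of that contract, with `Vec<u8>` standing in for the real `ChunkList` type:

```rust
// Sketch of the check/repair contract (assumed simplification; `Vec<u8>`
// stands in for the real `ChunkList` type).
fn check_node(intact: bool, repair: bool) -> Result<Option<Vec<u8>>, String> {
    if intact {
        // nothing to rewrite, the parent keeps its existing chunk reference
        return Ok(None);
    }
    if repair {
        // re-encode the fixed node; the parent must store this new list
        Ok(Some(b"new chunk list".to_vec()))
    } else {
        // when `repair` is false the error propagates to the caller
        Err("corrupt inode".to_string())
    }
}

fn main() {
    assert_eq!(check_node(true, false), Ok(None));
    assert!(check_node(false, false).is_err());
}
```
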
@@ -135,14 +157,20 @@ impl Repository { Ok(Some(c)) => { *chunks = c; modified = true; - }, - Err(err) => if repair { - warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err); - info!("Removing broken inode from backup"); - removed.push(name.to_string()); - modified = true; - } else { - return Err(err) + } + Err(err) => { + if repair { + warn!( + "Problem detected: inode {:?} is corrupt\n\tcaused by: {}", + path.join(name), + err + ); + info!("Removing broken inode from backup"); + removed.push(name.to_string()); + modified = true; + } else { + return Err(err); + } } } } @@ -159,7 +187,10 @@ impl Repository { } fn evacuate_broken_backup(&self, name: &str) -> Result<(), RepositoryError> { - warn!("The backup {} was corrupted and needed to be modified.", name); + warn!( + "The backup {} was corrupted and needed to be modified.", + name + ); let src = self.layout.backup_path(name); let mut dst = src.with_extension("backup.broken"); let mut num = 1; @@ -176,7 +207,12 @@ impl Repository { } #[inline] - pub fn check_backup(&mut self, name: &str, backup: &mut Backup, repair: bool) -> Result<(), RepositoryError> { + pub fn check_backup( + &mut self, + name: &str, + backup: &mut Backup, + repair: bool, + ) -> Result<(), RepositoryError> { let _lock = if repair { try!(self.write_mode()); Some(self.lock(false)) @@ -185,7 +221,12 @@ impl Repository { }; info!("Checking backup..."); let mut checked = Bitmap::new(self.index.capacity()); - match self.check_subtree(Path::new("").to_path_buf(), &backup.root, &mut checked, repair) { + match self.check_subtree( + Path::new("").to_path_buf(), + &backup.root, + &mut checked, + repair + ) { Ok(None) => (), Ok(Some(chunks)) => { try!(self.flush()); @@ -193,18 +234,30 @@ impl Repository { backup.modified = true; try!(self.evacuate_broken_backup(name)); try!(self.save_backup(backup, name)); - }, - Err(err) => if repair { - warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err); - try!(self.evacuate_broken_backup(name)); - } else { - return Err(err) + } + Err(err) => { + if repair { + warn!( + "The root of the backup {} has been corrupted\n\tcaused by: {}", + name, + err + ); + try!(self.evacuate_broken_backup(name)); + } else { + return Err(err); + } } } Ok(()) } - pub fn check_backup_inode(&mut self, name: &str, backup: &mut Backup, path: &Path, repair: bool) -> Result<(), RepositoryError> { + pub fn check_backup_inode( + &mut self, + name: &str, + backup: &mut Backup, + path: &Path, + repair: bool, + ) -> Result<(), RepositoryError> { let _lock = if repair { try!(self.write_mode()); Some(self.lock(false)) @@ -218,13 +271,19 @@ impl Repository { let mut modified = false; if let Err(err) = self.check_inode_contents(&inode, &mut checked) { if repair { - warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err); + warn!( + "Problem detected: data of {:?} is corrupt\n\tcaused by: {}", + path, + err + ); info!("Removing inode data"); inode.data = Some(FileData::Inline(vec![].into())); inode.size = 0; modified = true; } else { - return Err(IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into()) + return Err( + IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into() + ); } } if let Some(ref mut children) = inode.children { @@ -235,14 +294,20 @@ impl Repository { Ok(Some(c)) => { *chunks = c; modified = true; - }, - Err(err) => if repair { - warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err); - info!("Removing broken 
inode from backup"); - removed.push(name.to_string()); - modified = true; - } else { - return Err(err) + } + Err(err) => { + if repair { + warn!( + "Problem detected: inode {:?} is corrupt\n\tcaused by: {}", + path.join(name), + err + ); + info!("Removing broken inode from backup"); + removed.push(name.to_string()); + modified = true; + } else { + return Err(err); + } } } } @@ -277,15 +342,23 @@ impl Repository { let mut checked = Bitmap::new(self.index.capacity()); let backup_map = match self.get_all_backups() { Ok(backup_map) => backup_map, - Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => { + Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, + _failed))) => { warn!("Some backups could not be read, ignoring them"); backup_map - }, - Err(err) => return Err(err) + } + Err(err) => return Err(err), }; - for (name, mut backup) in ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter()) { + for (name, mut backup) in + ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter()) + { let path = format!("{}::", name); - match self.check_subtree(Path::new(&path).to_path_buf(), &backup.root, &mut checked, repair) { + match self.check_subtree( + Path::new(&path).to_path_buf(), + &backup.root, + &mut checked, + repair + ) { Ok(None) => (), Ok(Some(chunks)) => { try!(self.flush()); @@ -293,12 +366,18 @@ impl Repository { backup.modified = true; try!(self.evacuate_broken_backup(&name)); try!(self.save_backup(&backup, &name)); - }, - Err(err) => if repair { - warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err); - try!(self.evacuate_broken_backup(&name)); - } else { - return Err(err) + } + Err(err) => { + if repair { + warn!( + "The root of the backup {} has been corrupted\n\tcaused by: {}", + name, + err + ); + try!(self.evacuate_broken_backup(&name)); + } else { + return Err(err); + } } } } @@ -311,10 +390,13 @@ impl Repository { for (_id, bundle_id) in self.bundle_map.bundles() { if self.bundles.get_bundle_info(&bundle_id).is_none() { if repair { - warn!("Problem detected: bundle map contains unknown bundle {}", bundle_id); + warn!( + "Problem detected: bundle map contains unknown bundle {}", + bundle_id + ); rebuild = true; } else { - return Err(IntegrityError::MissingBundle(bundle_id).into()) + return Err(IntegrityError::MissingBundle(bundle_id).into()); } } } @@ -323,7 +405,7 @@ impl Repository { warn!("Problem detected: bundle map does not contain all remote bundles"); rebuild = true; } else { - return Err(IntegrityError::RemoteBundlesNotInMap.into()) + return Err(IntegrityError::RemoteBundlesNotInMap.into()); } } if self.bundle_map.len() > self.bundles.len() { @@ -331,7 +413,7 @@ impl Repository { warn!("Problem detected: bundle map contains bundles multiple times"); rebuild = true; } else { - return Err(IntegrityError::MapContainsDuplicates.into()) + return Err(IntegrityError::MapContainsDuplicates.into()); } } if rebuild { @@ -347,7 +429,7 @@ impl Repository { for bundle in self.bundles.list_bundles() { let bundle_id = match bundle.mode { BundleMode::Data => self.next_data_bundle, - BundleMode::Meta => self.next_meta_bundle + BundleMode::Meta => self.next_meta_bundle, }; self.bundle_map.set(bundle_id, bundle.id.clone()); if self.next_meta_bundle == bundle_id { @@ -368,7 +450,13 @@ impl Repository { for (num, id) in bundles { let chunks = try!(self.bundles.get_chunk_list(&id)); for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() { - 
try!(self.index.set(&hash, &Location{bundle: num as u32, chunk: i as u32})); + try!(self.index.set( + &hash, + &Location { + bundle: num as u32, + chunk: i as u32 + } + )); } } Ok(()) @@ -382,19 +470,25 @@ impl Repository { info!("Checking index integrity..."); if let Err(err) = self.index.check() { if repair { - warn!("Problem detected: index was corrupted\n\tcaused by: {}", err); + warn!( + "Problem detected: index was corrupted\n\tcaused by: {}", + err + ); return self.rebuild_index(); } else { - return Err(err.into()) + return Err(err.into()); } } info!("Checking index entries..."); if let Err(err) = self.check_index_chunks() { if repair { - warn!("Problem detected: index entries were inconsistent\n\tcaused by: {}", err); + warn!( + "Problem detected: index entries were inconsistent\n\tcaused by: {}", + err + ); return self.rebuild_index(); } else { - return Err(err.into()) + return Err(err.into()); } } Ok(()) diff --git a/src/repository/layout.rs b/src/repository/layout.rs index 8282a99..def996f 100644 --- a/src/repository/layout.rs +++ b/src/repository/layout.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::path::{Path, PathBuf}; @@ -62,7 +62,8 @@ impl RepositoryLayout { #[inline] pub fn remote_exists(&self) -> bool { - self.remote_bundles_path().exists() && self.backups_path().exists() && self.remote_locks_path().exists() + self.remote_bundles_path().exists() && self.backups_path().exists() && + self.remote_locks_path().exists() } #[inline] @@ -85,13 +86,18 @@ impl RepositoryLayout { self.0.join("bundles/cached") } - fn bundle_path(&self, bundle: &BundleId, mut folder: PathBuf, mut count: usize) -> (PathBuf, PathBuf) { + fn bundle_path( + &self, + bundle: &BundleId, + mut folder: PathBuf, + mut count: usize, + ) -> (PathBuf, PathBuf) { let file = bundle.to_string().to_owned() + ".bundle"; { let mut rest = &file as &str; while count >= 100 { if rest.len() < 10 { - break + break; } folder = folder.join(&rest[0..2]); rest = &rest[2..]; @@ -118,7 +124,10 @@ impl RepositoryLayout { #[inline] pub fn temp_bundle_path(&self) -> PathBuf { - self.temp_bundles_path().join(BundleId::random().to_string().to_owned() + ".bundle") + self.temp_bundles_path().join( + BundleId::random().to_string().to_owned() + + ".bundle" + ) } #[inline] diff --git a/src/repository/metadata.rs b/src/repository/metadata.rs index 89dc5dc..0b24e0b 100644 --- a/src/repository/metadata.rs +++ b/src/repository/metadata.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use filetime::{self, FileTime}; use xattr; @@ -87,7 +87,7 @@ impl fmt::Display for FileType { FileType::Symlink => write!(format, "symlink"), FileType::BlockDevice => write!(format, "block device"), FileType::CharDevice => write!(format, "char device"), - FileType::NamedPipe => write!(format, "named pipe") + FileType::NamedPipe => write!(format, "named pipe"), } } } @@ -167,8 +167,12 @@ serde_impl!(Inode(u8?) 
{ impl Inode { pub fn get_from>(path: P) -> Result { let path = path.as_ref(); - let name = path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "_".to_string()); - let meta = try!(fs::symlink_metadata(path).map_err(|e| InodeError::ReadMetadata(e, path.to_owned()))); + let name = path.file_name() + .map(|s| s.to_string_lossy().to_string()) + .unwrap_or_else(|| "_".to_string()); + let meta = try!(fs::symlink_metadata(path).map_err(|e| { + InodeError::ReadMetadata(e, path.to_owned()) + })); let mut inode = Inode::default(); inode.name = name; if meta.is_file() { @@ -190,7 +194,12 @@ impl Inode { return Err(InodeError::UnsupportedFiletype(path.to_owned())); }; if meta.file_type().is_symlink() { - inode.symlink_target = Some(try!(fs::read_link(path).map_err(|e| InodeError::ReadLinkTarget(e, path.to_owned()))).to_string_lossy().to_string()); + inode.symlink_target = Some( + try!(fs::read_link(path).map_err(|e| { + InodeError::ReadLinkTarget(e, path.to_owned()) + })).to_string_lossy() + .to_string() + ); } if meta.file_type().is_block_device() || meta.file_type().is_char_device() { let rdev = meta.rdev(); @@ -205,8 +214,14 @@ impl Inode { if xattr::SUPPORTED_PLATFORM { if let Ok(attrs) = xattr::list(path) { for name in attrs { - if let Some(data) = try!(xattr::get(path, &name).map_err(|e| InodeError::ReadXattr(e, path.to_owned()))) { - inode.xattrs.insert(name.to_string_lossy().to_string(), data.into()); + if let Some(data) = try!(xattr::get(path, &name).map_err(|e| { + InodeError::ReadXattr(e, path.to_owned()) + })) + { + inode.xattrs.insert( + name.to_string_lossy().to_string(), + data.into() + ); } } } @@ -219,39 +234,58 @@ impl Inode { let mut file = None; match self.file_type { FileType::File => { - file = Some(try!(File::create(&full_path).map_err(|e| InodeError::Create(e, full_path.clone())))); - }, + file = Some(try!(File::create(&full_path).map_err(|e| { + InodeError::Create(e, full_path.clone()) + }))); + } FileType::Directory => { - try!(fs::create_dir(&full_path).map_err(|e| InodeError::Create(e, full_path.clone()))); - }, + try!(fs::create_dir(&full_path).map_err(|e| { + InodeError::Create(e, full_path.clone()) + })); + } FileType::Symlink => { if let Some(ref src) = self.symlink_target { - try!(symlink(src, &full_path).map_err(|e| InodeError::Create(e, full_path.clone()))); + try!(symlink(src, &full_path).map_err(|e| { + InodeError::Create(e, full_path.clone()) + })); } else { - return Err(InodeError::Integrity("Symlink without target")) + return Err(InodeError::Integrity("Symlink without target")); } - }, + } FileType::NamedPipe => { - let name = try!(ffi::CString::new(full_path.as_os_str().as_bytes()).map_err(|_| InodeError::Integrity("Name contains nulls"))); + let name = try!( + ffi::CString::new(full_path.as_os_str().as_bytes()) + .map_err(|_| InodeError::Integrity("Name contains nulls")) + ); let mode = self.mode | libc::S_IFIFO; if unsafe { libc::mkfifo(name.as_ptr(), mode) } != 0 { - return Err(InodeError::Create(io::Error::last_os_error(), full_path.clone())); + return Err(InodeError::Create( + io::Error::last_os_error(), + full_path.clone() + )); } - }, + } FileType::BlockDevice | FileType::CharDevice => { - let name = try!(ffi::CString::new(full_path.as_os_str().as_bytes()).map_err(|_| InodeError::Integrity("Name contains nulls"))); - let mode = self.mode | match self.file_type { - FileType::BlockDevice => libc::S_IFBLK, - FileType::CharDevice => libc::S_IFCHR, - _ => unreachable!() - }; + let name = try!( + 
ffi::CString::new(full_path.as_os_str().as_bytes()) + .map_err(|_| InodeError::Integrity("Name contains nulls")) + ); + let mode = self.mode | + match self.file_type { + FileType::BlockDevice => libc::S_IFBLK, + FileType::CharDevice => libc::S_IFCHR, + _ => unreachable!(), + }; let device = if let Some((major, minor)) = self.device { unsafe { libc::makedev(major, minor) } } else { - return Err(InodeError::Integrity("Device without id")) + return Err(InodeError::Integrity("Device without id")); }; if unsafe { libc::mknod(name.as_ptr(), mode, device) } != 0 { - return Err(InodeError::Create(io::Error::last_os_error(), full_path.clone())); + return Err(InodeError::Create( + io::Error::last_os_error(), + full_path.clone() + )); } } } @@ -271,26 +305,37 @@ impl Inode { } } if let Err(err) = fs::set_permissions(&full_path, Permissions::from_mode(self.mode)) { - warn!("Failed to set permissions {:o} on {:?}: {}", self.mode, full_path, err); + warn!( + "Failed to set permissions {:o} on {:?}: {}", + self.mode, + full_path, + err + ); } if let Err(err) = chown(&full_path, self.user, self.group) { - warn!("Failed to set user {} and group {} on {:?}: {}", self.user, self.group, full_path, err); + warn!( + "Failed to set user {} and group {} on {:?}: {}", + self.user, + self.group, + full_path, + err + ); } Ok(file) } #[inline] pub fn is_same_meta(&self, other: &Inode) -> bool { - self.file_type == other.file_type && self.size == other.size && self.mode == other.mode - && self.user == other.user && self.group == other.group && self.name == other.name - && self.timestamp == other.timestamp && self.symlink_target == other.symlink_target + self.file_type == other.file_type && self.size == other.size && + self.mode == other.mode && self.user == other.user && + self.group == other.group && self.name == other.name && + self.timestamp == other.timestamp && self.symlink_target == other.symlink_target } #[inline] pub fn is_same_meta_quick(&self, other: &Inode) -> bool { - self.timestamp == other.timestamp - && self.file_type == other.file_type - && self.size == other.size + self.timestamp == other.timestamp && self.file_type == other.file_type && + self.size == other.size } #[inline] @@ -306,13 +351,17 @@ impl Inode { impl Repository { - pub fn create_inode>(&mut self, path: P, reference: Option<&Inode>) -> Result { + pub fn create_inode>( + &mut self, + path: P, + reference: Option<&Inode>, + ) -> Result { let mut inode = try!(Inode::get_from(path.as_ref())); if inode.file_type == FileType::File && inode.size > 0 { if let Some(reference) = reference { if reference.is_same_meta_quick(&inode) { inode.data = reference.data.clone(); - return Ok(inode) + return Ok(inode); } } let mut file = try!(File::open(path)); @@ -345,16 +394,20 @@ impl Repository { Ok(try!(Inode::decode(&try!(self.get_data(chunks))))) } - pub fn save_inode_at>(&mut self, inode: &Inode, path: P) -> Result<(), RepositoryError> { + pub fn save_inode_at>( + &mut self, + inode: &Inode, + path: P, + ) -> Result<(), RepositoryError> { if let Some(mut file) = try!(inode.create_at(path.as_ref())) { if let Some(ref contents) = inode.data { match *contents { FileData::Inline(ref data) => { try!(file.write_all(data)); - }, + } FileData::ChunkedDirect(ref chunks) => { try!(self.get_stream(chunks, &mut file)); - }, + } FileData::ChunkedIndirect(ref chunks) => { let chunk_data = try!(self.get_data(chunks)); let chunks = ChunkList::read_from(&chunk_data); diff --git a/src/repository/mod.rs b/src/repository/mod.rs index 76fac06..0ea3885 100644 --- 
a/src/repository/mod.rs +++ b/src/repository/mod.rs @@ -11,7 +11,7 @@ mod backup_file; mod tarfile; mod layout; -use ::prelude::*; +use prelude::*; use std::mem; use std::cmp::max; @@ -47,7 +47,10 @@ pub struct Location { } impl Location { pub fn new(bundle: u32, chunk: u32) -> Self { - Location{ bundle: bundle, chunk: chunk } + Location { + bundle: bundle, + chunk: chunk + } } } @@ -88,28 +91,42 @@ pub struct Repository { impl Repository { - pub fn create, R: AsRef>(path: P, config: Config, remote: R) -> Result { + pub fn create, R: AsRef>( + path: P, + config: Config, + remote: R, + ) -> Result { let layout = RepositoryLayout::new(path.as_ref().to_path_buf()); try!(fs::create_dir(layout.base_path())); - try!(File::create(layout.excludes_path()).and_then(|mut f| f.write_all(DEFAULT_EXCLUDES))); + try!(File::create(layout.excludes_path()).and_then(|mut f| { + f.write_all(DEFAULT_EXCLUDES) + })); try!(fs::create_dir(layout.keys_path())); try!(fs::create_dir(layout.local_locks_path())); try!(symlink(remote, layout.remote_path())); - try!(File::create(layout.remote_readme_path()).and_then(|mut f| f.write_all(REPOSITORY_README))); + try!(File::create(layout.remote_readme_path()).and_then( + |mut f| { + f.write_all(REPOSITORY_README) + } + )); try!(fs::create_dir_all(layout.remote_locks_path())); try!(config.save(layout.config_path())); try!(BundleDb::create(layout.clone())); - try!(Index::::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION)); + try!(Index::::create( + layout.index_path(), + &INDEX_MAGIC, + INDEX_VERSION + )); try!(BundleMap::create().save(layout.bundle_map_path())); try!(fs::create_dir_all(layout.backups_path())); Self::open(path) } - #[allow(unknown_lints,useless_let_if_seq)] + #[allow(unknown_lints, useless_let_if_seq)] pub fn open>(path: P) -> Result { let layout = RepositoryLayout::new(path.as_ref().to_path_buf()); if !layout.remote_exists() { - return Err(RepositoryError::NoRemote) + return Err(RepositoryError::NoRemote); } let config = try!(Config::load(layout.config_path())); let remote_locks = LockFolder::new(layout.remote_locks_path()); @@ -118,13 +135,21 @@ impl Repository { let lock = try!(local_locks.lock(false)); let crypto = Arc::new(Mutex::new(try!(Crypto::open(layout.keys_path())))); let (bundles, new, gone) = try!(BundleDb::open(layout.clone(), crypto.clone())); - let (index, mut rebuild_index) = match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } { - Ok(index) => (index, false), - Err(err) => { - error!("Failed to load local index:\n\tcaused by: {}", err); - (try!(Index::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION)), true) - } - }; + let (index, mut rebuild_index) = + match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } { + Ok(index) => (index, false), + Err(err) => { + error!("Failed to load local index:\n\tcaused by: {}", err); + ( + try!(Index::create( + layout.index_path(), + &INDEX_MAGIC, + INDEX_VERSION + )), + true + ) + } + }; let (bundle_map, rebuild_bundle_map) = match BundleMap::load(layout.bundle_map_path()) { Ok(bundle_map) => (bundle_map, false), Err(err) => { @@ -163,7 +188,12 @@ impl Repository { if !new.is_empty() { info!("Adding {} new bundles to index", new.len()); try!(repo.write_mode()); - for bundle in ProgressIter::new("adding bundles to index", new.len(), new.into_iter()) { + for bundle in ProgressIter::new( + "adding bundles to index", + new.len(), + new.into_iter() + ) + { try!(repo.add_new_remote_bundle(bundle)) } save_bundle_map = true; @@ -188,7 +218,11 @@ impl 
Repository { Ok(repo) } - pub fn import, R: AsRef>(path: P, remote: R, key_files: Vec) -> Result { + pub fn import, R: AsRef>( + path: P, + remote: R, + key_files: Vec, + ) -> Result { let path = path.as_ref(); let mut repo = try!(Repository::create(path, Config::default(), remote)); for file in key_files { @@ -202,15 +236,24 @@ impl Repository { repo.config = backup.config; try!(repo.save_config()) } else { - warn!("No backup found in the repository to take configuration from, please set the configuration manually."); + warn!( + "No backup found in the repository to take configuration from, please set the configuration manually." + ); } Ok(repo) } #[inline] - pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> { + pub fn register_key( + &mut self, + public: PublicKey, + secret: SecretKey, + ) -> Result<(), RepositoryError> { try!(self.write_mode()); - Ok(try!(self.crypto.lock().unwrap().register_secret_key(public, secret))) + Ok(try!(self.crypto.lock().unwrap().register_secret_key( + public, + secret + ))) } #[inline] @@ -268,7 +311,10 @@ impl Repository { mem::swap(&mut self.data_bundle, &mut finished); { let bundle = try!(self.bundles.add_bundle(finished.unwrap())); - self.bundle_map.set(self.next_data_bundle, bundle.id.clone()); + self.bundle_map.set( + self.next_data_bundle, + bundle.id.clone() + ); } self.next_data_bundle = self.next_free_bundle_id() } @@ -277,7 +323,10 @@ impl Repository { mem::swap(&mut self.meta_bundle, &mut finished); { let bundle = try!(self.bundles.add_bundle(finished.unwrap())); - self.bundle_map.set(self.next_meta_bundle, bundle.id.clone()); + self.bundle_map.set( + self.next_meta_bundle, + bundle.id.clone() + ); } self.next_meta_bundle = self.next_free_bundle_id() } @@ -291,12 +340,12 @@ impl Repository { fn add_new_remote_bundle(&mut self, bundle: BundleInfo) -> Result<(), RepositoryError> { if self.bundle_map.find(&bundle.id).is_some() { - return Ok(()) + return Ok(()); } debug!("Adding new bundle to index: {}", bundle.id); let bundle_id = match bundle.mode { BundleMode::Data => self.next_data_bundle, - BundleMode::Meta => self.next_meta_bundle + BundleMode::Meta => self.next_meta_bundle, }; let chunks = try!(self.bundles.get_chunk_list(&bundle.id)); self.bundle_map.set(bundle_id, bundle.id.clone()); @@ -307,7 +356,14 @@ impl Repository { self.next_data_bundle = self.next_free_bundle_id() } for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() { - if let Some(old) = try!(self.index.set(&hash, &Location{bundle: bundle_id as u32, chunk: i as u32})) { + if let Some(old) = try!(self.index.set( + &hash, + &Location { + bundle: bundle_id as u32, + chunk: i as u32 + } + )) + { // Duplicate chunk, forced ordering: higher bundle id wins let old_bundle_id = try!(self.get_bundle_id(old.bundle)); if old_bundle_id > bundle.id { diff --git a/src/repository/tarfile.rs b/src/repository/tarfile.rs index 7db832d..f474788 100644 --- a/src/repository/tarfile.rs +++ b/src/repository/tarfile.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::collections::{HashMap, HashSet, BTreeMap}; use std::path::{Path, PathBuf}; @@ -82,17 +82,21 @@ fn inode_from_entry(entry: &mut tar::Entry) -> Result FileType::File, + tar::EntryType::Regular | + tar::EntryType::Link | + tar::EntryType::Continuous => FileType::File, tar::EntryType::Symlink => FileType::Symlink, tar::EntryType::Directory => FileType::Directory, tar::EntryType::Block => FileType::BlockDevice, tar::EntryType::Char => FileType::CharDevice, 
tar::EntryType::Fifo => FileType::NamedPipe, - _ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()) + _ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()), }; Inode { file_type: file_type, - name: path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "/".to_string()), + name: path.file_name() + .map(|s| s.to_string_lossy().to_string()) + .unwrap_or_else(|| "/".to_string()), symlink_target: try!(entry.link_name()).map(|s| s.to_string_lossy().to_string()), size: try!(header.size()), mode: try!(header.mode()), @@ -100,8 +104,13 @@ fn inode_from_entry(entry: &mut tar::Entry) -> Result Some((try!(header.device_major()).unwrap_or(0), try!(header.device_minor()).unwrap_or(0))), - _ => None + FileType::BlockDevice | FileType::CharDevice => Some(( + try!(header.device_major()) + .unwrap_or(0), + try!(header.device_minor()) + .unwrap_or(0) + )), + _ => None, }, ..Default::default() } @@ -111,7 +120,10 @@ fn inode_from_entry(entry: &mut tar::Entry) -> Result(entry: &mut tar::Entry) -> Result(&mut self, entry: &mut tar::Entry) -> Result { + fn import_tar_entry( + &mut self, + entry: &mut tar::Entry, + ) -> Result { let mut inode = try!(inode_from_entry(entry)); if inode.size < 100 { let mut data = Vec::with_capacity(inode.size as usize); @@ -142,7 +157,12 @@ impl Repository { Ok(inode) } - fn import_tarfile_as_inode(&mut self, backup: &mut Backup, input: R, failed_paths: &mut Vec) -> Result<(Inode, ChunkList), RepositoryError> { + fn import_tarfile_as_inode( + &mut self, + backup: &mut Backup, + input: R, + failed_paths: &mut Vec, + ) -> Result<(Inode, ChunkList), RepositoryError> { let mut tarfile = tar::Archive::new(input); // Step 1: create inodes for all entries let mut inodes = HashMap::)>::new(); @@ -174,12 +194,14 @@ impl Repository { backup.group_names.insert(inode.group, name.to_string()); } inodes.insert(path, (inode, HashSet::new())); - }, - Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => { + } + Err(RepositoryError::Inode(_)) | + Err(RepositoryError::Chunker(_)) | + Err(RepositoryError::Io(_)) => { info!("Failed to backup {:?}", path); failed_paths.push(path); - continue - }, + continue; + } Err(err) => { return Err(err); } @@ -198,7 +220,9 @@ impl Repository { let (inode, _) = inodes.remove(&path).unwrap(); let chunks = try!(self.put_inode(&inode)); if let Some(parent_path) = path.parent() { - if let Some(&mut (ref mut parent_inode, ref mut children)) = inodes.get_mut(parent_path) { + if let Some(&mut (ref mut parent_inode, ref mut children)) = + inodes.get_mut(parent_path) + { children.remove(&inode.name); parent_inode.cum_size += inode.cum_size; for &(_, len) in chunks.iter() { @@ -206,8 +230,11 @@ impl Repository { } parent_inode.cum_files += inode.cum_files; parent_inode.cum_dirs += inode.cum_dirs; - parent_inode.children.as_mut().unwrap().insert(inode.name.clone(), chunks); - continue + parent_inode.children.as_mut().unwrap().insert( + inode.name.clone(), + chunks + ); + continue; } } roots.push((inode, chunks)); @@ -242,11 +269,14 @@ impl Repository { } } - pub fn import_tarfile>(&mut self, tarfile: P) -> Result { + pub fn import_tarfile>( + &mut self, + tarfile: P, + ) -> Result { try!(self.write_mode()); let _lock = try!(self.lock(false)); if self.dirty { - return Err(RepositoryError::Dirty) + return Err(RepositoryError::Dirty); } try!(self.set_dirty()); let mut backup = Backup::default(); @@ -258,9 +288,17 @@ impl Repository { let mut failed_paths = vec![]; let 
tarfile = tarfile.as_ref(); let (root_inode, chunks) = if tarfile == Path::new("-") { - try!(self.import_tarfile_as_inode(&mut backup, io::stdin(), &mut failed_paths)) + try!(self.import_tarfile_as_inode( + &mut backup, + io::stdin(), + &mut failed_paths + )) } else { - try!(self.import_tarfile_as_inode(&mut backup, try!(File::open(tarfile)), &mut failed_paths)) + try!(self.import_tarfile_as_inode( + &mut backup, + try!(File::open(tarfile)), + &mut failed_paths + )) }; backup.root = chunks; try!(self.flush()); @@ -284,16 +322,34 @@ impl Repository { } } - fn export_xattrs(&mut self, inode: &Inode, tarfile: &mut tar::Builder) -> Result<(), RepositoryError> { + fn export_xattrs( + &mut self, + inode: &Inode, + tarfile: &mut tar::Builder, + ) -> Result<(), RepositoryError> { let mut pax = PaxBuilder::new(); for (key, value) in &inode.xattrs { - pax.add(&format!("{}{}", PAX_XATTR_PREFIX,key), str::from_utf8(value).unwrap()); + pax.add( + &format!("{}{}", PAX_XATTR_PREFIX, key), + str::from_utf8(value).unwrap() + ); } Ok(try!(tarfile.append_pax_extensions(&pax))) } - fn export_tarfile_recurse(&mut self, backup: &Backup, path: &Path, inode: Inode, tarfile: &mut tar::Builder, skip_root: bool) -> Result<(), RepositoryError> { - let path = if skip_root { path.to_path_buf() } else { path.join(&inode.name) }; + fn export_tarfile_recurse( + &mut self, + backup: &Backup, + path: &Path, + inode: Inode, + tarfile: &mut tar::Builder, + skip_root: bool, + ) -> Result<(), RepositoryError> { + let path = if skip_root { + path.to_path_buf() + } else { + path.join(&inode.name) + }; if inode.file_type != FileType::Directory || !skip_root { if !inode.xattrs.is_empty() { try!(self.export_xattrs(&inode, tarfile)); @@ -332,13 +388,15 @@ impl Repository { FileType::Directory => tar::EntryType::Directory, FileType::BlockDevice => tar::EntryType::Block, FileType::CharDevice => tar::EntryType::Char, - FileType::NamedPipe => tar::EntryType::Fifo + FileType::NamedPipe => tar::EntryType::Fifo, }); header.set_cksum(); match inode.data { None => try!(tarfile.append(&header, Cursor::new(&[]))), Some(FileData::Inline(data)) => try!(tarfile.append(&header, Cursor::new(data))), - Some(FileData::ChunkedDirect(chunks)) => try!(tarfile.append(&header, self.get_reader(chunks))), + Some(FileData::ChunkedDirect(chunks)) => { + try!(tarfile.append(&header, self.get_reader(chunks))) + } Some(FileData::ChunkedIndirect(chunks)) => { let chunks = ChunkList::read_from(&try!(self.get_data(&chunks))); try!(tarfile.append(&header, self.get_reader(chunks))) @@ -348,24 +406,46 @@ impl Repository { if let Some(children) = inode.children { for chunks in children.values() { let inode = try!(self.get_inode(chunks)); - try!(self.export_tarfile_recurse(backup, &path, inode, tarfile, false)); + try!(self.export_tarfile_recurse( + backup, + &path, + inode, + tarfile, + false + )); } } Ok(()) } - pub fn export_tarfile>(&mut self, backup: &Backup, inode: Inode, tarfile: P) -> Result<(), RepositoryError> { + pub fn export_tarfile>( + &mut self, + backup: &Backup, + inode: Inode, + tarfile: P, + ) -> Result<(), RepositoryError> { let tarfile = tarfile.as_ref(); if tarfile == Path::new("-") { let mut tarfile = tar::Builder::new(io::stdout()); - try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile, true)); + try!(self.export_tarfile_recurse( + backup, + Path::new(""), + inode, + &mut tarfile, + true + )); try!(tarfile.finish()); } else { let mut tarfile = tar::Builder::new(try!(File::create(tarfile))); - 
try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile, true)); + try!(self.export_tarfile_recurse( + backup, + Path::new(""), + inode, + &mut tarfile, + true + )); try!(tarfile.finish()); } Ok(()) } - } diff --git a/src/repository/vacuum.rs b/src/repository/vacuum.rs index f2c6316..15250d4 100644 --- a/src/repository/vacuum.rs +++ b/src/repository/vacuum.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use std::collections::HashSet; @@ -13,7 +13,12 @@ impl Repository { } } - pub fn vacuum(&mut self, ratio: f32, combine: bool, force: bool) -> Result<(), RepositoryError> { + pub fn vacuum( + &mut self, + ratio: f32, + combine: bool, + force: bool, + ) -> Result<(), RepositoryError> { try!(self.flush()); info!("Locking repository"); try!(self.write_mode()); @@ -27,7 +32,12 @@ impl Repository { data_total += bundle.info.encoded_size; data_used += bundle.get_used_size(); } - info!("Usage: {} of {}, {:.1}%", to_file_size(data_used as u64), to_file_size(data_total as u64), data_used as f32/data_total as f32*100.0); + info!( + "Usage: {} of {}, {:.1}%", + to_file_size(data_used as u64), + to_file_size(data_total as u64), + data_used as f32 / data_total as f32 * 100.0 + ); let mut rewrite_bundles = HashSet::new(); let mut reclaim_space = 0; for (id, bundle) in &usage { @@ -58,12 +68,21 @@ impl Repository { } } } - info!("Reclaiming {} by rewriting {} bundles", to_file_size(reclaim_space as u64), rewrite_bundles.len()); + info!( + "Reclaiming {} by rewriting {} bundles", + to_file_size(reclaim_space as u64), + rewrite_bundles.len() + ); if !force { self.dirty = false; - return Ok(()) + return Ok(()); } - for id in ProgressIter::new("rewriting bundles", rewrite_bundles.len(), rewrite_bundles.iter()) { + for id in ProgressIter::new( + "rewriting bundles", + rewrite_bundles.len(), + rewrite_bundles.iter() + ) + { let bundle = &usage[id]; let bundle_id = self.bundle_map.get(*id).unwrap(); let chunks = try!(self.bundles.get_chunk_list(&bundle_id)); @@ -71,7 +90,7 @@ impl Repository { for (chunk, &(hash, _len)) in chunks.into_iter().enumerate() { if !bundle.chunk_usage.get(chunk) { try!(self.index.delete(&hash)); - continue + continue; } let data = try!(self.bundles.get_chunk(&bundle_id, chunk)); try!(self.put_chunk_override(mode, hash, &data)); @@ -81,7 +100,12 @@ impl Repository { info!("Checking index"); for (hash, location) in self.index.iter() { if rewrite_bundles.contains(&location.bundle) { - panic!("Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}", hash, location.bundle, location.chunk); + panic!( + "Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}", + hash, + location.bundle, + location.chunk + ); } } info!("Deleting {} bundles", rewrite_bundles.len()); diff --git a/src/util/bitmap.rs b/src/util/bitmap.rs index e0d84d8..615c473 100644 --- a/src/util/bitmap.rs +++ b/src/util/bitmap.rs @@ -8,7 +8,7 @@ pub struct Bitmap { impl Bitmap { /// Creates a new bitmap pub fn new(len: usize) -> Self { - let len = (len+7)/8; + let len = (len + 7) / 8; let mut bytes = Vec::with_capacity(len); bytes.resize(len, 0); Self { bytes: bytes } @@ -28,7 +28,7 @@ impl Bitmap { #[inline] fn convert_index(&self, index: usize) -> (usize, u8) { - (index/8, 1u8<<(index%8)) + (index / 8, 1u8 << (index % 8)) } #[inline] diff --git a/src/util/chunk.rs b/src/util/chunk.rs index 5b9499f..3aa9934 100644 --- a/src/util/chunk.rs +++ b/src/util/chunk.rs @@ -63,7 +63,7 @@ impl ChunkList { if src.len() % 20 != 0 { warn!("Reading truncated chunk list"); } - 
ChunkList::read_n_from(src.len()/20, &mut Cursor::new(src)).unwrap() + ChunkList::read_n_from(src.len() / 20, &mut Cursor::new(src)).unwrap() } #[inline] @@ -111,7 +111,10 @@ impl DerefMut for ChunkList { impl Serialize for ChunkList { #[inline] - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { let mut buf = Vec::with_capacity(self.encoded_size()); self.write_to(&mut buf).unwrap(); Bytes::from(&buf as &[u8]).serialize(serializer) @@ -120,12 +123,17 @@ impl Serialize for ChunkList { impl<'a> Deserialize<'a> for ChunkList { #[inline] - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'a> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'a>, + { let data: Vec = try!(ByteBuf::deserialize(deserializer)).into(); if data.len() % 20 != 0 { return Err(D::Error::custom("Invalid chunk list length")); } - Ok(ChunkList::read_n_from(data.len()/20, &mut Cursor::new(data)).unwrap()) + Ok( + ChunkList::read_n_from(data.len() / 20, &mut Cursor::new(data)).unwrap() + ) } } @@ -171,7 +179,10 @@ mod tests { let mut list = ChunkList::new(); list.push((Hash::default(), 0)); list.push((Hash::default(), 1)); - assert_eq!(list.into_inner(), vec![(Hash::default(), 0), (Hash::default(), 1)]); + assert_eq!( + list.into_inner(), + vec![(Hash::default(), 0), (Hash::default(), 1)] + ); } #[test] @@ -182,8 +193,8 @@ mod tests { let mut buf = Vec::new(); assert!(list.write_to(&mut buf).is_ok()); assert_eq!(buf.len(), 40); - assert_eq!(&buf[16..20], &[0,0,0,0]); - assert_eq!(&buf[36..40], &[1,0,0,0]); + assert_eq!(&buf[16..20], &[0, 0, 0, 0]); + assert_eq!(&buf[36..40], &[1, 0, 0, 0]); } #[test] @@ -196,7 +207,48 @@ mod tests { #[test] fn test_read_from() { - let data = vec![0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, 1,0,0,0]; + let data = vec![ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + ]; let list = ChunkList::read_from(&data); assert_eq!(list.len(), 2); assert_eq!(list[0], (Hash::default(), 0)); @@ -212,7 +264,7 @@ mod tests { assert!(list.write_to(&mut buf).is_ok()); let encoded = msgpack::encode(&list).unwrap(); assert_eq!(buf, &encoded[2..]); - assert_eq!(&[196,40], &encoded[..2]); + assert_eq!(&[196, 40], &encoded[..2]); } #[test] @@ -220,7 +272,7 @@ mod tests { let mut list = ChunkList::new(); list.push((Hash::default(), 0)); list.push((Hash::default(), 1)); - let mut buf = vec![196,40]; + let mut buf = vec![196, 40]; assert!(list.write_to(&mut buf).is_ok()); assert!(msgpack::decode::(&buf).is_ok()); assert_eq!(msgpack::decode::(&buf).unwrap(), list); diff --git a/src/util/cli.rs b/src/util/cli.rs index 975df7c..3a3ffc1 100644 --- a/src/util/cli.rs +++ b/src/util/cli.rs @@ -55,7 +55,11 @@ impl ProgressIter { let msg = format!("{}: ", msg); bar.message(&msg); bar.set_max_refresh_rate(Some(Duration::from_millis(100))); - ProgressIter { inner: inner, bar: bar, msg: msg } + ProgressIter { + inner: inner, + bar: bar, + msg: msg + } } } @@ -72,7 +76,7 @@ impl Iterator for ProgressIter { let msg = self.msg.clone() + "done."; self.bar.finish_print(&msg); None - }, + } Some(item) => { self.bar.inc(); Some(item) diff --git a/src/util/compression.rs b/src/util/compression.rs index 81eeaca..befa547 100644 --- a/src/util/compression.rs +++ b/src/util/compression.rs @@ -57,7 
+57,10 @@ pub struct Compression { } impl Default for Compression { fn default() -> Self { - Compression { method: CompressionMethod::Brotli, level: 3 } + Compression { + method: CompressionMethod::Brotli, + level: 3 + } } } serde_impl!(Compression(u64) { @@ -74,7 +77,9 @@ impl Compression { pub fn from_string(name: &str) -> Result { let (name, level) = if let Some(pos) = name.find('/') { - let level = try!(u8::from_str(&name[pos+1..]).map_err(|_| CompressionError::UnsupportedCodec(name.to_string()))); + let level = try!(u8::from_str(&name[pos + 1..]).map_err(|_| { + CompressionError::UnsupportedCodec(name.to_string()) + })); let name = &name[..pos]; (name, level) } else { @@ -85,9 +90,12 @@ impl Compression { "brotli" => CompressionMethod::Brotli, "lzma" | "lzma2" | "xz" => CompressionMethod::Lzma, "lz4" => CompressionMethod::Lz4, - _ => return Err(CompressionError::UnsupportedCodec(name.to_string())) + _ => return Err(CompressionError::UnsupportedCodec(name.to_string())), }; - Ok(Compression { method: method, level: level }) + Ok(Compression { + method: method, + level: level + }) } pub fn name(&self) -> &'static str { @@ -103,7 +111,7 @@ impl Compression { let name = CString::new(self.name().as_bytes()).unwrap(); let codec = unsafe { squash_get_codec(name.as_ptr()) }; if codec.is_null() { - return Err(CompressionError::InitializeCodec) + return Err(CompressionError::InitializeCodec); } Ok(codec) } @@ -117,25 +125,27 @@ impl Compression { let codec = try!(self.codec()); let options = unsafe { squash_options_new(codec, ptr::null::<()>()) }; if options.is_null() { - return Err(CompressionError::InitializeOptions) + return Err(CompressionError::InitializeOptions); } let option = CString::new("level"); let value = CString::new(format!("{}", self.level)); - let res = unsafe { squash_options_parse_option( - options, - option.unwrap().as_ptr(), - value.unwrap().as_ptr() - )}; + let res = unsafe { + squash_options_parse_option(options, option.unwrap().as_ptr(), value.unwrap().as_ptr()) + }; if res != SQUASH_OK { //panic!(unsafe { CStr::from_ptr(squash_status_to_string(res)).to_str().unwrap() }); - return Err(CompressionError::InitializeOptions) + return Err(CompressionError::InitializeOptions); } Ok(options) } #[inline] fn error(code: SquashStatus) -> CompressionError { - CompressionError::Operation(unsafe { CStr::from_ptr(squash_status_to_string(code)).to_str().unwrap() }) + CompressionError::Operation(unsafe { + CStr::from_ptr(squash_status_to_string(code)) + .to_str() + .unwrap() + }) } pub fn compress(&self, data: &[u8]) -> Result, CompressionError> { @@ -148,18 +158,20 @@ impl Compression { data.len() as usize )};*/ let mut buf = Vec::with_capacity(size as usize); - let res = unsafe { squash_codec_compress_with_options( - codec, - &mut size, - buf.as_mut_ptr(), - data.len(), - data.as_ptr(), - options) + let res = unsafe { + squash_codec_compress_with_options( + codec, + &mut size, + buf.as_mut_ptr(), + data.len(), + data.as_ptr(), + options + ) }; if res != SQUASH_OK { - println!("{:?}", data); + println!("{:?}", data); println!("{}, {}", data.len(), size); - return Err(Self::error(res)) + return Err(Self::error(res)); } unsafe { buf.set_len(size) }; Ok(buf) @@ -167,25 +179,24 @@ impl Compression { pub fn decompress(&self, data: &[u8]) -> Result, CompressionError> { let codec = try!(self.codec()); - let mut size = unsafe { squash_codec_get_uncompressed_size( - codec, - data.len(), - data.as_ptr() - )}; + let mut size = + unsafe { squash_codec_get_uncompressed_size(codec, data.len(), 
data.as_ptr()) }; if size == 0 { size = 100 * data.len(); } let mut buf = Vec::with_capacity(size); - let res = unsafe { squash_codec_decompress( - codec, - &mut size, - buf.as_mut_ptr(), - data.len(), - data.as_ptr(), - ptr::null_mut::<()>()) + let res = unsafe { + squash_codec_decompress( + codec, + &mut size, + buf.as_mut_ptr(), + data.len(), + data.as_ptr(), + ptr::null_mut::<()>() + ) }; if res != SQUASH_OK { - return Err(Self::error(res)) + return Err(Self::error(res)); } unsafe { buf.set_len(size) }; Ok(buf) @@ -194,9 +205,8 @@ impl Compression { pub fn compress_stream(&self) -> Result { let codec = try!(self.codec()); let options = try!(self.options()); - let stream = unsafe { squash_stream_new_with_options( - codec, SQUASH_STREAM_COMPRESS, options - ) }; + let stream = + unsafe { squash_stream_new_with_options(codec, SQUASH_STREAM_COMPRESS, options) }; if stream.is_null() { return Err(CompressionError::InitializeStream); } @@ -205,9 +215,8 @@ impl Compression { pub fn decompress_stream(&self) -> Result { let codec = try!(self.codec()); - let stream = unsafe { squash_stream_new( - codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>() - ) }; + let stream = + unsafe { squash_stream_new(codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>()) }; if stream.is_null() { return Err(CompressionError::InitializeStream); } @@ -218,7 +227,7 @@ impl Compression { pub struct CompressionStream { stream: *mut SquashStream, - buffer: [u8; 16*1024] + buffer: [u8; 16 * 1024] } impl CompressionStream { @@ -226,11 +235,15 @@ impl CompressionStream { fn new(stream: *mut SquashStream) -> Self { CompressionStream { stream: stream, - buffer: [0; 16*1024] + buffer: [0; 16 * 1024] } } - pub fn process(&mut self, input: &[u8], output: &mut W) -> Result<(), CompressionError> { + pub fn process( + &mut self, + input: &[u8], + output: &mut W, + ) -> Result<(), CompressionError> { let stream = unsafe { &mut (*self.stream) }; stream.next_in = input.as_ptr(); stream.avail_in = input.len(); @@ -239,12 +252,12 @@ impl CompressionStream { stream.avail_out = self.buffer.len(); let res = unsafe { squash_stream_process(stream) }; if res < 0 { - return Err(Compression::error(res)) + return Err(Compression::error(res)); } let output_size = self.buffer.len() - stream.avail_out; try!(output.write_all(&self.buffer[..output_size])); if res != SQUASH_PROCESSING { - break + break; } } Ok(()) @@ -257,12 +270,12 @@ impl CompressionStream { stream.avail_out = self.buffer.len(); let res = unsafe { squash_stream_finish(stream) }; if res < 0 { - return Err(Compression::error(res)) + return Err(Compression::error(res)); } let output_size = self.buffer.len() - stream.avail_out; try!(output.write_all(&self.buffer[..output_size])); if res != SQUASH_PROCESSING { - break + break; } } Ok(()) @@ -271,7 +284,9 @@ impl CompressionStream { impl Drop for CompressionStream { fn drop(&mut self) { - unsafe { squash_object_unref(self.stream as *mut libc::c_void); } + unsafe { + squash_object_unref(self.stream as *mut libc::c_void); + } } } @@ -303,8 +318,14 @@ mod tests { #[test] fn test_to_string() { - assert_eq!("brotli/1", Compression::from_string("brotli/1").unwrap().to_string()); - assert_eq!("deflate/1", Compression::from_string("gzip/1").unwrap().to_string()); + assert_eq!( + "brotli/1", + Compression::from_string("brotli/1").unwrap().to_string() + ); + assert_eq!( + "deflate/1", + Compression::from_string("gzip/1").unwrap().to_string() + ); } #[allow(dead_code, needless_range_loop)] @@ -318,8 +339,8 @@ mod tests { #[allow(dead_code)] fn 
test_compression(method: &str, min_lvl: u8, max_lvl: u8) { - let input = test_data(16*1024); - for i in min_lvl..max_lvl+1 { + let input = test_data(16 * 1024); + for i in min_lvl..max_lvl + 1 { let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap(); println!("{}", method.to_string()); let compressed = method.compress(&input).unwrap(); @@ -353,8 +374,8 @@ mod tests { #[allow(dead_code)] fn test_stream_compression(method: &str, min_lvl: u8, max_lvl: u8) { - let input = test_data(512*1024); - for i in min_lvl..max_lvl+1 { + let input = test_data(512 * 1024); + for i in min_lvl..max_lvl + 1 { let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap(); println!("{}", method.to_string()); let mut compressor = method.compress_stream().unwrap(); @@ -363,7 +384,9 @@ mod tests { compressor.finish(&mut compressed).unwrap(); let mut decompressor = method.decompress_stream().unwrap(); let mut decompressed = Vec::with_capacity(input.len()); - decompressor.process(&compressed, &mut decompressed).unwrap(); + decompressor + .process(&compressed, &mut decompressed) + .unwrap(); decompressor.finish(&mut decompressed).unwrap(); assert_eq!(input.len(), decompressed.len()); for i in 0..input.len() { @@ -415,7 +438,7 @@ mod benches { #[allow(dead_code)] fn bench_stream_compression(b: &mut Bencher, method: Compression) { - let input = test_data(512*1024); + let input = test_data(512 * 1024); b.iter(|| { let mut compressor = method.compress_stream().unwrap(); let mut compressed = Vec::with_capacity(input.len()); @@ -427,7 +450,7 @@ mod benches { #[allow(dead_code)] fn bench_stream_decompression(b: &mut Bencher, method: Compression) { - let input = test_data(512*1024); + let input = test_data(512 * 1024); let mut compressor = method.compress_stream().unwrap(); let mut compressed = Vec::with_capacity(input.len()); compressor.process(&input, &mut compressed).unwrap(); @@ -435,7 +458,9 @@ mod benches { b.iter(|| { let mut decompressor = method.decompress_stream().unwrap(); let mut decompressed = Vec::with_capacity(compressed.len()); - decompressor.process(&compressed, &mut decompressed).unwrap(); + decompressor + .process(&compressed, &mut decompressed) + .unwrap(); decompressor.finish(&mut decompressed).unwrap(); }); b.bytes = input.len() as u64; diff --git a/src/util/encryption.rs b/src/util/encryption.rs index 2bc7e7b..17a23bb 100644 --- a/src/util/encryption.rs +++ b/src/util/encryption.rs @@ -14,16 +14,14 @@ use sodiumoxide::crypto::box_; use sodiumoxide::crypto::pwhash; pub use sodiumoxide::crypto::box_::{SecretKey, PublicKey}; -use ::util::*; +use util::*; static INIT: Once = ONCE_INIT; fn sodium_init() { - INIT.call_once(|| { - if !sodiumoxide::init() { - panic!("Failed to initialize sodiumoxide"); - } + INIT.call_once(|| if !sodiumoxide::init() { + panic!("Failed to initialize sodiumoxide"); }); } @@ -58,9 +56,9 @@ quick_error!{ #[derive(Clone, Debug, Eq, PartialEq, Hash)] -#[allow(unknown_lints,non_camel_case_types)] +#[allow(unknown_lints, non_camel_case_types)] pub enum EncryptionMethod { - Sodium, + Sodium } serde_impl!(EncryptionMethod(u64) { Sodium => 0 @@ -70,13 +68,13 @@ impl EncryptionMethod { pub fn from_string(val: &str) -> Result { match val { "sodium" => Ok(EncryptionMethod::Sodium), - _ => Err("Unsupported encryption method") + _ => Err("Unsupported encryption method"), } } pub fn to_string(&self) -> String { match *self { - EncryptionMethod::Sodium => "sodium".to_string() + EncryptionMethod::Sodium => "sodium".to_string(), } } } @@ -124,7 +122,10 @@ impl 
Crypto { #[inline] pub fn dummy() -> Self { sodium_init(); - Crypto { path: None, keys: HashMap::new() } + Crypto { + path: None, + keys: HashMap::new() + } } pub fn open>(path: P) -> Result { @@ -134,13 +135,24 @@ impl Crypto { for entry in try!(fs::read_dir(&path)) { let entry = try!(entry); let keyfile = try!(KeyfileYaml::load(entry.path())); - let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey)); - let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey)); - let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey)); - let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey)); + let public = try!(parse_hex(&keyfile.public).map_err( + |_| EncryptionError::InvalidKey + )); + let public = try!(PublicKey::from_slice(&public).ok_or( + EncryptionError::InvalidKey + )); + let secret = try!(parse_hex(&keyfile.secret).map_err( + |_| EncryptionError::InvalidKey + )); + let secret = try!(SecretKey::from_slice(&secret).ok_or( + EncryptionError::InvalidKey + )); keys.insert(public, secret); } - Ok(Crypto { path: Some(path), keys: keys }) + Ok(Crypto { + path: Some(path), + keys: keys + }) } #[inline] @@ -155,30 +167,53 @@ impl Crypto { } #[inline] - pub fn load_keypair_from_file>(path: P) -> Result<(PublicKey, SecretKey), EncryptionError> { + pub fn load_keypair_from_file>( + path: P, + ) -> Result<(PublicKey, SecretKey), EncryptionError> { Self::load_keypair_from_file_data(&try!(KeyfileYaml::load(path))) } - pub fn load_keypair_from_file_data(keyfile: &KeyfileYaml) -> Result<(PublicKey, SecretKey), EncryptionError> { - let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey)); - let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey)); - let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey)); - let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey)); + pub fn load_keypair_from_file_data( + keyfile: &KeyfileYaml, + ) -> Result<(PublicKey, SecretKey), EncryptionError> { + let public = try!(parse_hex(&keyfile.public).map_err( + |_| EncryptionError::InvalidKey + )); + let public = try!(PublicKey::from_slice(&public).ok_or( + EncryptionError::InvalidKey + )); + let secret = try!(parse_hex(&keyfile.secret).map_err( + |_| EncryptionError::InvalidKey + )); + let secret = try!(SecretKey::from_slice(&secret).ok_or( + EncryptionError::InvalidKey + )); Ok((public, secret)) } #[inline] pub fn save_keypair_to_file_data(public: &PublicKey, secret: &SecretKey) -> KeyfileYaml { - KeyfileYaml { public: to_hex(&public[..]), secret: to_hex(&secret[..]) } + KeyfileYaml { + public: to_hex(&public[..]), + secret: to_hex(&secret[..]) + } } #[inline] - pub fn save_keypair_to_file>(public: &PublicKey, secret: &SecretKey, path: P) -> Result<(), EncryptionError> { + pub fn save_keypair_to_file>( + public: &PublicKey, + secret: &SecretKey, + path: P, + ) -> Result<(), EncryptionError> { Self::save_keypair_to_file_data(public, secret).save(path) } #[inline] - pub fn register_secret_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), EncryptionError> { + pub fn register_secret_key( + &mut self, + public: PublicKey, + secret: SecretKey, + ) -> Result<(), EncryptionError> { if let Some(ref path) = self.path { let path = path.join(to_hex(&public[..]) + ".yaml"); try!(Self::save_keypair_to_file(&public, &secret, path)); @@ -193,28 +228,34 @@ impl Crypto { } fn get_secret_key(&self, 
public: &PublicKey) -> Result<&SecretKey, EncryptionError> { - self.keys.get(public).ok_or_else(|| EncryptionError::MissingKey(*public)) + self.keys.get(public).ok_or_else( + || EncryptionError::MissingKey(*public) + ) } #[inline] pub fn encrypt(&self, enc: &Encryption, data: &[u8]) -> Result, EncryptionError> { let &(ref method, ref public) = enc; - let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey)); + let public = try!(PublicKey::from_slice(public).ok_or( + EncryptionError::InvalidKey + )); match *method { - EncryptionMethod::Sodium => { - Ok(sealedbox::seal(data, &public)) - } + EncryptionMethod::Sodium => Ok(sealedbox::seal(data, &public)), } } #[inline] pub fn decrypt(&self, enc: &Encryption, data: &[u8]) -> Result, EncryptionError> { let &(ref method, ref public) = enc; - let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey)); + let public = try!(PublicKey::from_slice(public).ok_or( + EncryptionError::InvalidKey + )); let secret = try!(self.get_secret_key(&public)); match *method { EncryptionMethod::Sodium => { - sealedbox::open(data, &public, secret).map_err(|_| EncryptionError::Operation("Decryption failed")) + sealedbox::open(data, &public, secret).map_err(|_| { + EncryptionError::Operation("Decryption failed") + }) } } } @@ -228,18 +269,27 @@ impl Crypto { pub fn keypair_from_password(password: &str) -> (PublicKey, SecretKey) { let salt = pwhash::Salt::from_slice(b"the_great_zvault_password_salt_1").unwrap(); let mut key = [0u8; pwhash::HASHEDPASSWORDBYTES]; - let key = pwhash::derive_key(&mut key, password.as_bytes(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE).unwrap(); + let key = pwhash::derive_key( + &mut key, + password.as_bytes(), + &salt, + pwhash::OPSLIMIT_INTERACTIVE, + pwhash::MEMLIMIT_INTERACTIVE + ).unwrap(); let mut seed = [0u8; 32]; - let offset = key.len()-seed.len(); + let offset = key.len() - seed.len(); for (i, b) in seed.iter_mut().enumerate() { - *b = key[i+offset]; + *b = key[i + offset]; } let mut pk = [0u8; 32]; let mut sk = [0u8; 32]; if unsafe { libsodium_sys::crypto_box_seed_keypair(&mut pk, &mut sk, &seed) } != 0 { panic!("Libsodium failed"); } - (PublicKey::from_slice(&pk).unwrap(), SecretKey::from_slice(&sk).unwrap()) + ( + PublicKey::from_slice(&pk).unwrap(), + SecretKey::from_slice(&sk).unwrap() + ) } } @@ -374,7 +424,7 @@ mod benches { let (pk, sk) = Crypto::gen_keypair(); crypto.add_secret_key(pk, sk.clone()); let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..])); - let input = test_data(512*1024); + let input = test_data(512 * 1024); b.iter(|| crypto.encrypt(&encryption, &input)); b.bytes = input.len() as u64; } @@ -385,7 +435,7 @@ mod benches { let (pk, sk) = Crypto::gen_keypair(); crypto.add_secret_key(pk, sk.clone()); let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..])); - let input = test_data(512*1024); + let input = test_data(512 * 1024); let output = crypto.encrypt(&encryption, &input).unwrap(); b.iter(|| crypto.decrypt(&encryption, &output)); b.bytes = input.len() as u64; diff --git a/src/util/fs.rs b/src/util/fs.rs index 6cbc947..e25d1a4 100644 --- a/src/util/fs.rs +++ b/src/util/fs.rs @@ -7,13 +7,17 @@ mod linux { use std::os::unix::ffi::OsStringExt; #[inline] - pub fn chown>(path: P, uid: libc::uid_t, gid: libc::gid_t) -> Result<(), io::Error> { + pub fn chown>( + path: P, + uid: libc::uid_t, + gid: libc::gid_t, + ) -> Result<(), io::Error> { let path = 
CString::new(path.as_ref().to_path_buf().into_os_string().into_vec()).unwrap(); let result = unsafe { libc::lchown((&path).as_ptr(), uid, gid) }; match result { 0 => Ok(()), -1 => Err(io::Error::last_os_error()), - _ => unreachable!() + _ => unreachable!(), } } } diff --git a/src/util/hash.rs b/src/util/hash.rs index 6181832..364b475 100644 --- a/src/util/hash.rs +++ b/src/util/hash.rs @@ -27,7 +27,7 @@ impl Hash { #[inline] pub fn empty() -> Self { - Hash{high: 0, low: 0} + Hash { high: 0, low: 0 } } #[inline] @@ -45,14 +45,20 @@ impl Hash { pub fn read_from(src: &mut Read) -> Result { let high = try!(src.read_u64::()); let low = try!(src.read_u64::()); - Ok(Hash { high: high, low: low }) + Ok(Hash { + high: high, + low: low + }) } #[inline] pub fn from_string(val: &str) -> Result { let high = try!(u64::from_str_radix(&val[..16], 16).map_err(|_| ())); let low = try!(u64::from_str_radix(&val[16..], 16).map_err(|_| ())); - Ok(Self { high: high, low: low }) + Ok(Self { + high: high, + low: low + }) } } @@ -72,7 +78,10 @@ impl fmt::Debug for Hash { impl Serialize for Hash { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { let mut dat = [0u8; 16]; LittleEndian::write_u64(&mut dat[..8], self.high); LittleEndian::write_u64(&mut dat[8..], self.low); @@ -81,12 +90,15 @@ impl Serialize for Hash { } impl<'a> Deserialize<'a> for Hash { - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'a> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'a>, + { let dat: Vec = try!(ByteBuf::deserialize(deserializer)).into(); if dat.len() != 16 { return Err(D::Error::custom("Invalid key length")); } - Ok(Hash{ + Ok(Hash { high: LittleEndian::read_u64(&dat[..8]), low: LittleEndian::read_u64(&dat[8..]) }) @@ -111,9 +123,13 @@ impl HashMethod { match *self { HashMethod::Blake2 => { let hash = blake2b(16, &[], data); - let hash = unsafe { &*mem::transmute::<_, *const (u64, u64)>(hash.as_bytes().as_ptr()) }; - Hash { high: u64::from_be(hash.0), low: u64::from_be(hash.1) } - }, + let hash = + unsafe { &*mem::transmute::<_, *const (u64, u64)>(hash.as_bytes().as_ptr()) }; + Hash { + high: u64::from_be(hash.0), + low: u64::from_be(hash.1) + } + } HashMethod::Murmur3 => { let (a, b) = murmurhash3_x64_128(data, 0); Hash { high: a, low: b } @@ -126,7 +142,7 @@ impl HashMethod { match name { "blake2" => Ok(HashMethod::Blake2), "murmur3" => Ok(HashMethod::Murmur3), - _ => Err("Unsupported hash method") + _ => Err("Unsupported hash method"), } } @@ -134,10 +150,9 @@ impl HashMethod { pub fn name(&self) -> &'static str { match *self { HashMethod::Blake2 => "blake2", - HashMethod::Murmur3 => "murmur3" + HashMethod::Murmur3 => "murmur3", } } - } @@ -163,12 +178,24 @@ mod tests { #[test] fn test_blake2() { - assert_eq!(HashMethod::Blake2.hash(b"abc"), Hash{high: 0xcf4ab791c62b8d2b, low: 0x2109c90275287816}); + assert_eq!( + HashMethod::Blake2.hash(b"abc"), + Hash { + high: 0xcf4ab791c62b8d2b, + low: 0x2109c90275287816 + } + ); } #[test] fn test_murmur3() { - assert_eq!(HashMethod::Murmur3.hash(b"123"), Hash{high: 10978418110857903978, low: 4791445053355511657}); + assert_eq!( + HashMethod::Murmur3.hash(b"123"), + Hash { + high: 10978418110857903978, + low: 4791445053355511657 + } + ); } } @@ -195,14 +222,14 @@ mod benches { #[bench] fn bench_blake2(b: &mut Bencher) { - let data = test_data(16*1024); + let data = test_data(16 * 1024); b.bytes = data.len() as u64; b.iter(|| 
HashMethod::Blake2.hash(&data)); } #[bench] fn bench_murmur3(b: &mut Bencher) { - let data = test_data(16*1024); + let data = test_data(16 * 1024); b.bytes = data.len() as u64; b.iter(|| HashMethod::Murmur3.hash(&data)); } diff --git a/src/util/hex.rs b/src/util/hex.rs index e988558..6e7f318 100644 --- a/src/util/hex.rs +++ b/src/util/hex.rs @@ -1,5 +1,8 @@ pub fn to_hex(data: &[u8]) -> String { - data.iter().map(|b| format!("{:02x}", b)).collect::>().join("") + data.iter() + .map(|b| format!("{:02x}", b)) + .collect::>() + .join("") } pub fn parse_hex(hex: &str) -> Result, ()> { @@ -12,9 +15,9 @@ pub fn parse_hex(hex: &str) -> Result, ()> { b'A'...b'F' => buf |= byte - b'A' + 10, b'a'...b'f' => buf |= byte - b'a' + 10, b'0'...b'9' => buf |= byte - b'0', - b' '|b'\r'|b'\n'|b'\t' => { + b' ' | b'\r' | b'\n' | b'\t' => { buf >>= 4; - continue + continue; } _ => return Err(()), } @@ -45,7 +48,7 @@ mod tests { assert_eq!(to_hex(&[15]), "0f"); assert_eq!(to_hex(&[16]), "10"); assert_eq!(to_hex(&[255]), "ff"); - assert_eq!(to_hex(&[5,255]), "05ff"); + assert_eq!(to_hex(&[5, 255]), "05ff"); } #[test] @@ -53,9 +56,9 @@ mod tests { assert_eq!(parse_hex("00"), Ok(vec![0])); assert_eq!(parse_hex("01"), Ok(vec![1])); assert_eq!(parse_hex("0f"), Ok(vec![15])); - assert_eq!(parse_hex("0fff"), Ok(vec![15,255])); + assert_eq!(parse_hex("0fff"), Ok(vec![15, 255])); assert_eq!(parse_hex("0F"), Ok(vec![15])); - assert_eq!(parse_hex("01 02\n03\t04"), Ok(vec![1,2,3,4])); + assert_eq!(parse_hex("01 02\n03\t04"), Ok(vec![1, 2, 3, 4])); } } diff --git a/src/util/hostname.rs b/src/util/hostname.rs index 5cc0a52..fdaf8c8 100644 --- a/src/util/hostname.rs +++ b/src/util/hostname.rs @@ -1,14 +1,20 @@ use libc; use std::ffi; -extern { +extern "C" { fn gethostname(name: *mut libc::c_char, size: libc::size_t) -> libc::c_int; } pub fn get_hostname() -> Result { let mut buf = Vec::with_capacity(255); buf.resize(255, 0u8); - if unsafe { gethostname(buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t) } == 0 { + if unsafe { + gethostname( + buf.as_mut_ptr() as *mut libc::c_char, + buf.len() as libc::size_t + ) + } == 0 + { buf[254] = 0; //enforce null-termination let name = unsafe { ffi::CStr::from_ptr(buf.as_ptr() as *const libc::c_char) }; name.to_str().map(|s| s.to_string()).map_err(|_| ()) diff --git a/src/util/lock.rs b/src/util/lock.rs index 49853b0..f07ed97 100644 --- a/src/util/lock.rs +++ b/src/util/lock.rs @@ -1,4 +1,4 @@ -use ::prelude::*; +use prelude::*; use serde_yaml; use chrono::prelude::*; @@ -121,12 +121,14 @@ impl LockFolder { for lock in try!(self.get_locks()) { if lock.exclusive { if level == LockLevel::Exclusive { - return Err(LockError::InvalidLockState("multiple exclusive locks")) + return Err(LockError::InvalidLockState("multiple exclusive locks")); } else { level = LockLevel::Exclusive } } else if level == LockLevel::Exclusive { - return Err(LockError::InvalidLockState("exclusive lock and shared locks")) + return Err(LockError::InvalidLockState( + "exclusive lock and shared locks" + )); } else { level = LockLevel::Shared } @@ -137,7 +139,7 @@ impl LockFolder { pub fn lock(&self, exclusive: bool) -> Result { let level = try!(self.get_lock_level()); if level == LockLevel::Exclusive || level == LockLevel::Shared && exclusive { - return Err(LockError::Locked) + return Err(LockError::Locked); } let lockfile = LockFile { hostname: get_hostname().unwrap(), @@ -145,12 +147,19 @@ impl LockFolder { date: Utc::now().timestamp(), exclusive: exclusive }; - let path = 
self.path.join(format!( "{}-{}.lock", &lockfile.hostname, lockfile.processid ));
         try!(lockfile.save(&path));
-        let handle = LockHandle{lock: lockfile, path: path};
+        let handle = LockHandle {
+            lock: lockfile,
+            path: path
+        };
         if self.get_lock_level().is_err() {
             try!(handle.release());
-            return Err(LockError::Locked)
+            return Err(LockError::Locked);
         }
         Ok(handle)
     }
@@ -158,19 +167,23 @@
     pub fn upgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
         let lockfile = &mut lock.lock;
         if lockfile.exclusive {
-            return Ok(())
+            return Ok(());
         }
         let level = try!(self.get_lock_level());
         if level == LockLevel::Exclusive {
-            return Err(LockError::Locked)
+            return Err(LockError::Locked);
         }
         lockfile.exclusive = true;
-        let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
+        let path = self.path.join(format!(
+            "{}-{}.lock",
+            &lockfile.hostname,
+            lockfile.processid
+        ));
         try!(lockfile.save(&path));
         if self.get_lock_level().is_err() {
             lockfile.exclusive = false;
             try!(lockfile.save(&path));
-            return Err(LockError::Locked)
+            return Err(LockError::Locked);
         }
         Ok(())
     }
@@ -178,10 +191,14 @@
     pub fn downgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
         let lockfile = &mut lock.lock;
         if !lockfile.exclusive {
-            return Ok(())
+            return Ok(());
         }
         lockfile.exclusive = false;
-        let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
+        let path = self.path.join(format!(
+            "{}-{}.lock",
+            &lockfile.hostname,
+            lockfile.processid
+        ));
         lockfile.save(&path)
     }
 }
diff --git a/src/util/lru_cache.rs b/src/util/lru_cache.rs
index 29e082c..4b8b9c2 100644
--- a/src/util/lru_cache.rs
+++ b/src/util/lru_cache.rs
@@ -10,7 +10,7 @@ pub struct LruCache {
 }
 
-impl LruCache {
+impl LruCache {
     #[inline]
     pub fn new(min_size: usize, max_size: usize) -> Self {
         LruCache {
@@ -55,9 +55,9 @@ impl LruCache {
     fn shrink(&mut self) {
         let mut tags: Vec = self.items.values().map(|&(_, n)| n).collect();
         tags.sort();
-        let min = tags[tags.len()-self.min_size];
+        let min = tags[tags.len() - self.min_size];
         let mut new = HashMap::with_capacity(self.min_size);
-        new.extend(self.items.drain().filter(|&(_,(_, n))| n>=min));
+        new.extend(self.items.drain().filter(|&(_, (_, n))| n >= min));
         self.items = new;
     }
 }
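
A few of the hunks above encode logic that is easy to misread in diff form, so the short Rust sketches below restate it. First, the vacuum pass in src/repository/vacuum.rs: a bundle is worth rewriting when the fraction of its encoded bytes still referenced falls below the requested ratio, and the reclaimable space is the unreferenced remainder, which is what the "Reclaiming {} by rewriting {} bundles" message reports. The sketch is an assumption-based reduction; the BundleUsage type and plan_vacuum helper are hypothetical stand-ins, not zvault API.

// Hypothetical reduction of the vacuum decision; not the zvault types.
struct BundleUsage {
    encoded_size: usize, // total encoded bytes of the bundle
    used_size: usize,    // bytes still referenced by the index
}

fn plan_vacuum(usage: &[(u32, BundleUsage)], ratio: f32) -> (Vec<u32>, usize) {
    let mut rewrite = Vec::new();
    let mut reclaim = 0;
    for &(id, ref b) in usage {
        // Rewrite when the live fraction drops below the threshold.
        let used = b.used_size as f32 / b.encoded_size as f32;
        if used < ratio {
            rewrite.push(id);
            reclaim += b.encoded_size - b.used_size;
        }
    }
    (rewrite, reclaim)
}

fn main() {
    let usage = vec![
        (0, BundleUsage { encoded_size: 100, used_size: 10 }),
        (1, BundleUsage { encoded_size: 100, used_size: 90 }),
    ];
    let (rewrite, reclaim) = plan_vacuum(&usage, 0.5);
    assert_eq!(rewrite, vec![0]); // only the mostly-dead bundle is rewritten
    assert_eq!(reclaim, 90);
}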
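Second, the chunk-list encoding that the reformatted chunk.rs tests assert on byte for byte: every entry is 20 bytes, a 16-byte hash (two little-endian u64 words, matching the Hash serializer in hash.rs) followed by a little-endian u32 length. That is why read_from warns when the input length is not a multiple of 20, and why msgpack frames a two-entry list as a bin8 blob with header [196, 40] (0xC4, then the 40-byte payload length). A minimal standalone sketch of that layout; the write_entry and read_entry names are illustrative, not the zvault functions:

use std::convert::TryInto;

// Simplified 128-bit hash matching the two-u64 layout in hash.rs.
#[derive(Clone, Copy, Debug, Default)]
struct Hash {
    high: u64,
    low: u64,
}

// Append one 20-byte entry: 16 hash bytes, then a little-endian u32 length.
fn write_entry(buf: &mut Vec<u8>, hash: &Hash, len: u32) {
    buf.extend_from_slice(&hash.high.to_le_bytes());
    buf.extend_from_slice(&hash.low.to_le_bytes());
    buf.extend_from_slice(&len.to_le_bytes());
}

// Decode one entry from the front of a slice; a list of n chunks is
// exactly 20 * n bytes, hence the % 20 check in read_from.
fn read_entry(src: &[u8]) -> (Hash, u32) {
    let high = u64::from_le_bytes(src[0..8].try_into().unwrap());
    let low = u64::from_le_bytes(src[8..16].try_into().unwrap());
    let len = u32::from_le_bytes(src[16..20].try_into().unwrap());
    (Hash { high, low }, len)
}

fn main() {
    let mut buf = Vec::new();
    write_entry(&mut buf, &Hash::default(), 0);
    write_entry(&mut buf, &Hash::default(), 1);
    assert_eq!(buf.len(), 40); // two entries, as in the write_to test
    assert_eq!(&buf[36..40], &[1, 0, 0, 0]); // little-endian length 1
    assert_eq!(read_entry(&buf[20..]).1, 1);
}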
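Third, the locking scheme in lock.rs follows an optimistic write-then-verify pattern: save the lockfile first, then re-scan the folder, and roll back if the combined state turned out invalid, as lock() does via handle.release() and upgrade() does by reverting the exclusive flag and saving again. A simplified sketch of the same idea under stated assumptions: the acquire helper, file names, and file contents here are hypothetical, while the real code stores YAML lockfiles carrying hostname, process id, date, and an exclusive flag.

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

fn acquire(folder: &Path, name: &str, exclusive: bool) -> io::Result<PathBuf> {
    let path = folder.join(format!("{}.lock", name));
    // Step 1: write our lockfile unconditionally.
    fs::write(&path, if exclusive { "exclusive" } else { "shared" })?;
    // Step 2: re-scan the folder; if the combined state is invalid,
    // someone raced us and we back out (cf. handle.release()).
    let conflict = fs::read_dir(folder)?
        .filter_map(|e| e.ok())
        .filter(|e| e.path() != path)
        .any(|e| {
            let other_exclusive = fs::read_to_string(e.path())
                .map(|s| s == "exclusive")
                .unwrap_or(false);
            // Exclusive conflicts with anything; shared only with exclusive.
            exclusive || other_exclusive
        });
    if conflict {
        fs::remove_file(&path)?;
        return Err(io::Error::new(io::ErrorKind::WouldBlock, "locked"));
    }
    Ok(path)
}

fn main() -> io::Result<()> {
    let folder = std::env::temp_dir().join("zvault-lock-demo");
    fs::create_dir_all(&folder)?;
    let lock = acquire(&folder, "demo", true)?;
    fs::remove_file(lock)
}

Like the original, this is best-effort coordination over a shared directory rather than an atomic lock primitive: the check after the write still leaves a window in which two racing writers both back out, but no state survives with two exclusive holders.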
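Finally, the LRU cache at the end of the patch evicts by access tag: each access stamps an entry with a monotonically increasing counter, and shrink() keeps only the min_size entries with the largest tags by sorting the tags and using the min_size-th largest as a threshold. A compact sketch of that strategy; the field names and trait bounds are assumptions where the patch does not show them, and retain stands in for the drain-and-rebuild in the original:

use std::collections::HashMap;

struct LruCache<K, V> {
    items: HashMap<K, (V, u64)>, // value plus last-access tag
    min_size: usize,
    max_size: usize,
    next: u64,
}

impl<K: std::hash::Hash + Eq, V> LruCache<K, V> {
    fn new(min_size: usize, max_size: usize) -> Self {
        LruCache { items: HashMap::new(), min_size, max_size, next: 0 }
    }

    fn put(&mut self, key: K, value: V) {
        self.next += 1;
        self.items.insert(key, (value, self.next));
        if self.items.len() > self.max_size {
            self.shrink();
        }
    }

    fn shrink(&mut self) {
        // Keep everything at or above the min_size-th largest tag,
        // exactly as the patched shrink() does.
        let mut tags: Vec<u64> = self.items.values().map(|&(_, n)| n).collect();
        tags.sort();
        let min = tags[tags.len() - self.min_size];
        self.items.retain(|_, &mut (_, n)| n >= min);
    }
}

fn main() {
    let mut cache = LruCache::new(2, 4);
    for i in 0..5u32 {
        cache.put(i, i);
    }
    assert_eq!(cache.items.len(), 2); // only the two most recent survive
}

Because the counter is strictly increasing, all tags are unique and exactly min_size entries survive each shrink; the cache therefore oscillates between min_size and max_size entries instead of evicting on every insert.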