use ::prelude::*;
use super::*;

use std::collections::VecDeque;
use std::path::{Path, PathBuf};
use std::time::Duration;

use pbr::ProgressBar;

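// Error conditions reported by the integrity checks in this module.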
quick_error!{
    #[derive(Debug)]
    pub enum IntegrityError {
        MissingChunk(hash: Hash) {
            description("Missing chunk")
            display("Missing chunk: {}", hash)
        }
        MissingBundleId(id: u32) {
            description("Missing bundle")
            display("Missing bundle: {}", id)
        }
        MissingBundle(id: BundleId) {
            description("Missing bundle")
            display("Missing bundle: {}", id)
        }
        NoSuchChunk(bundle: BundleId, chunk: u32) {
            description("No such chunk")
            display("Bundle {} does not contain the chunk {}", bundle, chunk)
        }
        RemoteBundlesNotInMap {
            description("Remote bundles missing from map")
        }
        MapContainsDuplicates {
            description("Map contains duplicates")
        }
        BrokenInode(path: PathBuf, err: Box<RepositoryError>) {
            cause(err)
            description("Broken inode")
            display("Broken inode: {:?}\n\tcaused by: {}", path, err)
        }
        MissingInodeData(path: PathBuf, err: Box<RepositoryError>) {
            cause(err)
            description("Missing inode data")
            display("Missing inode data in: {:?}\n\tcaused by: {}", path, err)
        }
    }
}


impl Repository {
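    /// Walks the chunk index and verifies that every entry refers to a bundle
    /// that exists in the bundle database and to a valid chunk position inside
    /// that bundle.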
    fn check_index_chunks(&self) -> Result<(), RepositoryError> {
        let mut count = 0;
        let mut progress = ProgressBar::new(self.index.len() as u64);
        progress.message("checking index: ");
        progress.set_max_refresh_rate(Some(Duration::from_millis(100)));
        let res = self.index.walk(|_hash, location| {
            // Lookup bundle id from map
            let bundle_id = try!(self.get_bundle_id(location.bundle));
            // Get bundle object from bundledb
            let bundle = if let Some(bundle) = self.bundles.get_bundle_info(&bundle_id) {
                bundle
            } else {
                return Err(IntegrityError::MissingBundle(bundle_id.clone()).into())
            };
            // Check that the bundle contains the referenced chunk
            if bundle.info.chunk_count <= location.chunk as usize {
                return Err(IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into())
            }
            count += 1;
            if count % 1000 == 0 {
                progress.set(count);
            }
            Ok(())
        });
        progress.finish_print("checking index: done.");
        res
    }

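    /// Marks the given chunks as checked in `checked` and returns whether any of
    /// them had not been seen before. Fails if a chunk is missing from the index.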
    fn check_chunks(&self, checked: &mut Bitmap, chunks: &[Chunk]) -> Result<bool, RepositoryError> {
        let mut new = false;
        for &(hash, _len) in chunks {
            if let Some(pos) = self.index.pos(&hash) {
                new |= !checked.get(pos);
                checked.set(pos);
            } else {
                return Err(IntegrityError::MissingChunk(hash).into())
            }
        }
        Ok(new)
    }

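    /// Checks that all chunks referenced by this inode's file data are present
    /// in the index, including chunks referenced indirectly via a chunk list.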
    fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap) -> Result<(), RepositoryError> {
        match inode.data {
            None | Some(FileData::Inline(_)) => (),
            Some(FileData::ChunkedDirect(ref chunks)) => {
                try!(self.check_chunks(checked, chunks));
            },
            Some(FileData::ChunkedIndirect(ref chunks)) => {
                if try!(self.check_chunks(checked, chunks)) {
                    let chunk_data = try!(self.get_data(&chunks));
                    let chunks = ChunkList::read_from(&chunk_data);
                    try!(self.check_chunks(checked, &chunks));
                }
            }
        }
        Ok(())
    }

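    /// Checks the inode stored at `chunks` and all inodes below it, breadth-first.
    /// Chunk lists that have already been checked are skipped.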
    fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap) -> Result<(), RepositoryError> {
        let mut todo = VecDeque::new();
        todo.push_back((path, ChunkList::from(chunks.to_vec())));
        while let Some((path, chunks)) = todo.pop_front() {
            match self.check_chunks(checked, &chunks) {
                Ok(false) => continue, // checked this chunk list before
                Ok(true) => (),
                Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into())
            }
            let inode = try!(self.get_inode(&chunks));
            // Mark the content chunks as used
            if let Err(err) = self.check_inode_contents(&inode, checked) {
                return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into())
            }
            // Put children in todo
            if let Some(children) = inode.children {
                for (name, chunks) in children {
                    todo.push_back((path.join(name), chunks));
                }
            }
        }
        Ok(())
    }

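    /// Checks a single backup, starting at its root inode.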
    #[inline]
    pub fn check_backup(&mut self, backup: &Backup) -> Result<(), RepositoryError> {
        info!("Checking backup...");
        let mut checked = Bitmap::new(self.index.capacity());
        self.check_subtree(Path::new("").to_path_buf(), &backup.root, &mut checked)
    }

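    /// Checks a single inode and its child subtrees, using `path` as the base
    /// path in error messages.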
    pub fn check_inode(&mut self, inode: &Inode, path: &Path) -> Result<(), RepositoryError> {
        info!("Checking inode...");
        let mut checked = Bitmap::new(self.index.capacity());
        try!(self.check_inode_contents(inode, &mut checked));
        if let Some(ref children) = inode.children {
            for chunks in children.values() {
                try!(self.check_subtree(path.to_path_buf(), chunks, &mut checked));
            }
        }
        Ok(())
    }

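    /// Checks all backups stored in the repository. Backups that cannot be read
    /// are skipped with a warning.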
    pub fn check_backups(&mut self) -> Result<(), RepositoryError> {
        info!("Checking backups...");
        let mut checked = Bitmap::new(self.index.capacity());
        let backup_map = match self.get_backups() {
            Ok(backup_map) => backup_map,
            Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
                warn!("Some backups could not be read, ignoring them");
                backup_map
            },
            Err(err) => return Err(err)
        };
        for (name, backup) in ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter()) {
            let path = name + "::";
            try!(self.check_subtree(Path::new(&path).to_path_buf(), &backup.root, &mut checked));
        }
        Ok(())
    }

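    /// Checks that the bundle map and the bundle database are consistent with
    /// each other.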
    pub fn check_repository(&mut self) -> Result<(), RepositoryError> {
        info!("Checking repository integrity...");
        for (_id, bundle_id) in self.bundle_map.bundles() {
            if self.bundles.get_bundle_info(&bundle_id).is_none() {
                return Err(IntegrityError::MissingBundle(bundle_id).into())
            }
        }
        if self.bundle_map.len() < self.bundles.len() {
            return Err(IntegrityError::RemoteBundlesNotInMap.into())
        }
        if self.bundle_map.len() > self.bundles.len() {
            return Err(IntegrityError::MapContainsDuplicates.into())
        }
        Ok(())
    }

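    /// Rebuilds the bundle map from the bundles that are actually present and
    /// saves the result.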
    pub fn rebuild_bundle_map(&mut self) -> Result<(), RepositoryError> {
        info!("Rebuilding bundle map from bundles");
        self.bundle_map = BundleMap::create();
        for bundle in self.bundles.list_bundles() {
            let bundle_id = match bundle.mode {
                BundleMode::Data => self.next_data_bundle,
                BundleMode::Meta => self.next_meta_bundle
            };
            self.bundle_map.set(bundle_id, bundle.id.clone());
            if self.next_meta_bundle == bundle_id {
                self.next_meta_bundle = self.next_free_bundle_id()
            }
            if self.next_data_bundle == bundle_id {
                self.next_data_bundle = self.next_free_bundle_id()
            }
        }
        self.save_bundle_map()
    }

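    /// Rebuilds the chunk index from the chunk lists of all bundles in the
    /// bundle map.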
    pub fn rebuild_index(&mut self) -> Result<(), RepositoryError> {
        info!("Rebuilding index from bundles");
        self.index.clear();
        for (num, id) in self.bundle_map.bundles() {
            let chunks = try!(self.bundles.get_chunk_list(&id));
            for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
                try!(self.index.set(&hash, &Location { bundle: num as u32, chunk: i as u32 }));
            }
        }
        Ok(())
    }

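    /// Checks the integrity of the index and its entries. With `repair` set, a
    /// corrupted index is rebuilt from the bundles instead of returning an error.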
    #[inline]
    pub fn check_index(&mut self, repair: bool) -> Result<(), RepositoryError> {
        if repair {
            try!(self.write_mode());
        }
        info!("Checking index integrity...");
        if let Err(err) = self.index.check() {
            if repair {
                warn!("Problem detected: index was corrupted\n\tcaused by: {}", err);
                return self.rebuild_index();
            } else {
                return Err(err.into())
            }
        }
        info!("Checking index entries...");
        if let Err(err) = self.check_index_chunks() {
            if repair {
                warn!("Problem detected: index entries were inconsistent\n\tcaused by: {}", err);
                return self.rebuild_index();
            } else {
                return Err(err.into())
            }
        }
        Ok(())
    }

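    /// Checks the stored bundles. With `repair` set, the bundle database attempts
    /// repairs, and the bundle map and index are rebuilt if any bundle was repaired.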
    #[inline]
    pub fn check_bundles(&mut self, full: bool, repair: bool) -> Result<(), RepositoryError> {
        if repair {
            try!(self.write_mode());
        }
        info!("Checking bundle integrity...");
        if try!(self.bundles.check(full, repair)) {
            // Some bundles got repaired
            try!(self.bundles.finish_uploads());
            try!(self.rebuild_bundle_map());
            try!(self.rebuild_index());
        }
        Ok(())
    }
}