Locking local repository to avoid index corruption (closes #4)

Dennis Schwerdel 2017-04-12 08:32:27 +02:00
parent b4b004dd23
commit 1ab11c8ff9
7 changed files with 90 additions and 19 deletions
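The commit splits the previous single lock folder into remote_locks and local_locks, gives Repository a long-lived shared LockHandle on the local folder, and has every mutating method call write_mode() to upgrade that handle to exclusive before touching the index. The sketch below is a minimal, self-contained illustration of that upgrade protocol; the type names mirror the diff, but the on-disk format, the error values and the main() driver are simplified assumptions, not the actual zvault implementation.

// Minimal sketch of the lock-folder upgrade protocol added in this commit.
// LockFile/LockFolder/LockHandle mirror the names in the diff below, but the
// on-disk format, the error values and the main() driver are simplified
// assumptions, not the actual zvault code.
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};

#[derive(Debug, PartialEq)]
enum LockLevel { Free, Shared, Exclusive }

// The real lock file also records hostname and process id; only the
// exclusive flag matters for the upgrade protocol.
struct LockFile { exclusive: bool }

impl LockFile {
    fn save(&self, path: &Path) -> io::Result<()> {
        write!(fs::File::create(path)?, "exclusive={}", self.exclusive)
    }
    fn is_exclusive(path: &Path) -> io::Result<bool> {
        Ok(fs::read_to_string(path)?.contains("exclusive=true"))
    }
}

struct LockHandle { lock: LockFile, path: PathBuf }

struct LockFolder { path: PathBuf }

impl LockFolder {
    // Scan all lock files; more than one writer, or a writer next to readers,
    // is reported as an error, mirroring how the diff treats conflicts.
    fn get_lock_level(&self) -> io::Result<LockLevel> {
        let (mut exclusive, mut shared) = (0, 0);
        for entry in fs::read_dir(&self.path)? {
            if LockFile::is_exclusive(&entry?.path())? { exclusive += 1 } else { shared += 1 }
        }
        match (exclusive, shared) {
            (0, 0) => Ok(LockLevel::Free),
            (0, _) => Ok(LockLevel::Shared),
            (1, 0) => Ok(LockLevel::Exclusive),
            _ => Err(io::Error::new(io::ErrorKind::Other, "conflicting locks")),
        }
    }

    // Take a lock by dropping a per-process lock file into the folder.
    fn lock(&self, exclusive: bool) -> io::Result<LockHandle> {
        let lockfile = LockFile { exclusive };
        let path = self.path.join(format!("{}.lock", std::process::id()));
        lockfile.save(&path)?;
        Ok(LockHandle { lock: lockfile, path })
    }

    // Upgrade an existing shared handle to exclusive: rewrite our own lock
    // file, then re-scan the folder and roll back if another process raced us.
    fn upgrade(&self, handle: &mut LockHandle) -> io::Result<()> {
        if handle.lock.exclusive {
            return Ok(())
        }
        if self.get_lock_level()? == LockLevel::Exclusive {
            return Err(io::Error::new(io::ErrorKind::WouldBlock, "repository is locked"));
        }
        handle.lock.exclusive = true;
        handle.lock.save(&handle.path)?;
        if self.get_lock_level().is_err() {
            handle.lock.exclusive = false;
            handle.lock.save(&handle.path)?;
            return Err(io::Error::new(io::ErrorKind::WouldBlock, "repository is locked"));
        }
        Ok(())
    }
}

// A mutating repository operation then follows the write_mode() pattern:
// upgrade the long-lived local lock before touching the index.
fn example_write(local_locks: &LockFolder, handle: &mut LockHandle) -> io::Result<()> {
    local_locks.upgrade(handle)?; // corresponds to try!(self.write_mode())
    // ... safe to modify the local index and bundle map here ...
    Ok(())
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir().join("lock-folder-sketch");
    fs::create_dir_all(&dir)?;
    let folder = LockFolder { path: dir };
    let mut handle = folder.lock(false)?; // shared lock, like Repository::open()
    example_write(&folder, &mut handle)?; // upgraded to exclusive on demand
    fs::remove_file(&handle.path)         // release the lock on shutdown
}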

View File

@@ -397,7 +397,7 @@ pub fn run() -> Result<(), ErrorCode> {
}
},
Arguments::Prune{repo_path, prefix, daily, weekly, monthly, yearly, force} => {
let repo = try!(open_repository(&repo_path));
let mut repo = try!(open_repository(&repo_path));
if daily + weekly + monthly + yearly == 0 {
error!("This would remove all those backups");
return Err(ErrorCode::UnsafeArgs)

View File

@@ -51,12 +51,14 @@ impl Repository {
}
pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
try!(self.write_mode());
let path = self.layout.backup_path(name);
try!(fs::create_dir_all(path.parent().unwrap()));
Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path)))
}
pub fn delete_backup(&self, name: &str) -> Result<(), RepositoryError> {
pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
try!(self.write_mode());
let mut path = self.layout.backup_path(name);
try!(fs::remove_file(&path));
loop {
@@ -69,7 +71,8 @@ impl Repository {
}
pub fn prune_backups(&self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
try!(self.write_mode());
let mut backups = Vec::new();
let backup_map = match self.get_backups() {
Ok(backup_map) => backup_map,
@@ -224,6 +227,7 @@ impl Repository {
}
pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)
@@ -264,6 +268,7 @@ impl Repository {
}
pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
let mut inodes = try!(self.get_backup_path(backup, path));
let to_remove = inodes.pop().unwrap();

View File

@@ -40,6 +40,11 @@ impl RepositoryLayout {
self.0.join("bundles.map")
}
#[inline]
pub fn local_locks_path(&self) -> PathBuf {
self.0.join("locks")
}
#[inline]
pub fn backups_path(&self) -> PathBuf {
self.0.join("remote/backups")

View File

@@ -48,7 +48,9 @@ pub struct Repository {
data_bundle: Option<BundleWriter>,
meta_bundle: Option<BundleWriter>,
chunker: Chunker,
locks: LockFolder,
remote_locks: LockFolder,
local_locks: LockFolder,
lock: LockHandle,
dirty: bool
}
@@ -59,6 +61,7 @@ impl Repository {
try!(fs::create_dir(layout.base_path()));
try!(File::create(layout.excludes_path()).and_then(|mut f| f.write_all(DEFAULT_EXCLUDES)));
try!(fs::create_dir(layout.keys_path()));
try!(fs::create_dir(layout.local_locks_path()));
try!(symlink(remote, layout.remote_path()));
try!(File::create(layout.remote_readme_path()).and_then(|mut f| f.write_all(REPOSITORY_README)));
try!(fs::create_dir_all(layout.remote_locks_path()));
@@ -70,13 +73,17 @@ impl Repository {
Self::open(path)
}
#[allow(unknown_lints,useless_let_if_seq)]
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, RepositoryError> {
let layout = RepositoryLayout::new(path.as_ref().to_path_buf());
if !layout.remote_exists() {
return Err(RepositoryError::NoRemote)
}
let config = try!(Config::load(layout.config_path()));
let locks = LockFolder::new(layout.remote_locks_path());
let remote_locks = LockFolder::new(layout.remote_locks_path());
try!(fs::create_dir_all(layout.local_locks_path())); // Added after v0.1.0
let local_locks = LockFolder::new(layout.local_locks_path());
let lock = try!(local_locks.lock(false));
let crypto = Arc::new(Mutex::new(try!(Crypto::open(layout.keys_path()))));
let (bundles, new, gone) = try!(BundleDb::open(layout.clone(), crypto.clone()));
let (index, mut rebuild_index) = match Index::open(layout.index_path()) {
@@ -107,28 +114,42 @@ impl Repository {
bundles: bundles,
data_bundle: None,
meta_bundle: None,
locks: locks
lock: lock,
remote_locks: remote_locks,
local_locks: local_locks
};
if !new.is_empty() {
info!("Adding {} new bundles to index", new.len());
for bundle in ProgressIter::new("adding bundles to index", new.len(), new.into_iter()) {
try!(repo.add_new_remote_bundle(bundle))
if !rebuild_bundle_map {
let mut save_bundle_map = false;
if !new.is_empty() {
info!("Adding {} new bundles to index", new.len());
try!(repo.write_mode());
for bundle in ProgressIter::new("adding bundles to index", new.len(), new.into_iter()) {
try!(repo.add_new_remote_bundle(bundle))
}
save_bundle_map = true;
}
if !gone.is_empty() {
info!("Removig {} old bundles from index", gone.len());
try!(repo.write_mode());
for bundle in gone {
try!(repo.remove_gone_remote_bundle(bundle))
}
save_bundle_map = true;
}
if save_bundle_map {
try!(repo.write_mode());
try!(repo.save_bundle_map());
}
}
if !gone.is_empty() {
info!("Removig {} old bundles from index", gone.len());
for bundle in gone {
try!(repo.remove_gone_remote_bundle(bundle))
}
}
try!(repo.save_bundle_map());
repo.next_meta_bundle = repo.next_free_bundle_id();
repo.next_data_bundle = repo.next_free_bundle_id();
if rebuild_bundle_map {
try!(repo.write_mode());
try!(repo.rebuild_bundle_map());
rebuild_index = true;
}
if rebuild_index {
try!(repo.write_mode());
try!(repo.rebuild_index());
}
repo.dirty = dirty;
@@ -156,11 +177,13 @@ impl Repository {
#[inline]
pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> {
try!(self.write_mode());
Ok(try!(self.crypto.lock().unwrap().register_secret_key(public, secret)))
}
#[inline]
pub fn save_config(&mut self) -> Result<(), RepositoryError> {
try!(self.write_mode());
try!(self.config.save(self.layout.config_path()));
Ok(())
}
@@ -290,9 +313,14 @@ impl Repository {
Ok(())
}
#[inline]
fn write_mode(&mut self) -> Result<(), RepositoryError> {
Ok(try!(self.local_locks.upgrade(&mut self.lock)))
}
#[inline]
fn lock(&self, exclusive: bool) -> Result<LockHandle, RepositoryError> {
Ok(try!(self.locks.lock(exclusive)))
Ok(try!(self.remote_locks.lock(exclusive)))
}
#[inline]

View File

@@ -154,6 +154,7 @@ impl Repository {
}
pub fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)

View File

@@ -16,6 +16,7 @@ impl Repository {
pub fn vacuum(&mut self, ratio: f32, force: bool) -> Result<(), RepositoryError> {
try!(self.flush());
info!("Locking repository");
try!(self.write_mode());
let _lock = try!(self.lock(true));
if self.dirty {
return Err(RepositoryError::Dirty)

View File

@@ -71,6 +71,7 @@ pub enum LockLevel {
pub struct LockHandle {
lock: LockFile,
path: PathBuf
}
@@ -146,11 +147,41 @@ impl LockFolder {
};
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
try!(lockfile.save(&path));
let handle = LockHandle{path: path};
let handle = LockHandle{lock: lockfile, path: path};
if self.get_lock_level().is_err() {
try!(handle.release());
return Err(LockError::Locked)
}
Ok(handle)
}
pub fn upgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
let lockfile = &mut lock.lock;
if lockfile.exclusive {
return Ok(())
}
let level = try!(self.get_lock_level());
if level == LockLevel::Exclusive {
return Err(LockError::Locked)
}
lockfile.exclusive = true;
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
try!(lockfile.save(&path));
if self.get_lock_level().is_err() {
lockfile.exclusive = false;
try!(lockfile.save(&path));
return Err(LockError::Locked)
}
Ok(())
}
pub fn downgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
let lockfile = &mut lock.lock;
if !lockfile.exclusive {
return Ok(())
}
lockfile.exclusive = false;
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
lockfile.save(&path)
}
}
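A note on the design visible in upgrade() above: it works purely through the lock files themselves. The process first publishes its intent by rewriting its own lock file with exclusive set to true, then rescans the folder; if the rescan reports a conflict because another process raced it to exclusive, it rewrites the file as shared again and returns LockError::Locked instead of proceeding. downgrade() is the mirror image and simply rewrites the lock file as shared, which lets a Repository hold a single long-lived local LockHandle and escalate it only around mutating operations via write_mode().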