mirror of https://github.com/dswd/zvault

Commit 11d7753e78 "Some fixes" (parent ced360b881)
@@ -90,7 +90,7 @@ As an example, I am going to backup my projects folder. To do that, I am
 initializing an encrypted zVault repository, storing the data on a remote
 filesystem which has been mounted on `/mnt/backup`.
 
-    #$> zvault init --encrypt --remote /mnt/backup
+    #$> zvault init :: --encrypt --remote /mnt/backup
     public: 2bea1d15...
     secret: 3698a88c...
@@ -0,0 +1,25 @@
+# TODO
+
+## Functionality
+* Detach bundle upload
+* XAttrs in fuse
+* XAttrs in tar
+
+## Stability / Reliability
+* Lock the local repository to avoid index corruption
+* Recover from missing index and bundle map by rebuilding those
+
+## Usability
+* Verbosity control
+* Check for private key when encryption is set
+* Display backup name and path on backup integrity error
+* Better control over what is checked in `check` subcommand
+* Nice error when remote storage is not mounted
+
+## Code quality
+* Test cases
+* Benchmarks
+* Full fuse method coverage
+
+## Other
+* Homepage
@@ -1,7 +1,8 @@
 undefine LDFLAGS
 
 build:
-	git clone https://github.com/quixdb/squash -b 5ea579cae2324f9e814cb3d88aa589dff312e9e2 src
+	git clone https://github.com/quixdb/squash src
+	(cd src; git checkout 5ea579cae2324f9e814cb3d88aa589dff312e9e2)
 	(cd src; ./autogen.sh --prefix=/usr --disable-external)
 	make -C src

Binary file not shown.
@@ -3,7 +3,7 @@ zvault-check(1) -- Check the repository, a backup or a backup subtree
 
 ## SYNOPSIS
 
-`zvault check [OPTIONS] [PATH]`
+`zvault check [OPTIONS] <PATH>`
 
 
 ## DESCRIPTION
 
@@ -13,7 +13,6 @@ This subcommand checks the repository, a backup or a backup subtree given by
 
 The repository, backup, or subtree given by `PATH` must be in the format
 `[repository][::backup_name[::subtree]]` as described in _zvault(1)_.
-If `PATH` is omitted, the default repository location is used instead.
 
 The command will perform the following checks in order:
 - Bundle integrity
@@ -3,13 +3,12 @@ zvault-config(1) -- Display or change the configuration
 
 ## SYNOPSIS
 
-`zvault config [REPO]`
+`zvault config <REPO>`
 
 
 ## DESCRIPTION
 
 This subcommand displays or changes the configuration of the repository `REPO`.
-If `REPO` is omitted, the default repository location will be used.
 The configuration can be changed using the options described below. If no
 options are set, the current configuration is displayed. Otherwise, the
 configuration is changed as specified and then displayed.
@@ -3,7 +3,7 @@ zvault-import(1) -- Reconstruct a repository from the remote storage
 
 ## SYNOPSIS
 
-`zvault import <REMOTE> [REPO]`
+`zvault import <REMOTE> <REPO>`
 
 
 ## DESCRIPTION
 
@@ -12,9 +12,8 @@ This subcommand imports a repository from remote storage. First, an empty
 repository will be created and then the remote bundles will be imported and
 added to the local index.
 
-The repository will be created at the location `REPO`. If `REPO` is omitted,
-the default repository location will be used. It is important that the path
-given as `REPO` does not yet exist, so that it can be created.
+The repository will be created at the location `REPO`. It is important that the
+path given as `REPO` does not yet exist, so that it can be created.
 
 The remote storage path `REMOTE` must be an existing remote storage folder
 initialized by _zvault-init(1)_.
@@ -3,7 +3,7 @@ zvault-info(1) -- Display information on a repository, a backup or a subtree
 
 ## SYNOPSIS
 
-`zvault info [PATH]`
+`zvault info <PATH>`
 
 
 ## DESCRIPTION
 
@@ -13,7 +13,6 @@ specified by `PATH`.
 
 The repository, backup or backup subtree given by `PATH` must be in the format
 `[repository][::backup_name[::subtree]]` as described in _zvault(1)_.
-If `PATH` is omitted, the default repository location is used instead.
 
 
 ## OPTIONS
@@ -3,14 +3,14 @@ zvault-init(1) -- Initialize a new repository
 
 ## SYNOPSIS
 
-`zvault init [OPTIONS] --remote <REMOTE> [REPO]`
+`zvault init [OPTIONS] --remote <REMOTE> <REPO>`
 
 
 ## DESCRIPTION
 
-This subcommand initializes a new repository at the location `REPO`. If `REPO`
-is omitted, the default repository location will be used. It is important that
-the path given as `REPO` does not yet exist, so that it can be created.
+This subcommand initializes a new repository at the location `REPO`. It is
+important that the path given as `REPO` does not yet exist, so that it can be
+created.
 
 The remote storage path `REMOTE` must be an existing empty folder. ZVault
 supports mounted remote filesystems, so it is a good idea to use such a folder
@@ -3,7 +3,7 @@ zvault-list(1) -- List backups or backup contents
 
 ## SYNOPSIS
 
-`zvault list [PATH]`
+`zvault list <PATH>`
 
 
 ## DESCRIPTION
 
@@ -13,7 +13,6 @@ specified by `PATH`.
 
 The repository, backup or backup subtree given by `PATH` must be in the format
 `[repository][::backup_name[::subtree]]` as described in _zvault(1)_.
-If `PATH` is omitted, the default repository location is used instead.
 
 If `PATH` specifies a repository, all backups of this repository are listed.
@@ -3,7 +3,7 @@ zvault-mount(1) -- Mount the repository, a backup or a subtree
 
 ## SYNOPSIS
 
-`zvault mount [PATH] <MOUNTPOINT>`
+`zvault mount <PATH> <MOUNTPOINT>`
 
 
 ## DESCRIPTION
 
@@ -14,7 +14,6 @@ filesystem.
 
 The repository, backup or backup subtree given by `PATH` must be in the format
 `[repository][::backup_name[::subtree]]` as described in _zvault(1)_.
-If `PATH` is omitted, the default repository location is used instead.
 
 If `PATH` specifies a backup or backup subtree, the root of that backup or the
 respective subtree is mounted onto the given location.
@@ -3,15 +3,13 @@ zvault-prune(1) -- Remove backups based on age
 
 ## SYNOPSIS
 
-`zvault prune [OPTIONS] [REPO]`
+`zvault prune [OPTIONS] <REPO>`
 
 
 ## DESCRIPTION
 
 This subcommand removes backups in the repository `REPO` based on their age.
-
-If `REPO` is omitted, the default repository location is used instead.
 
 If a prefix is specified via `--prefix`, only backups which start with this
 string are considered for removal.
@@ -3,13 +3,12 @@ zvault-vacuum(1) -- Reclaim space by rewriting bundles
 
 ## SYNOPSIS
 
-`zvault vacuum [OPTIONS] [REPO]`
+`zvault vacuum [OPTIONS] <REPO>`
 
 
 ## DESCRIPTION
 
 This subcommand reclaims space by rewriting bundles in the repository `REPO`.
-If `REPO` is omitted, the default repository location is used instead.
 
 This command rewrites bundles to remove unused chunks of backups that have been
 removed by _zvault-remove(1)_ or _zvault-prune(1)_.
@@ -65,8 +65,8 @@ location.
 ### Path syntax
 
 Most subcommands work with a repository that has to be specified as a parameter.
-If this repository is not specified, the default repository in `~/.zvault` will
-be used instead.
+If this repository is specified as `::`, the default repository in `~/.zvault`
+will be used instead.
 
 Some subcommands need to reference a specific backup in the repository. This is
 done via the syntax `repository::backup_name` where `repository` is the path to
@@ -86,8 +86,9 @@ this case it is important to note that if a path component is empty, it is
 regarded as not set at all.
 
 Examples:
 
 - `~/.zvault` references the repository in `~/.zvault` and is identical with
-  `::` (as well as not setting the path at all).
+  `::`.
 - `::backup1` references the backup `backup1` in the default repository
 - `::backup1::/` references the root folder of the backup `backup1` in the
   default repository
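To make the `[repository][::backup[::subtree]]` grammar above concrete, here is a minimal sketch of the splitting step. It is illustrative only; zVault's actual parser is `parse_repo_path` in `src/cli/args.rs` (its call sites appear later in this commit), and it additionally validates each component:

    // Minimal sketch: split "[repository][::backup[::subtree]]".
    // An empty component counts as not set at all, as described above;
    // an empty repository part selects the default repository (~/.zvault).
    fn split_path(path: &str) -> (&str, Option<&str>, Option<&str>) {
        let mut parts = path.splitn(3, "::");
        let repository = parts.next().unwrap_or("");
        let backup = parts.next().filter(|s| !s.is_empty());
        let subtree = parts.next().filter(|s| !s.is_empty());
        (repository, backup, subtree)
    }

    fn main() {
        assert_eq!(split_path("::backup1"), ("", Some("backup1"), None));
        assert_eq!(split_path("::backup1::/"), ("", Some("backup1"), Some("/")));
        assert_eq!(split_path("~/.zvault"), ("~/.zvault", None, None));
    }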
@@ -131,12 +132,14 @@ changed.
 
 ZVault offers different chunker algorithms with different properties to choose
 from:
+
 - The **rabin** chunker is a very common algorithm with a good quality but a
   mediocre speed (about 350 MB/s).
 - The **ae** chunker is a novel approach that can reach very high speeds
   (over 750 MB/s) at a cost of deduplication rate.
 - The **fastcdc** algorithm reaches a similar deduplication rate as the rabin
   chunker but is faster (about 550 MB/s).
+
 The recommended chunker is **fastcdc**.
 
 Besides the chunker algorithm, an important setting is the target chunk size,
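All three chunkers listed above are content-defined: they cut the stream where a rolling hash over a small window matches a boundary pattern, so an insertion early in a file shifts only nearby chunk boundaries. A deliberately simplified sketch of that shared principle (this is not zVault's rabin/ae/fastcdc code):

    // Schematic content-defined chunking: declare a boundary wherever a
    // rolling hash matches a bit mask. With n mask bits set, boundaries
    // occur on average every 2^n bytes. Real chunkers (rabin, fastcdc)
    // use better rolling functions and enforce min/max chunk sizes.
    fn chunk_boundaries(data: &[u8], mask: u32) -> Vec<usize> {
        let mut boundaries = Vec::new();
        let mut hash: u32 = 0;
        for (i, &byte) in data.iter().enumerate() {
            hash = hash.rotate_left(1) ^ u32::from(byte);
            if hash & mask == mask {
                boundaries.push(i + 1); // cut after this byte
                hash = 0;
            }
        }
        boundaries
    }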
@@ -155,6 +158,7 @@ per chunk should be a safe value to calculate with.
 
 The configured value for chunk size needs to be a power of 2. Here is a
 selection of chunk sizes and their estimated RAM usage:
+
 - Chunk size 4 KiB => ~40 GiB data stored in 1 GiB RAM
 - Chunk size 8 KiB => ~80 GiB data stored in 1 GiB RAM
 - Chunk size 16 KiB => ~160 GiB data stored in 1 GiB RAM
@@ -164,6 +168,7 @@ selection of chunk sizes and their estimated RAM usage:
 - Chunk size 256 KiB => ~2.5 TiB data stored in 1 GiB RAM
 - Chunk size 512 KiB => ~5 TiB data stored in 1 GiB RAM
 - Chunk size 1024 KiB => ~10 TiB data stored in 1 GiB RAM
+
 The recommended chunk size for normal computers is 16 KiB. Servers with lots of
 data might want to use 128 KiB or 1024 KiB instead.
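The table follows from a fixed per-chunk index cost: the listed figures correspond to roughly 100 bytes of index RAM per stored chunk. That constant is inferred from the table itself, not taken from zVault's source; a quick cross-check:

    // Cross-check of the table above, assuming ~100 bytes of index RAM per
    // chunk (read off the listed figures, not a constant from zVault).
    fn data_per_gib_of_ram(chunk_size: u64) -> u64 {
        const ASSUMED_BYTES_PER_CHUNK: u64 = 100;
        chunk_size * ((1 << 30) / ASSUMED_BYTES_PER_CHUNK)
    }

    fn main() {
        // 16 KiB chunks => ~163 GiB of data per 1 GiB of index RAM,
        // matching the "~160 GiB" row above.
        println!("{} GiB", data_per_gib_of_ram(16 * 1024) >> 30);
    }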
@@ -189,6 +194,7 @@ space. Higher compression takes longer and saves more space while low
 compression is faster but needs more space.
 
 ZVault supports the following compression methods:
+
 - **deflate** (also called *zlib* and *gzip*) is the most common algorithm today
   and guarantees that backups can be decompressed in future centuries. Its
   speed and compression ratio are acceptable but other algorithms are better.
@@ -208,6 +214,7 @@ ZVault supports the following compression methods:
   9 (best).
 
 The recommended combinations are:
+
 - Focusing speed: lz4 with level between 1 and 7
 - Balanced focus: brotli with levels between 1 and 10
 - Focusing storage space: lzma with levels between 1 and 9
@@ -215,6 +222,7 @@ The recommended combinations are:
 
 The compression algorithm and level are configured together via the syntax
 `algorithm/level` where `algorithm` is either `deflate`, `lz4`, `brotli` or
 `lzma` and `level` is a number.
+
 The default compression setting is **brotli/3**.
 
 Since the compression ratio and speed hugely depend on the input data,
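Parsing that `algorithm/level` syntax is a single split on `/`. A minimal sketch with a hypothetical helper (zVault's own `parse_compression`, referenced in `src/cli/args.rs`, is the authoritative implementation):

    // Minimal sketch of parsing "algorithm/level", e.g. "brotli/3".
    // Hypothetical helper, not zVault's parse_compression.
    fn parse_compression_spec(spec: &str) -> Option<(&str, u8)> {
        let (algo, level) = spec.split_once('/')?;
        match algo {
            "deflate" | "lz4" | "brotli" | "lzma" => {
                level.parse().ok().map(|l| (algo, l))
            }
            _ => None,
        }
    }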
@@ -272,7 +280,7 @@ The recommended hash algorithm is **blake2**.
 This command will initialize a repository in the default location with
 encryption enabled:
 
-    $> zvault init -e --remote /mnt/remote/backups
+    $> zvault init :: -e --remote /mnt/remote/backups
 
 Before using this repository, the key pair located at `~/.zvault/keys` should be
 backed up in a safe location (e.g. printed to paper).
@@ -288,7 +296,7 @@ backup them separately (zVault will not backup mounted folders by default):
 
 The backups can be listed by this command:
 
-    $> zvault list
+    $> zvault list ::
 
 and inspected by this command (the date needs to be adapted):
@@ -309,12 +317,12 @@ A single backup can be removed with this command:
 
 Multiple backups can be removed based on their date with the following command
 (add `-f` to actually remove backups):
 
-    $> zvault prune --prefix system --daily 7 --weekly 5 --monthly 12
+    $> zvault prune :: --prefix system --daily 7 --weekly 5 --monthly 12
 
 To reclaim storage space after removing some backups vacuum needs to be run
 (add `-f` to actually remove bundles):
 
-    $> zvault vacuum
+    $> zvault vacuum ::
@@ -274,8 +274,8 @@ impl BundleDb {
     }
 
     #[inline]
-    pub fn get_bundle_info(&self, bundle: &BundleId) -> Option<&BundleInfo> {
-        self.get_stored_bundle(bundle).ok().map(|stored| &stored.info)
+    pub fn get_bundle_info(&self, bundle: &BundleId) -> Option<&StoredBundle> {
+        self.get_stored_bundle(bundle).ok()
     }
 
     #[inline]
@@ -98,7 +98,7 @@ serde_impl!(BundleInfo(u64?) {
     id: BundleId => 0,
     mode: BundleMode => 1,
     compression: Option<Compression> => 2,
-    //encryption: already in the header
+    encryption: Option<Encryption> => 3,
     hash_method: HashMethod => 4,
     raw_size: usize => 6,
     encoded_size: usize => 7,
src/cli/args.rs
@@ -42,10 +42,10 @@ pub enum Arguments {
     Prune {
         repo_path: String,
         prefix: String,
-        daily: Option<usize>,
-        weekly: Option<usize>,
-        monthly: Option<usize>,
-        yearly: Option<usize>,
+        daily: usize,
+        weekly: usize,
+        monthly: usize,
+        yearly: usize,
         force: bool
     },
     Vacuum {
@@ -125,6 +125,9 @@ pub enum Arguments {
         compression: Option<Compression>,
         encrypt: bool,
         hash: HashMethod
     },
+    RebuildIndex {
+        repo_path: String
+    }
 }
@@ -276,8 +279,8 @@ fn validate_existing_path(val: String) -> Result<(), String> {
 #[allow(unknown_lints,cyclomatic_complexity)]
 pub fn parse() -> Result<Arguments, ErrorCode> {
     let args = App::new("zvault").version(crate_version!()).author(crate_authors!(",\n")).about(crate_description!())
-        .settings(&[AppSettings::AllowMissingPositional, AppSettings::VersionlessSubcommands, AppSettings::SubcommandRequiredElseHelp])
-        .global_settings(&[AppSettings::UnifiedHelpMessage, AppSettings::ColoredHelp, AppSettings::ColorAuto])
+        .settings(&[AppSettings::VersionlessSubcommands, AppSettings::SubcommandRequiredElseHelp])
+        .global_settings(&[AppSettings::AllowMissingPositional, AppSettings::UnifiedHelpMessage, AppSettings::ColoredHelp, AppSettings::ColorAuto])
         .subcommand(SubCommand::with_name("init").about("Initialize a new repository")
             .arg(Arg::from_usage("[bundle_size] --bundle-size [SIZE] 'Set the target bundle size in MiB'")
                 .default_value(DEFAULT_BUNDLE_SIZE_STR).validator(validate_num))
@@ -290,8 +293,8 @@ pub fn parse() -> Result<Arguments, ErrorCode> {
                 .default_value(DEFAULT_HASH).validator(validate_hash))
             .arg(Arg::from_usage("-r --remote <REMOTE> 'Set the path to the mounted remote storage'")
                 .validator(validate_existing_path))
-            .arg(Arg::from_usage("[REPO] 'The path for the new repository'")
-                .default_value("").validator(|val| validate_repo_path(val, false, Some(false), Some(false)))))
+            .arg(Arg::from_usage("<REPO> 'The path for the new repository'")
+                .validator(|val| validate_repo_path(val, false, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("backup").about("Create a new backup")
             .arg(Arg::from_usage("--full 'Create a full backup without using a reference'"))
            .arg(Arg::from_usage("[reference] --ref [REF] 'Base the new backup on this reference'")
@@ -326,47 +329,47 @@ pub fn parse() -> Result<Arguments, ErrorCode> {
             .arg(Arg::from_usage("-y --yearly [NUM] 'Keep this number of yearly backups'")
                 .default_value("0").validator(validate_num))
             .arg(Arg::from_usage("-f --force 'Actually run the prune instead of simulating it'"))
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("vacuum").about("Reclaim space by rewriting bundles")
             .arg(Arg::from_usage("-r --ratio [NUM] 'Ratio in % of unused space in a bundle to rewrite that bundle'")
                 .default_value(DEFAULT_VACUUM_RATIO_STR).validator(validate_num))
             .arg(Arg::from_usage("-f --force 'Actually run the vacuum instead of simulating it'"))
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("check").about("Check the repository, a backup or a backup subtree")
             .arg(Arg::from_usage("--full 'Also check file contents (slow)'"))
-            .arg(Arg::from_usage("[PATH] 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
+            .arg(Arg::from_usage("<PATH> 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
                 .validator(|val| validate_repo_path(val, true, None, None))))
         .subcommand(SubCommand::with_name("list").alias("ls").about("List backups or backup contents")
-            .arg(Arg::from_usage("[PATH] 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
+            .arg(Arg::from_usage("<PATH> 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
                 .validator(|val| validate_repo_path(val, true, None, None))))
         .subcommand(SubCommand::with_name("mount").about("Mount the repository, a backup or a subtree")
-            .arg(Arg::from_usage("[PATH] 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
+            .arg(Arg::from_usage("<PATH> 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
                 .validator(|val| validate_repo_path(val, true, None, None)))
            .arg(Arg::from_usage("<MOUNTPOINT> 'Existing mount point'")
                .validator(validate_existing_path)))
         .subcommand(SubCommand::with_name("bundlelist").about("List bundles in a repository")
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("bundleinfo").about("Display information on a bundle")
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false))))
            .arg(Arg::from_usage("<BUNDLE> 'Id of the bundle'")))
         .subcommand(SubCommand::with_name("import").about("Reconstruct a repository from the remote storage")
            .arg(Arg::from_usage("-k --key [FILE]... 'Key file needed to read the bundles'"))
            .arg(Arg::from_usage("<REMOTE> 'Remote repository path'")
                .validator(validate_existing_path))
-            .arg(Arg::from_usage("[REPO] 'The path for the new repository'")
+            .arg(Arg::from_usage("<REPO> 'The path for the new repository'")
                 .validator(|val| validate_repo_path(val, false, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("info").about("Display information on a repository, a backup or a subtree")
-            .arg(Arg::from_usage("[PATH] 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
+            .arg(Arg::from_usage("<PATH> 'Path of the repository/backup/subtree, [repository][::backup[::subtree]]'")
                 .validator(|val| validate_repo_path(val, true, None, None))))
         .subcommand(SubCommand::with_name("analyze").about("Analyze the used and reclaimable space of bundles")
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("versions").about("Find different versions of a file in all backups")
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false))))
            .arg(Arg::from_usage("<PATH> 'Path of the file'")))
         .subcommand(SubCommand::with_name("diff").about("Display differences between two backup versions")
@@ -385,7 +388,7 @@ pub fn parse() -> Result<Arguments, ErrorCode> {
                 .validator(validate_public_key))
             .arg(Arg::from_usage("--hash [HASH] 'Set the hash method'")
                 .validator(validate_hash))
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
                 .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("genkey").about("Generate a new key pair")
             .arg(Arg::from_usage("[FILE] 'Destination file for the keypair'")))
@@ -393,9 +396,12 @@ pub fn parse() -> Result<Arguments, ErrorCode> {
             .arg(Arg::from_usage("-g --generate 'Generate a new key pair'")
                 .conflicts_with("FILE"))
             .arg(Arg::from_usage("[set_default] --default -d 'Set the key pair as default'"))
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
+                .validator(|val| validate_repo_path(val, true, Some(false), Some(false))))
             .arg(Arg::from_usage("[FILE] 'File containing the keypair'")
-                .required_unless("generate").validator(validate_existing_path))
-            .arg(Arg::from_usage("[REPO] 'Path of the repository'")
-                .required_unless("generate").validator(validate_existing_path)))
+                .required_unless("generate").validator(validate_existing_path)))
+        .subcommand(SubCommand::with_name("rebuild-index").about("Rebuild the index")
+            .arg(Arg::from_usage("<REPO> 'Path of the repository'")
+                .validator(|val| validate_repo_path(val, true, Some(false), Some(false)))))
         .subcommand(SubCommand::with_name("algotest").about("Test a specific algorithm combination")
             .arg(Arg::from_usage("[bundle_size] --bundle-size [SIZE] 'Set the target bundle size in MiB'")
@@ -409,212 +415,195 @@ pub fn parse() -> Result<Arguments, ErrorCode> {
                 .default_value(DEFAULT_HASH).validator(validate_hash))
             .arg(Arg::from_usage("<FILE> 'File with test data'")
                 .validator(validate_existing_path))).get_matches();
-    if let Some(args) = args.subcommand_matches("init") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), false, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Init {
-            bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
-            chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
-            compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
-            encryption: args.is_present("encrypt"),
-            hash: parse_hash(args.value_of("hash").unwrap()).unwrap(),
-            repo_path: repository.to_string(),
-            remote_path: args.value_of("remote").unwrap().to_string()
-        })
-    }
+    match args.subcommand() {
+        ("init", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Init {
+                bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
+                chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
+                compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
+                encryption: args.is_present("encrypt"),
+                hash: parse_hash(args.value_of("hash").unwrap()).unwrap(),
+                repo_path: repository.to_string(),
+                remote_path: args.value_of("remote").unwrap().to_string()
+            })
+        },
+        ("backup", Some(args)) => {
+            let (repository, backup, _inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), Some(false)).unwrap();
+            Ok(Arguments::Backup {
+                repo_path: repository.to_string(),
+                backup_name: backup.unwrap().to_string(),
+                full: args.is_present("full"),
+                same_device: !args.is_present("cross_device"),
+                excludes: args.values_of("exclude").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![]),
+                excludes_from: args.value_of("excludes_from").map(|v| v.to_string()),
+                src_path: args.value_of("SRC").unwrap().to_string(),
+                reference: args.value_of("reference").map(|v| v.to_string()),
+                no_default_excludes: args.is_present("no_default_excludes"),
+                tar: args.is_present("tar")
+            })
+        },
+        ("restore", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
+            Ok(Arguments::Restore {
+                repo_path: repository.to_string(),
+                backup_name: backup.unwrap().to_string(),
+                inode: inode.map(|v| v.to_string()),
+                dst_path: args.value_of("DST").unwrap().to_string(),
+                tar: args.is_present("tar")
+            })
+        },
+        ("remove", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
+            Ok(Arguments::Remove {
+                repo_path: repository.to_string(),
+                backup_name: backup.unwrap().to_string(),
+                inode: inode.map(|v| v.to_string())
+            })
+        },
+        ("prune", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Prune {
+                repo_path: repository.to_string(),
+                prefix: args.value_of("prefix").unwrap_or("").to_string(),
+                force: args.is_present("force"),
+                daily: parse_num(args.value_of("daily").unwrap()).unwrap() as usize,
+                weekly: parse_num(args.value_of("weekly").unwrap()).unwrap() as usize,
+                monthly: parse_num(args.value_of("monthly").unwrap()).unwrap() as usize,
+                yearly: parse_num(args.value_of("yearly").unwrap()).unwrap() as usize
+            })
+        },
+        ("vacuum", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Vacuum {
+                repo_path: repository.to_string(),
+                force: args.is_present("force"),
+                ratio: parse_num(args.value_of("ratio").unwrap()).unwrap() as f32 / 100.0
+            })
+        },
+        ("check", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            Ok(Arguments::Check {
+                repo_path: repository.to_string(),
+                backup_name: backup.map(|v| v.to_string()),
+                inode: inode.map(|v| v.to_string()),
+                full: args.is_present("full")
+            })
+        },
+        ("list", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            Ok(Arguments::List {
+                repo_path: repository.to_string(),
+                backup_name: backup.map(|v| v.to_string()),
+                inode: inode.map(|v| v.to_string())
+            })
+        },
+        ("bundlelist", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::BundleList {
+                repo_path: repository.to_string(),
+            })
+        },
+        ("bundleinfo", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::BundleInfo {
+                repo_path: repository.to_string(),
+                bundle_id: try!(parse_bundle_id(args.value_of("BUNDLE").unwrap()))
+            })
+        },
+        ("info", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            Ok(Arguments::Info {
+                repo_path: repository.to_string(),
+                backup_name: backup.map(|v| v.to_string()),
+                inode: inode.map(|v| v.to_string())
+            })
+        },
+        ("mount", Some(args)) => {
+            let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap(), true, None, None).unwrap();
+            Ok(Arguments::Mount {
+                repo_path: repository.to_string(),
+                backup_name: backup.map(|v| v.to_string()),
+                inode: inode.map(|v| v.to_string()),
+                mount_point: args.value_of("MOUNTPOINT").unwrap().to_string()
+            })
+        },
+        ("versions", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Versions {
+                repo_path: repository.to_string(),
+                path: args.value_of("PATH").unwrap().to_string()
+            })
+        },
+        ("diff", Some(args)) => {
+            let (repository_old, backup_old, inode_old) = parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
+            let (repository_new, backup_new, inode_new) = parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
+            Ok(Arguments::Diff {
+                repo_path_old: repository_old.to_string(),
+                backup_name_old: backup_old.unwrap().to_string(),
+                inode_old: inode_old.map(|v| v.to_string()),
+                repo_path_new: repository_new.to_string(),
+                backup_name_new: backup_new.unwrap().to_string(),
+                inode_new: inode_new.map(|v| v.to_string()),
+            })
+        },
+        ("analyze", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Analyze {
+                repo_path: repository.to_string()
+            })
+        },
+        ("rebuild-index", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::RebuildIndex {
+                repo_path: repository.to_string()
+            })
+        },
+        ("import", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), false, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Import {
+                repo_path: repository.to_string(),
+                remote_path: args.value_of("REMOTE").unwrap().to_string(),
+                key_files: args.values_of("key").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![])
+            })
+        },
+        ("config", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::Config {
+                bundle_size: args.value_of("bundle_size").map(|v| parse_num(v).unwrap() as usize * 1024 * 1024),
+                chunker: args.value_of("chunker").map(|v| parse_chunker(v).unwrap()),
+                compression: args.value_of("compression").map(|v| parse_compression(v).unwrap()),
+                encryption: args.value_of("encryption").map(|v| parse_public_key(v).unwrap()),
+                hash: args.value_of("hash").map(|v| parse_hash(v).unwrap()),
+                repo_path: repository.to_string(),
+            })
+        },
+        ("genkey", Some(args)) => {
+            Ok(Arguments::GenKey {
+                file: args.value_of("FILE").map(|v| v.to_string())
+            })
+        },
+        ("addkey", Some(args)) => {
+            let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap(), true, Some(false), Some(false)).unwrap();
+            Ok(Arguments::AddKey {
+                repo_path: repository.to_string(),
+                set_default: args.is_present("set_default"),
+                file: args.value_of("FILE").map(|v| v.to_string())
+            })
+        },
+        ("algotest", Some(args)) => {
+            Ok(Arguments::AlgoTest {
+                bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
+                chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
+                compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
+                encrypt: args.is_present("encrypt"),
+                hash: parse_hash(args.value_of("hash").unwrap()).unwrap(),
+                file: args.value_of("FILE").unwrap().to_string(),
+            })
+        },
+        _ => {
+            error!("No subcommand given");
+            Err(ErrorCode::InvalidArgs)
+        }
+    }
-    if let Some(args) = args.subcommand_matches("backup") {
-        let (repository, backup, _inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), Some(false)).unwrap();
-        return Ok(Arguments::Backup {
-            repo_path: repository.to_string(),
-            backup_name: backup.unwrap().to_string(),
-            full: args.is_present("full"),
-            same_device: !args.is_present("cross_device"),
-            excludes: args.values_of("exclude").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![]),
-            excludes_from: args.value_of("excludes_from").map(|v| v.to_string()),
-            src_path: args.value_of("SRC").unwrap().to_string(),
-            reference: args.value_of("reference").map(|v| v.to_string()),
-            no_default_excludes: args.is_present("no_default_excludes"),
-            tar: args.is_present("tar")
-        })
-    }
-    if let Some(args) = args.subcommand_matches("restore") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
-        return Ok(Arguments::Restore {
-            repo_path: repository.to_string(),
-            backup_name: backup.unwrap().to_string(),
-            inode: inode.map(|v| v.to_string()),
-            dst_path: args.value_of("DST").unwrap().to_string(),
-            tar: args.is_present("tar")
-        })
-    }
-    if let Some(args) = args.subcommand_matches("remove") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("BACKUP").unwrap(), true, Some(true), None).unwrap();
-        return Ok(Arguments::Remove {
-            repo_path: repository.to_string(),
-            backup_name: backup.unwrap().to_string(),
-            inode: inode.map(|v| v.to_string())
-        })
-    }
-    if let Some(args) = args.subcommand_matches("prune") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Prune {
-            repo_path: repository.to_string(),
-            prefix: args.value_of("prefix").unwrap_or("").to_string(),
-            force: args.is_present("force"),
-            daily: match args.value_of("daily") {
-                None => None,
-                Some(v) => Some(parse_num(v).unwrap() as usize)
-            },
-            weekly: match args.value_of("weekly") {
-                None => None,
-                Some(v) => Some(parse_num(v).unwrap() as usize)
-            },
-            monthly: match args.value_of("monthly") {
-                None => None,
-                Some(v) => Some(parse_num(v).unwrap() as usize)
-            },
-            yearly: match args.value_of("yearly") {
-                None => None,
-                Some(v) => Some(parse_num(v).unwrap() as usize)
-            }
-        })
-    }
-    if let Some(args) = args.subcommand_matches("vacuum") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Vacuum {
-            repo_path: repository.to_string(),
-            force: args.is_present("force"),
-            ratio: parse_num(args.value_of("ratio").unwrap()).unwrap() as f32 / 100.0
-        })
-    }
-    if let Some(args) = args.subcommand_matches("check") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap_or(""), true, None, None).unwrap();
-        return Ok(Arguments::Check {
-            repo_path: repository.to_string(),
-            backup_name: backup.map(|v| v.to_string()),
-            inode: inode.map(|v| v.to_string()),
-            full: args.is_present("full")
-        })
-    }
-    if let Some(args) = args.subcommand_matches("list") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap_or(""), true, None, None).unwrap();
-        return Ok(Arguments::List {
-            repo_path: repository.to_string(),
-            backup_name: backup.map(|v| v.to_string()),
-            inode: inode.map(|v| v.to_string())
-        })
-    }
-    if let Some(args) = args.subcommand_matches("bundlelist") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::BundleList {
-            repo_path: repository.to_string(),
-        })
-    }
-    if let Some(args) = args.subcommand_matches("bundleinfo") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::BundleInfo {
-            repo_path: repository.to_string(),
-            bundle_id: try!(parse_bundle_id(args.value_of("BUNDLE").unwrap()))
-        })
-    }
-    if let Some(args) = args.subcommand_matches("info") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap_or(""), true, None, None).unwrap();
-        return Ok(Arguments::Info {
-            repo_path: repository.to_string(),
-            backup_name: backup.map(|v| v.to_string()),
-            inode: inode.map(|v| v.to_string())
-        })
-    }
-    if let Some(args) = args.subcommand_matches("mount") {
-        let (repository, backup, inode) = parse_repo_path(args.value_of("PATH").unwrap_or(""), true, None, None).unwrap();
-        return Ok(Arguments::Mount {
-            repo_path: repository.to_string(),
-            backup_name: backup.map(|v| v.to_string()),
-            inode: inode.map(|v| v.to_string()),
-            mount_point: args.value_of("MOUNTPOINT").unwrap().to_string()
-        })
-    }
-    if let Some(args) = args.subcommand_matches("versions") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Versions {
-            repo_path: repository.to_string(),
-            path: args.value_of("PATH").unwrap().to_string()
-        })
-    }
-    if let Some(args) = args.subcommand_matches("diff") {
-        let (repository_old, backup_old, inode_old) = parse_repo_path(args.value_of("OLD").unwrap(), true, Some(true), None).unwrap();
-        let (repository_new, backup_new, inode_new) = parse_repo_path(args.value_of("NEW").unwrap(), true, Some(true), None).unwrap();
-        return Ok(Arguments::Diff {
-            repo_path_old: repository_old.to_string(),
-            backup_name_old: backup_old.unwrap().to_string(),
-            inode_old: inode_old.map(|v| v.to_string()),
-            repo_path_new: repository_new.to_string(),
-            backup_name_new: backup_new.unwrap().to_string(),
-            inode_new: inode_new.map(|v| v.to_string()),
-        })
-    }
-    if let Some(args) = args.subcommand_matches("analyze") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Analyze {
-            repo_path: repository.to_string()
-        })
-    }
-    if let Some(args) = args.subcommand_matches("import") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), false, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Import {
-            repo_path: repository.to_string(),
-            remote_path: args.value_of("REMOTE").unwrap().to_string(),
-            key_files: args.values_of("key").map(|v| v.map(|k| k.to_string()).collect()).unwrap_or_else(|| vec![])
-        })
-    }
-    if let Some(args) = args.subcommand_matches("config") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::Config {
-            bundle_size: match args.value_of("bundle_size") {
-                None => None,
-                Some(v) => Some((parse_num(v).unwrap() * 1024 * 1024) as usize)
-            },
-            chunker: match args.value_of("chunker") {
-                None => None,
-                Some(v) => Some(parse_chunker(v).unwrap())
-            },
-            compression: match args.value_of("compression") {
-                None => None,
-                Some(v) => Some(parse_compression(v).unwrap())
-            },
-            encryption: match args.value_of("encryption") {
-                None => None,
-                Some(v) => Some(parse_public_key(v).unwrap())
-            },
-            hash: match args.value_of("hash") {
-                None => None,
-                Some(v) => Some(parse_hash(v).unwrap())
-            },
-            repo_path: repository.to_string(),
-        })
-    }
-    if let Some(args) = args.subcommand_matches("genkey") {
-        return Ok(Arguments::GenKey {
-            file: args.value_of("FILE").map(|v| v.to_string())
-        })
-    }
-    if let Some(args) = args.subcommand_matches("addkey") {
-        let (repository, _backup, _inode) = parse_repo_path(args.value_of("REPO").unwrap_or(""), true, Some(false), Some(false)).unwrap();
-        return Ok(Arguments::AddKey {
-            repo_path: repository.to_string(),
-            set_default: args.is_present("set_default"),
-            file: args.value_of("FILE").map(|v| v.to_string())
-        })
-    }
-    if let Some(args) = args.subcommand_matches("algotest") {
-        return Ok(Arguments::AlgoTest {
-            bundle_size: (parse_num(args.value_of("bundle_size").unwrap()).unwrap() * 1024 * 1024) as usize,
-            chunker: parse_chunker(args.value_of("chunker").unwrap()).unwrap(),
-            compression: parse_compression(args.value_of("compression").unwrap()).unwrap(),
-            encrypt: args.is_present("encrypt"),
-            hash: parse_hash(args.value_of("hash").unwrap()).unwrap(),
-            file: args.value_of("FILE").unwrap().to_string(),
-        })
-    }
-    error!("No subcommand given");
-    Err(ErrorCode::InvalidArgs)
 }
@@ -24,7 +24,7 @@ pub enum ErrorCode {
     SaveConfig,
     LoadExcludes, InvalidExcludes,
     BackupRun, RestoreRun, RemoveRun, PruneRun, VacuumRun, CheckRun, AnalyzeRun, DiffRun,
-    VersionsRun, ImportRun, FuseMount
+    VersionsRun, ImportRun, FuseMount, RebuildIndexRun
 }
 impl ErrorCode {
     pub fn code(&self) -> i32 {
@@ -59,6 +59,7 @@ impl ErrorCode {
             ErrorCode::VersionsRun => 22,
             ErrorCode::ImportRun => 23,
             ErrorCode::FuseMount => 24,
+            ErrorCode::RebuildIndexRun => 25
         }
     }
 }
@@ -189,21 +190,22 @@ fn print_repoinfo(info: &RepositoryInfo) {
     println!("Index: {}, {:.0}% full", to_file_size(info.index_size as u64), index_usage * 100.0);
 }
 
-fn print_bundle(bundle: &BundleInfo) {
-    println!("Bundle {}", bundle.id);
-    println!(" - Mode: {:?}", bundle.mode);
-    println!(" - Hash method: {:?}", bundle.hash_method);
-    let encryption = if let Some((_, ref key)) = bundle.encryption {
+fn print_bundle(bundle: &StoredBundle) {
+    println!("Bundle {}", bundle.info.id);
+    println!(" - Mode: {:?}", bundle.info.mode);
+    println!(" - Path: {:?}", bundle.path);
+    println!(" - Hash method: {:?}", bundle.info.hash_method);
+    let encryption = if let Some((_, ref key)) = bundle.info.encryption {
         to_hex(key)
     } else {
         "none".to_string()
     };
     println!(" - Encryption: {}", encryption);
-    println!(" - Chunks: {}", bundle.chunk_count);
-    println!(" - Size: {}", to_file_size(bundle.encoded_size as u64));
-    println!(" - Data size: {}", to_file_size(bundle.raw_size as u64));
-    let ratio = bundle.encoded_size as f32 / bundle.raw_size as f32;
-    let compression = if let Some(ref c) = bundle.compression {
+    println!(" - Chunks: {}", bundle.info.chunk_count);
+    println!(" - Size: {}", to_file_size(bundle.info.encoded_size as u64));
+    println!(" - Data size: {}", to_file_size(bundle.info.raw_size as u64));
+    let ratio = bundle.info.encoded_size as f32 / bundle.info.raw_size as f32;
+    let compression = if let Some(ref c) = bundle.info.compression {
         c.to_string()
     } else {
         "none".to_string()
@@ -383,7 +385,7 @@ pub fn run() -> Result<(), ErrorCode> {
         },
         Arguments::Prune{repo_path, prefix, daily, weekly, monthly, yearly, force} => {
             let repo = try!(open_repository(&repo_path));
-            if daily.is_none() && weekly.is_none() && monthly.is_none() && yearly.is_none() {
+            if daily + weekly + monthly + yearly == 0 {
                error!("This would remove all those backups");
                return Err(ErrorCode::UnsafeArgs)
             }
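Because `--daily` and friends now default to `"0"` in the clap definitions above, "no retention count given" is expressed as an all-zero sum rather than four `None`s. A minimal check of that equivalence, assuming the zero defaults:

    // The Option-based guard and the new integer guard agree when "not
    // given" maps to 0 (the default_value("0") set in src/cli/args.rs).
    fn nothing_requested(daily: usize, weekly: usize, monthly: usize, yearly: usize) -> bool {
        daily + weekly + monthly + yearly == 0
    }

    fn main() {
        assert!(nothing_requested(0, 0, 0, 0));
        assert!(!nothing_requested(7, 5, 12, 0));
    }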
@@ -480,6 +482,10 @@ pub fn run() -> Result<(), ErrorCode> {
             let mut repo = try!(open_repository(&repo_path));
             print_analysis(&checked!(repo.analyze_usage(), "analyze repository", ErrorCode::AnalyzeRun));
         },
+        Arguments::RebuildIndex{repo_path} => {
+            let mut repo = try!(open_repository(&repo_path));
+            checked!(repo.rebuild_index(), "rebuild index", ErrorCode::RebuildIndexRun);
+        },
         Arguments::BundleList{repo_path} => {
             let repo = try!(open_repository(&repo_path));
             for bundle in repo.list_bundles() {
@@ -523,4 +523,12 @@ impl Index {
     pub fn capacity(&self) -> usize {
         self.capacity
     }
+
+    #[inline]
+    pub fn clear(&mut self) {
+        for entry in &mut self.data[..] {
+            entry.clear();
+        }
+        self.entries = 0;
+    }
 }
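The new `clear` empties every slot and resets the entry count while keeping the allocated table, which is what `rebuild_index` (added further down in this commit) needs before re-inserting all chunk locations. A self-contained analogue of the pattern, not zVault's actual `Index` type:

    // Minimal analogue of the clear() added above: reset all slots and the
    // element count, keeping the allocated capacity.
    struct TinyIndex {
        data: Vec<Option<(u64, u32)>>, // (hash, location) slots
        entries: usize,
    }

    impl TinyIndex {
        fn clear(&mut self) {
            for entry in &mut self.data[..] {
                *entry = None;
            }
            self.entries = 0;
        }
    }

    fn main() {
        let mut idx = TinyIndex { data: vec![Some((1, 2)); 8], entries: 8 };
        idx.clear();
        assert_eq!(idx.entries, 0);
        assert!(idx.data.iter().all(|e| e.is_none()));
    }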
@@ -1,5 +1,5 @@
 pub use ::util::*;
-pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError};
+pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError, StoredBundle};
 pub use ::chunker::{ChunkerType, Chunker, ChunkerStatus, IChunker, ChunkerError};
 pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, RepositoryIntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis, FileData, DiffType, InodeError};
 pub use ::index::{Index, Location, IndexError};
@@ -64,7 +64,7 @@ impl Repository {
     }
 
 
-    pub fn prune_backups(&self, prefix: &str, daily: Option<usize>, weekly: Option<usize>, monthly: Option<usize>, yearly: Option<usize>, force: bool) -> Result<(), RepositoryError> {
+    pub fn prune_backups(&self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
         let mut backups = Vec::new();
         let backup_map = match self.get_backups() {
             Ok(backup_map) => backup_map,
@@ -99,17 +99,17 @@ impl Repository {
                 }
             }
         }
-        if let Some(max) = yearly {
-            mark_needed(&backups, &mut keep, max, |d| d.year());
+        if yearly > 0 {
+            mark_needed(&backups, &mut keep, yearly, |d| d.year());
         }
-        if let Some(max) = monthly {
-            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month()));
+        if monthly > 0 {
+            mark_needed(&backups, &mut keep, monthly, |d| (d.year(), d.month()));
         }
-        if let Some(max) = weekly {
-            mark_needed(&backups, &mut keep, max, |d| (d.isoweekdate().0, d.isoweekdate().1));
+        if weekly > 0 {
+            mark_needed(&backups, &mut keep, weekly, |d| (d.isoweekdate().0, d.isoweekdate().1));
         }
-        if let Some(max) = daily {
-            mark_needed(&backups, &mut keep, max, |d| (d.year(), d.month(), d.day()));
+        if daily > 0 {
+            mark_needed(&backups, &mut keep, daily, |d| (d.year(), d.month(), d.day()));
         }
         let mut remove = Vec::new();
         info!("Removing the following backups");
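Each guard keeps up to N calendar periods: `mark_needed` groups backups by the given key (year, year plus month, ISO week, or day) and marks backups to keep for the N most recent groups. A sketch of that idea with a hypothetical helper (zVault's real `mark_needed` signature is not part of this diff):

    // Illustrative keep-N-per-period logic: walk backups newest first,
    // group them by a calendar key, and keep the newest backup of each of
    // the N most recent groups. Hypothetical stand-in for mark_needed.
    fn mark_needed_sketch<K: Eq + Clone>(
        backups: &[(i64, usize)], // (timestamp, index) pairs, newest first
        keep: &mut [bool],
        max: usize,
        key: impl Fn(i64) -> K,
    ) {
        let mut groups_seen: Vec<K> = Vec::new();
        for &(ts, idx) in backups {
            let k = key(ts);
            if !groups_seen.contains(&k) {
                if groups_seen.len() >= max {
                    break; // the N most recent periods are covered
                }
                groups_seen.push(k);
                keep[idx] = true; // newest backup in this period
            }
        }
    }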
@@ -189,11 +189,12 @@ impl Repository {
                 .and_then(|chunks| self.get_inode(chunks).ok());
             let child_inode = match self.create_backup_recurse(&child_path, ref_child.as_ref(), options, backup, failed_paths) {
                 Ok(inode) => inode,
-                Err(_) => {
+                Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
                     warn!("Failed to backup {:?}", child_path);
                     failed_paths.push(child_path);
                     continue
-                }
+                },
+                Err(err) => return Err(err)
             };
             let chunks = try!(self.put_inode(&child_inode));
             children.insert(name, chunks);
@@ -80,10 +80,7 @@ impl Repository {
         self.put_chunk_override(mode, hash, data)
     }
 
-    pub fn put_chunk_override(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
-        // Calculate the next free bundle id now (late lifetime prevents this)
-        let next_free_bundle_id = self.next_free_bundle_id();
-        // Select a bundle writer according to the mode and...
+    fn write_chunk_to_bundle_and_index(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
         let writer = match mode {
             BundleMode::Data => &mut self.data_bundle,
             BundleMode::Meta => &mut self.meta_bundle
@@ -98,39 +95,73 @@ impl Repository {
             )));
         }
         debug_assert!(writer.is_some());
-        let chunk_id;
-        let size;
-        let raw_size;
-        {
-            // Add chunk to bundle writer and determine the size of the bundle
-            let writer_obj = writer.as_mut().unwrap();
-            chunk_id = try!(writer_obj.add(data, hash));
-            size = writer_obj.size();
-            raw_size = writer_obj.raw_size();
-        }
+        // Add chunk to bundle writer and determine the size of the bundle
+        let writer_obj = writer.as_mut().unwrap();
+        let chunk_id = try!(writer_obj.add(data, hash));
         let bundle_id = match mode {
             BundleMode::Data => self.next_data_bundle,
             BundleMode::Meta => self.next_meta_bundle
         };
+        // Add location to the index
+        try!(self.index.set(&hash, &Location::new(bundle_id, chunk_id as u32)));
+        Ok(())
+    }
+
+    fn finish_bundle(&mut self, mode: BundleMode) -> Result<(), RepositoryError> {
+        // Calculate the next free bundle id now (late lifetime prevents this)
+        let next_free_bundle_id = self.next_free_bundle_id();
+        let writer = match mode {
+            BundleMode::Data => &mut self.data_bundle,
+            BundleMode::Meta => &mut self.meta_bundle
+        };
+        if writer.is_none() {
+            return Ok(())
+        }
+        let bundle_id = match mode {
+            BundleMode::Data => self.next_data_bundle,
+            BundleMode::Meta => self.next_meta_bundle
+        };
-        // Finish bundle if over maximum size
-        if size >= self.config.bundle_size || raw_size >= 4 * self.config.bundle_size {
-            let mut finished = None;
-            mem::swap(writer, &mut finished);
-            let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
-            self.bundle_map.set(bundle_id, bundle.id.clone());
-            if self.next_meta_bundle == bundle_id {
-                self.next_meta_bundle = next_free_bundle_id
-            }
-            if self.next_data_bundle == bundle_id {
-                self.next_data_bundle = next_free_bundle_id
-            }
-            // Not saving the bundle map, this will be done by flush
-        }
-        // Add location to the index
-        try!(self.index.set(&hash, &Location::new(bundle_id, chunk_id as u32)));
+        let mut finished = None;
+        mem::swap(writer, &mut finished);
+        let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
+        self.bundle_map.set(bundle_id, bundle.id.clone());
+        if self.next_meta_bundle == bundle_id {
+            self.next_meta_bundle = next_free_bundle_id
+        }
+        if self.next_data_bundle == bundle_id {
+            self.next_data_bundle = next_free_bundle_id
+        }
         Ok(())
     }
+
+    fn finish_bundle_if_needed(&mut self, mode: BundleMode) -> Result<(), RepositoryError> {
+        let (size, raw_size) = {
+            let writer = match mode {
+                BundleMode::Data => &mut self.data_bundle,
+                BundleMode::Meta => &mut self.meta_bundle
+            };
+            if let Some(ref writer) = *writer {
+                (writer.size(), writer.raw_size())
+            } else {
+                return Ok(())
+            }
+        };
+        if size >= self.config.bundle_size || raw_size >= 4 * self.config.bundle_size {
+            if mode == BundleMode::Meta {
+                // First store the current data bundle as meta refers to those chunks
+                try!(self.finish_bundle(BundleMode::Data))
+            }
+            try!(self.finish_bundle(mode))
+        }
+        Ok(())
+    }
+
+    #[inline]
+    pub fn put_chunk_override(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
+        try!(self.write_chunk_to_bundle_and_index(mode, hash, data));
+        self.finish_bundle_if_needed(mode)
+    }
 
     #[inline]
     pub fn put_data(&mut self, mode: BundleMode, data: &[u8]) -> Result<ChunkList, RepositoryError> {
         let mut input = Cursor::new(data);
@@ -65,8 +65,8 @@ impl Repository {
         for (id, bundle) in self.bundle_map.bundles() {
             let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| RepositoryIntegrityError::MissingBundle(bundle)));
             usage.insert(id, BundleAnalysis {
-                chunk_usage: Bitmap::new(bundle.chunk_count),
-                info: bundle.clone(),
+                chunk_usage: Bitmap::new(bundle.info.chunk_count),
+                info: bundle.info.clone(),
                 used_raw_size: 0
             });
         }
@@ -110,7 +110,7 @@ impl Repository {
     }
 
     #[inline]
-    pub fn get_bundle(&self, bundle: &BundleId) -> Option<&BundleInfo> {
+    pub fn get_bundle(&self, bundle: &BundleId) -> Option<&StoredBundle> {
         self.bundles.get_bundle_info(bundle)
     }
@@ -20,7 +20,7 @@ quick_error!{
     }
     NoSuchChunk(bundle: BundleId, chunk: u32) {
         description("No such chunk")
-        display("Bundle {} does not conain the chunk {}", bundle, chunk)
+        display("Bundle {} does not contain the chunk {}", bundle, chunk)
     }
     InvalidNextBundleId {
         description("Invalid next bundle id")
@@ -40,7 +40,7 @@ impl Repository {
             return Err(RepositoryIntegrityError::MissingBundle(bundle_id.clone()).into())
         };
         // Get chunk from bundle
-        if bundle.chunk_count <= location.chunk as usize {
+        if bundle.info.chunk_count <= location.chunk as usize {
             return Err(RepositoryIntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into())
         }
         Ok(())
@@ -232,6 +232,17 @@ impl Repository {
         Ok(())
     }
 
+    pub fn rebuild_index(&mut self) -> Result<(), RepositoryError> {
+        self.index.clear();
+        for (num, id) in self.bundle_map.bundles() {
+            let chunks = try!(self.bundles.get_chunk_list(&id));
+            for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
+                try!(self.index.set(&hash, &Location{bundle: num as u32, chunk: i as u32}));
+            }
+        }
+        Ok(())
+    }
+
     fn remove_gone_remote_bundle(&mut self, bundle: BundleInfo) -> Result<(), RepositoryError> {
         if let Some(id) = self.bundle_map.find(&bundle.id) {
             info!("Removing bundle from index: {}", bundle.id);
@@ -81,10 +81,13 @@ impl Repository {
                 }
                 inodes.insert(path, (inode, HashSet::new()));
             },
-            Err(_) => {
+            Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
                 warn!("Failed to backup {:?}", path);
                 failed_paths.push(path);
                 continue
             },
+            Err(err) => {
+                return Err(err);
+            }
         }
     }