Compare commits


63 Commits

Author SHA1 Message Date
Dennis Schwerdel 98a09fea2e Bugfix 2018-03-08 20:05:27 +01:00
Dennis Schwerdel 2f3c97a043 stats & dups 2018-03-08 15:21:11 +01:00
Dennis Schwerdel 56c916f585 more stats 2018-03-07 00:36:44 +01:00
Dennis Schwerdel d47edee08f Stats 2018-03-06 22:53:35 +01:00
Dennis Schwerdel 224bf1d25c Index stats 2018-03-06 22:22:52 +01:00
Dennis Schwerdel 3b7bb52620 libsodium23 deb 2018-03-04 23:09:41 +01:00
Dennis Schwerdel 08b1f118f1 New comparison 2018-03-04 17:51:55 +01:00
Dennis Schwerdel d93e4c9cb6 new comparison commands 2018-03-04 16:29:58 +01:00
Dennis Schwerdel 9bd518655d new comparison 2018-03-03 23:58:16 +01:00
Dennis Schwerdel bdb13b96cf Updated copyright 2018-03-03 17:25:38 +01:00
Dennis Schwerdel 1102600893 Some changes 2018-03-03 17:25:05 +01:00
Dennis Schwerdel 2f6c3b239e Update 2018-02-25 01:03:18 +01:00
Dennis Schwerdel 9ca22008c7 New strings 2018-02-24 23:35:12 +01:00
Dennis Schwerdel 6f9611bba6 All in one module 2018-02-24 23:28:18 +01:00
Dennis Schwerdel a81aaae637 strip 2018-02-24 15:17:02 +01:00
Dennis Schwerdel 98c352814f Tanslation 2018-02-24 14:55:56 +01:00
Dennis Schwerdel ce3223b5ea More files 2018-02-24 13:55:24 +01:00
Dennis Schwerdel 8911c8af6d Translation infrastructure 2018-02-24 13:19:51 +01:00
Dennis Schwerdel 24e28e6bcc First translation code 2018-02-21 22:49:21 +01:00
Dennis Schwerdel aa6a450e43 Updated dependencies 2 2018-02-19 23:31:58 +01:00
Dennis Schwerdel c87458d981 Updated dependencies 1 2018-02-19 22:42:44 +01:00
Dennis Schwerdel 618a858506 Make clippy happy 2018-02-19 22:30:59 +01:00
Dennis Schwerdel fb73e29a20 Some minor changes 2018-02-19 21:18:47 +01:00
Dennis Schwerdel b2331c61fd Repository readme 2017-08-04 20:35:04 +02:00
Dennis Schwerdel 5fe41127fc More tests, forcing cut-point-skipping, using black_box 2017-08-03 07:34:16 +02:00
Dennis Schwerdel b4e6b34bbe Configurable cut-point-skipping in fastcdc 2017-08-02 23:36:01 +02:00
Dennis Schwerdel cbfcc255af Simplified code 2017-08-02 23:12:46 +02:00
Dennis Schwerdel 5ad90f2929 Test chunker output 2017-08-02 22:19:01 +02:00
Dennis Schwerdel 837df8bbd3 Also including the first min_size bytes in hash (oops), performance improvements 2017-08-02 22:18:37 +02:00
Dennis Schwerdel 54e2329228 Badges 2017-07-31 20:23:46 +02:00
Dennis Schwerdel 5992a33c3f Coverage only on nightly 2017-07-30 22:09:40 +02:00
Dennis Schwerdel ccccd7da0c Added libfuse 2017-07-30 21:37:59 +02:00
Dennis Schwerdel c6480de13c Next try on travis ci 2017-07-30 21:25:09 +02:00
Dennis Schwerdel b62ab95503 Compiling libsodium18 2017-07-30 21:12:28 +02:00
Dennis Schwerdel c954b8489c Added libsodium to travis 2017-07-30 21:09:05 +02:00
Dennis Schwerdel e4a9b3a411 Updated dependencies 2017-07-30 21:05:04 +02:00
Dennis Schwerdel fd4798b35c Added travis config 2017-07-30 20:59:23 +02:00
Dennis Schwerdel 7303da43b3 Release v0.4.0 2017-07-21 11:44:07 +02:00
Dennis Schwerdel d062aaa6d4 Reformatted using rustfmt 2017-07-21 11:21:59 +02:00
Dennis Schwerdel 15ab556c18 Updated dependencies 2017-07-18 23:30:51 +02:00
Dennis Schwerdel abfa14effc Tests & Benches for hash 2017-07-04 14:55:30 +02:00
Dennis Schwerdel 1674f19309 More tests 2017-07-04 12:36:39 +02:00
Dennis Schwerdel 263339077e Encryption & compression tests/benches 2017-07-03 11:48:25 +02:00
Dennis Schwerdel 18891198f1 More tests 2017-06-27 15:25:20 +02:00
Dennis Schwerdel 8197c70697 Updated dependencies 2017-06-27 14:06:19 +02:00
Dennis Schwerdel f78de42980 Some tests 2017-06-20 15:36:00 +02:00
Dennis Schwerdel 709e5d1624 Removed unused code 2017-06-20 14:22:26 +02:00
Dennis Schwerdel 5bb9df761e Not removing prefixes from bundle names anymore 2017-06-20 14:19:45 +02:00
Dennis Schwerdel aad7bb675f Again excluding /dev by default 2017-06-20 14:08:52 +02:00
Dennis Schwerdel 98eb59c3e1 Added support for fifo files 2017-06-20 12:58:14 +02:00
Dennis Schwerdel 9231800f3b Added support for block/char devices 2017-06-20 12:38:16 +02:00
Dennis Schwerdel 8d45176146 Fixed tarfile import 2017-06-20 12:07:42 +02:00
Dennis Schwerdel e3ed5f628d Remote path must be absolute 2017-06-20 10:50:02 +02:00
Dennis Schwerdel 3b50267155 Added support for xattrs in fuse mount 2017-06-20 10:45:16 +02:00
Dennis Schwerdel 012e009bc4 Update 2017-06-19 23:22:33 +02:00
Dennis Schwerdel 2fe62cbe27 Using repository aliases 2017-05-17 09:43:14 +02:00
Dennis Schwerdel 645022ce9c Also documenting common flags in subcommands 2017-05-17 08:58:54 +02:00
Dennis Schwerdel ca28d3ebff Added `copy` subcommand 2017-05-17 07:35:41 +02:00
Dennis Schwerdel 148db7d627 Logo 2017-05-11 13:17:11 +02:00
Dennis Schwerdel 9590bfed26 Release v0.3.2 2017-05-11 10:57:54 +02:00
Dennis Schwerdel 2a1dc52c56 Skip root folder on restore & fixed exporting files with long names as tar files 2017-05-11 10:47:21 +02:00
Dennis Schwerdel 30bc7d80c5 Fixed reading tar files from stdin 2017-05-11 09:45:55 +02:00
Dennis Schwerdel 1cd58e180d Changed addkey arguments 2017-05-11 09:36:44 +02:00
98 changed files with 13331 additions and 2722 deletions

4
.gitignore vendored

@@ -6,3 +6,7 @@ restored
excludes
._*
.~*
docs/logo
lang/*.mo
lang/default.pot
.idea

36
.travis.yml Normal file

@@ -0,0 +1,36 @@
language: rust
dist: trusty
addons:
apt:
packages:
- libssl-dev
- libfuse-dev
install:
- wget https://github.com/jedisct1/libsodium/releases/download/1.0.8/libsodium-1.0.8.tar.gz
- tar xvfz libsodium-1.0.8.tar.gz
- cd libsodium-1.0.8 && ./configure --prefix=$HOME/installed_libs && make && make install && cd ..
- git clone https://github.com/quixdb/squash libsquash && cd libsquash && git checkout 5ea579cae2324f9e814cb3d88aa589dff312e9e2 && ./autogen.sh --prefix=$HOME/installed_libs --disable-external && make && make install && cd ..
- export PKG_CONFIG_PATH=$HOME/installed_libs/lib/pkgconfig:$PKG_CONFIG_PATH
- export LD_LIBRARY_PATH=$HOME/installed_libs/lib:$LD_LIBRARY_PATH
cache:
- cargo
- ccache
rust:
- stable
- beta
- nightly
matrix:
allow_failures:
- rust:
- beta
- stable
script:
- cargo clean
- cargo build
- cargo test
after_success: |
if [[ "$TRAVIS_RUST_VERSION" == nightly ]]; then
cargo install cargo-tarpaulin
cargo tarpaulin --ciserver travis-ci --coveralls $TRAVIS_JOB_ID
fi

CHANGELOG.md

@@ -3,6 +3,41 @@
This project follows [semantic versioning](http://semver.org).
### UNRELEASED
* [added] Translation infrastructure (**requires nightly rust**)
* [added] Checking hashes of chunks in check --bundle-data
* [added] Debian package for libsodium23
* [modified] Updated dependencies
* [modified] Updated copyright date
* [modified] Moved all code into one crate for easier translation
* [modified] Compression ratio is now displayed in a clearer format
* [fixed] Also including the first min_size bytes in hash
* [fixed] Fixed some texts in manpages
* [fixed] Calling strip on final binaries
* [fixed] Fixed bug that caused repairs to miss some errors
### v0.4.0 (2017-07-21)
* [added] Added `copy` subcommand
* [added] Added support for xattrs in fuse mount
* [added] Added support for block/char devices
* [added] Added support for fifo files
* [modified] Reformatted sources using rustfmt
* [modified] Also documenting common flags in subcommands
* [modified] Using repository aliases (**conversion needed**)
* [modified] Remote path must be absolute
* [modified] Not removing prefixes from bundle names anymore
* [fixed] Fixed tarfile import
### v0.3.2 (2017-05-11)
* [modified] Changed order of arguments in `addkey` to match src-dst scheme
* [modified] Skip root folder on restore
* [fixed] Fixed `addkey` subcommand
* [fixed] Fixed reading tar files from stdin
* [fixed] Fixed exporting files with long names as tar files
### v0.3.1 (2017-05-09)
* [added] Derive key pairs from passwords
* [modified] Added root repository to exclude list

584
Cargo.lock generated

@@ -1,61 +1,35 @@
[root]
name = "zvault"
version = "0.3.1"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"blake2-rfc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"chunking 0.1.0",
"clap 2.24.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"fuse 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"index 0.1.0",
"lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libsodium-sys 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"murmurhash3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"pbr 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rmp-serde 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_bytes 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_utils 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_yaml 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sodiumoxide 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
"squash-sys 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tar 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"users 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
"xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "aho-corasick"
version = "0.6.3"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ansi_term"
version = "0.9.0"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "arrayvec"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "atty"
version = "0.2.2"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -65,89 +39,95 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "0.8.2"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "blake2-rfc"
version = "0.2.17"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"constant_time_eq 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "byteorder"
version = "1.0.0"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "chrono"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "chunking"
version = "0.1.0"
dependencies = [
"quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clap"
version = "2.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
"strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "constant_time_eq"
name = "cfg-if"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "chrono"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "clap"
version = "2.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "constant_time_eq"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "crossbeam"
version = "0.2.10"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "filetime"
version = "0.1.10"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-zircon-sys"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "fuse"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"thread-scoped 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "index"
version = "0.1.0"
dependencies = [
"mmap 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"thread-scoped 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -161,7 +141,12 @@ dependencies = [
[[package]]
name = "lazy_static"
version = "0.2.8"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -171,39 +156,56 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.22"
version = "0.2.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libsodium-sys"
version = "0.0.14"
version = "0.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "linked-hash-map"
version = "0.3.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "linked-hash-map"
version = "0.4.2"
name = "locale_config"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "log"
version = "0.3.7"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "log"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "memchr"
version = "1.0.1"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -212,7 +214,7 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -220,36 +222,49 @@ name = "murmurhash3"
version = "0.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "nodrop"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num"
version = "0.1.37"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
"num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num-iter 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-integer"
version = "0.1.34"
version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-iter"
version = "0.1.33"
version = "0.1.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.1.37"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"num-traits 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -258,8 +273,8 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -270,74 +285,93 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "quick-error"
version = "1.2.0"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rand"
version = "0.3.15"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "redox_syscall"
version = "0.1.17"
version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex"
version = "0.2.1"
name = "redox_termios"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
version = "0.4.0"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rmp"
version = "0.8.6"
name = "remove_dir_all"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rmp"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rmp-serde"
version = "0.13.1"
version = "0.13.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rmp 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rmp 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde"
version = "0.9.15"
name = "runtime-fmt"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde"
version = "1.0.2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "serde_bytes"
version = "0.10.0"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -345,29 +379,29 @@ name = "serde_utils"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_bytes 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_bytes 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "serde_yaml"
version = "0.7.0"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"yaml-rust 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "sodiumoxide"
version = "0.0.14"
version = "0.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libsodium-sys 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"libsodium-sys 0.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -376,82 +410,77 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "strsim"
version = "0.6.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "tar"
version = "0.4.11"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tempdir"
version = "0.3.5"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"remove_dir_all 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "term_size"
version = "0.3.0"
name = "termion"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread-id"
version = "3.0.0"
name = "textwrap"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread-scoped"
version = "1.0.1"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "thread_local"
version = "0.3.3"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "time"
version = "0.1.37"
version = "0.1.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "unicode-segmentation"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unicode-width"
version = "0.1.4"
@@ -459,7 +488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "unreachable"
version = "0.1.1"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -467,10 +496,10 @@ dependencies = [
[[package]]
name = "users"
version = "0.5.2"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -480,7 +509,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "vec_map"
version = "0.7.0"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -493,88 +522,165 @@ name = "winapi"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-build"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "xattr"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "xattr"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "yaml-rust"
version = "0.3.5"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "zvault"
version = "0.5.0"
dependencies = [
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
"blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.31.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
"fuse 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)",
"libsodium-sys 0.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"locale_config 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"mmap 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"murmurhash3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
"pbr 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rmp-serde 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)",
"runtime-fmt 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_bytes 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_utils 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_yaml 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sodiumoxide 0.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"squash-sys 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
"users 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"xattr 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[metadata]
"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699"
"checksum ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "23ac7c30002a5accbf7e8987d0632fa6de155b7c3d39d0067317a391e00a2ef6"
"checksum atty 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d912da0db7fa85514874458ca3651fe2cddace8d0b0505571dbdcd41ab490159"
"checksum aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d6531d44de723825aa81398a6415283229725a00fa30713812ab9323faa82fc4"
"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
"checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef"
"checksum atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "af80143d6f7608d746df1520709e5d141c96f240b0e62b0aa41bdfb53374d9d4"
"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
"checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4"
"checksum blake2-rfc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0c6a476f32fef3402f1161f89d0d39822809627754a126f8441ff2a9d45e2d59"
"checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8"
"checksum chrono 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d9123be86fd2a8f627836c235ecdf331fdd067ecf7ac05aa1a68fbcf2429f056"
"checksum clap 2.24.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7541069be0b8aec41030802abe8b5cdef0490070afaa55418adea93b1e431e0"
"checksum constant_time_eq 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "07dcb7959f0f6f1cf662f9a7ff389bcb919924d99ac41cf31f10d611d8721323"
"checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97"
"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"
"checksum fuse 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5087262ce5b36fed6ccd4abf0a8224e48d055a2bb07fecb5605765de6f114a28"
"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf"
"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"
"checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23"
"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de"
"checksum chrono 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c20ebe0b2b08b0aeddba49c609fe7957ba2e33449882cb186a180bc60682fa9"
"checksum clap 2.31.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5dc18f6f4005132120d9711636b32c46a233fad94df6217fa1d81c5e97a9f200"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19"
"checksum filetime 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "714653f3e34871534de23771ac7b26e999651a0a228f47beb324dfdf1dd4b10f"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
"checksum fuse 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80e57070510966bfef93662a81cb8aa2b1c7db0964354fa9921434f04b9e8660"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d"
"checksum libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
"checksum libc 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)" = "babb8281da88cba992fa1f4ddec7d63ed96280a1a53ec9b919fd37b53d71e502"
"checksum libsodium-sys 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "cbbc6e46017815abf8698de0ed4847fad45fd8cad2909ac38ac6de79673c1ad1"
"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939"
"checksum log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5141eca02775a762cc6cd564d8d2c50f67c0ea3a372cbf1c51592b3e029e10ad"
"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4"
"checksum libc 0.2.39 (registry+https://github.com/rust-lang/crates.io-index)" = "f54263ad99207254cf58b5f701ecb432c717445ea2ee8af387334bdd1a03fdff"
"checksum libsodium-sys 0.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "fcbd1beeed8d44caa8a669ebaa697c313976e242c03cc9fb23d88bf1656f5542"
"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
"checksum locale_config 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "14fbee0e39bc2dd6a2427c4fdea66e9826cc1fd09b0a0b7550359f5f6efe1dab"
"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2"
"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d"
"checksum mmap 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc85448a6006dd2ba26a385a564a8a0f1f2c7e78c70f1a70b2e0f4af286b823"
"checksum murmurhash3 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
"checksum num 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "98b15ba84e910ea7a1973bccd3df7b31ae282bf9d8bd2897779950c9b8303d40"
"checksum num-integer 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "ef1a4bf6f9174aa5783a9b4cc892cacd11aebad6c69ad027a0b65c6ca5f8aa37"
"checksum num-iter 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)" = "f7d1891bd7b936f12349b7d1403761c8a0b85a18b148e9da4429d5d102c1a41e"
"checksum num-traits 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "e1cbfa3781f3fe73dc05321bed52a06d2d491eaa764c52335cf4399f046ece99"
"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2"
"checksum num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "4703ad64153382334aa8db57c637364c322d3372e097840c72000dabdcf6156e"
"checksum num-integer 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f8d26da319fb45674985c78f1d1caf99aa4941f785d384a2ae36d0740bc3e2fe"
"checksum num-iter 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "4b226df12c5a59b63569dd57fafb926d91b385dfce33d8074a412411b689d593"
"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"
"checksum num-traits 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3c2bd9b9d21e48e956b763c9f37134dc62d9e95da6edb3f672cacb6caf3cd3"
"checksum pbr 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e048e3afebb6c454bb1c5d0fe73fda54698b4715d78ed8e7302447c37736d23a"
"checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903"
"checksum quick-error 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3c36987d4978eb1be2e422b1e0423a557923a5c3e7e6f31d5699e9aafaefa469"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "29dbdfd4b9df8ab31dec47c6087b7b13cbf4a776f335e4de8efba8288dda075b"
"checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01"
"checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457"
"checksum rmp 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7ce560a5728f4eec697f07f8d7fa20608893d44b4f5b8f9f5f51a2987f3cffe2"
"checksum rmp-serde 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fe9599387fbc9af1a86a3ad0dc400f958acd6142a1e9c3167ff2acaefa591232"
"checksum serde 0.9.15 (registry+https://github.com/rust-lang/crates.io-index)" = "34b623917345a631dc9608d5194cc206b3fe6c3554cd1c75b937e55e285254af"
"checksum serde 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3b46a59dd63931010fdb1d88538513f3279090d88b5c22ef4fe8440cfffcc6e3"
"checksum serde_bytes 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a73f5ad9bb83e1e407254c7a355f4efdaffe3c1442fc0657ddb8b9b6b225655"
"checksum quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eda5fe9b71976e62bc81b781206aaa076401769b2143379d3eb2118388babac4"
"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5"
"checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd"
"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
"checksum regex 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5be5347bde0c48cfd8c3fdc0766cdfe9d8a755ef84d620d6794c778c91de8b2b"
"checksum regex-syntax 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8e931c58b93d86f080c734bfd2bce7dd0079ae2331235818133c8be7f422e20e"
"checksum remove_dir_all 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b5d2f806b0fcdabd98acd380dc8daef485e22bcb7cddc811d1337967f2528cf5"
"checksum rmp 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a3d45d7afc9b132b34a2479648863aa95c5c88e98b32285326a6ebadc80ec5c9"
"checksum rmp-serde 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)" = "011e1d58446e9fa3af7cdc1fb91295b10621d3ac4cb3a85cc86385ee9ca50cd3"
"checksum runtime-fmt 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "647a821d66049faccc993fc3c379d1181b81a484097495cda79ffdb17b55b87f"
"checksum serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)" = "db99f3919e20faa51bb2996057f5031d8685019b5a06139b1ce761da671b8526"
"checksum serde_bytes 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "52b678af90a3aebc4484c22d639bf374eb7d598988edb33fa73c4febd6046a59"
"checksum serde_utils 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f6e0edb364c93646633800df969086bc7c5c25fb3f1eb57349990d1cb4cae4bc"
"checksum serde_yaml 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67dbc8620027a35776aa327847d48f70fd4531a1d2b7774f26247869b508d1b2"
"checksum sodiumoxide 0.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "bc02c0bc77ffed8e8eaef004399b825cf4fd8aa02d0af6e473225affd583ff4d"
"checksum serde_yaml 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e0f868d400d9d13d00988da49f7f02aeac6ef00f11901a8c535bd59d777b9e19"
"checksum sodiumoxide 0.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "eb5cb2f14f9a51352ad65e59257a0a9459d5a36a3615f3d53a974c82fdaaa00a"
"checksum squash-sys 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db1f9dde91d819b7746e153bc32489fa19e6a106c3d7f2b92187a4efbdc88b40"
"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
"checksum tar 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c2374f318bbe2c5ac6c83dd6240d5f1a73106f72d39b3f7d6f8d8637c7b425d8"
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
"checksum term_size 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2b6b55df3198cc93372e85dd2ed817f0e38ce8cc0f22eb32391bfad9c4bf209"
"checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a"
"checksum thread-scoped 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "14387dce246d09efe184c8ebc34d9db5c0672a908b2f50efc53359ae13d5ae68"
"checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7"
"checksum time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd7ccbf969a892bf83f1e441126968a07a3941c24ff522a26af9f9f4585d1a3"
"checksum unicode-segmentation 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18127285758f0e2c6cf325bb3f3d138a12fee27de4f23e146cd6a179f26c2cf3"
"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
"checksum tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)" = "1605d3388ceb50252952ffebab4b5dc43017ead7e4481b175961c283bb951195"
"checksum tempdir 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f73eebdb68c14bcb24aef74ea96079830e7fa7b31a6106e42ea7ee887c1e134e"
"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693"
"checksum thread-scoped 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bcbb6aa301e5d3b0b5ef639c9a9c7e2f1c944f177b460c04dc24c69b1fa2bd99"
"checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963"
"checksum time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "a15375f1df02096fb3317256ce2cee6a1f42fc84ea5ad5fc8c421cfe40c73098"
"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91"
"checksum users 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7ae8fdf783cb9652109c99886459648feb92ecc749e6b8e7930f6decba74c7c"
"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
"checksum users 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "99ab1b53affc9f75f57da4a8b051a188e84d20d43bea0dd9bd8db71eebbca6da"
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
"checksum vec_map 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8cdc8b93bd0198ed872357fb2e667f7125646b1762f16d60b2c96350d361897"
"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c"
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "5f04de8a1346489a2f9e9bd8526b73d135ec554227b17568456e86aa35b6f3fc"
"checksum yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992"
"checksum xattr 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "abb373b92de38a4301d66bec009929b4fb83120ea1c4a401be89dbe0b9777443"
"checksum yaml-rust 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57ab38ee1a4a266ed033496cf9af1828d8d6e6c1cfa5f643a2809effcae4d628"

Cargo.toml

@@ -1,9 +1,12 @@
[package]
name = "zvault"
version = "0.3.1"
version = "0.5.0"
authors = ["Dennis Schwerdel <schwerdel@googlemail.com>"]
description = "Deduplicating backup tool"
[profile.release]
lto = true
[dependencies]
serde = "1.0"
rmp-serde = "0.13"
@@ -14,27 +17,29 @@ squash-sys = "0.9"
quick-error = "1.1"
blake2-rfc = "0.2"
murmurhash3 = "0.0.5"
chrono = "0.3"
clap = "2.23"
log = "0.3"
chrono = "0.4"
clap = "^2.24"
log = "0.4"
byteorder = "1.0"
ansi_term = "0.9"
sodiumoxide = "0.0.14"
libsodium-sys = "0.0.14"
ansi_term = "0.11"
sodiumoxide = "0.0.16"
libsodium-sys = "0.0.16"
filetime = "0.1"
regex = "0.2"
fuse = "0.3"
lazy_static = "0.2"
rand = "0.3"
lazy_static = "1.0"
rand = "0.4"
tar = "0.4"
xattr = "0.1"
crossbeam = "0.2"
xattr = "0.2"
crossbeam = "0.3"
pbr = "1.0"
users = "0.5"
users = "0.6"
time = "*"
libc = "0.2"
index = {path="index"}
chunking = {path="chunking"}
runtime-fmt = "0.3"
locale_config = "^0.2.2"
mmap = "0.1"
[build-dependencies]
pkg-config = "0.3"
[features]
default = []
bench = []

LICENSE.md

@@ -1,7 +1,7 @@
# License: GPL-3
zVault - Deduplicating backup solution
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

108
README.md

@@ -1,4 +1,8 @@
# zVault Backup Solution
[![Build Status](https://travis-ci.org/dswd/zvault.svg?branch=master)](https://travis-ci.org/dswd/zvault)
[![Coverage Status](https://coveralls.io/repos/dswd/zvault/badge.svg?branch=master&service=github)](https://coveralls.io/github/dswd/zvault?branch=master)
zVault is a highly efficient deduplicating backup solution that supports
client-side encryption, compression and remote storage of backup data.
@@ -12,7 +16,7 @@ and leave most chunks unchanged. Multiple backups of the same data set will only
take up the space of one copy.
The deduplication in zVault is able to reuse existing data no matter whether a
file is modified, stored again under a different name, renamed or moved to
file is modified, stored again under a different name, renamed or moved to a
different folder.
That makes it possible to store daily backups without much overhead as backups
@ -84,60 +88,80 @@ their contents. Once mounted, graphical programs like file managers can be used
to work on the backup data and find the needed files.
## Example usage
## Example scenario
As an example, I am going to back up my projects folder. To do that, I am
initializing an encrypted zVault repository, storing the data on a remote
filesystem which has been mounted on `/mnt/backup`.
#$> zvault init :: --encrypt --remote /mnt/backup
public: 2bea1d15...
secret: 3698a88c...
I am using zVault on several of my computers. Here are some numbers from my
desktop PC. On this computer I am running daily backups of both the system `/`
(excluding some folders like `/home`) with 12.9 GiB and the home folder `/home`
with 53.6 GiB.
$> zvault config ::
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: brotli/3
Encryption: 2bea1d15...
Encryption: 8678d...
Hash method: blake2
The repository has been created and zVault has generated a new key pair for me.
I should now store this key pair in a safe location before I continue.
The backup repository uses the default configuration with encryption enabled.
The repository currently contains 12 backup versions of each folder. Both
folders combined currently contain over 66.5 GiB not counting changes between
the different versions.
Now I can backup my home directory to the repository.
$> zvault info ::
Bundles: 1675
Total size: 37.9 GiB
Uncompressed size: 58.1 GiB
Compression ratio: 65.3%
Chunk count: 5580237
Average chunk size: 10.9 KiB
Index: 192.0 MiB, 67% full
#$> zvault backup /home/dswd/projects ::projects1
info: No reference backup found, doing a full scan instead
Date: Thu, 6 Apr 2017 20:33:20 +0200
Source: dswd-desktop:/home/dswd/projects
Duration: 0:00:26.2
Entries: 14618 files, 6044 dirs
Total backup size: 1.4 GiB
Modified data size: 1.4 GiB
Deduplicated size: 1.2 GiB, 14.9% saved
Compressed size: 0.5 GiB in 23 bundles, 54.7% saved
Chunk count: 95151, avg size: 12.8 KiB
The repository info reveals that the data stored in the repository is only
58.1 GiB, so 8.4 GiB / 12.5% has been saved by deduplication. Another 20.2 GiB /
34.7% have been saved by compression. In total, 28.6 out of 66.5 GiB / 43% have
been saved.
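The savings arithmetic can be reproduced from the three sizes reported above; a minimal sketch in plain Rust, with the sizes hard-coded from the `zvault info` output (the 12.5% above was presumably computed from unrounded sizes; the rounded inputs give 12.6%):

```
// Savings arithmetic for the `zvault info` output above (sizes in GiB).
fn main() {
    let total = 66.5_f64;    // combined size of all backup versions
    let deduped = 58.1_f64;  // uncompressed size after deduplication
    let stored = 37.9_f64;   // compressed size actually stored
    println!("deduplication: {:.1} GiB ({:.1}%)", total - deduped, (total - deduped) / total * 100.0);
    println!("compression:   {:.1} GiB ({:.1}%)", deduped - stored, (deduped - stored) / deduped * 100.0);
    println!("total saved:   {:.1} GiB ({:.1}%)", total - stored, (total - stored) / total * 100.0);
}
```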
The backup run took about 26 seconds and by looking at the data, I see that
deduplication saved about 15% and compression again saved over 50% so that in
the end my backup only uses 0.5 GiB out of 1.4 GiB.
The data is stored in over 5 million chunks of an average size of 10.9 KiB. The
average chunk is smaller than configured because of files smaller than the chunk
size. The chunks are stored in an index file which takes up 192 MiB on disk and
in memory during backup runs. Additionally, 337 MiB of bundle data is stored
locally to allow fast access to metadata. In total that is less than 1% of the
original data.
After some work, I create another backup.
$> zvault info ::home/2017-06-19
Date: Mon, 19 Jun 2017 00:00:48 +0200
Source: desktop:/home
Duration: 0:01:57.2
Entries: 193624 files, 40651 dirs
Total backup size: 53.6 GiB
Modified data size: 2.4 GiB
Deduplicated size: 50.8 MiB, 97.9% saved
Compressed size: 8.9 MiB in 2 bundles, 82.4% saved
Chunk count: 2443, avg size: 21.3 KiB
#$> zvault backup /home/dswd/projects ::projects2
info: Using backup projects1 as reference
Date: Thu, 6 Apr 2017 20:46:19 +0200
Source: dswd-desktop:/home/dswd/projects
Duration: 0:00:00.7
Entries: 14626 files, 6046 dirs
Total backup size: 1.4 GiB
Modified data size: 27.2 MiB
Deduplicated size: 17.2 MiB, 36.9% saved
Compressed size: 6.1 MiB in 2 bundles, 64.4% saved
Chunk count: 995, avg size: 17.7 KiB
This is the information on the last backup run for `/home`. The total data in
that backup is 53.6 GiB of which 2.4 GiB have been detected to have changed by
comparing file dates and sizes to the last backup. Of those changed files,
deduplication reduced the data to 50.8 MiB and compression reduced this to
8.9 MiB. The whole backup run took less than 2 minutes.
This time, the backup run took less than a second as zVault skipped most of
the folder because it was unchanged. The backup only stored 6.1 MiB of data.
This shows the true potential of deduplication.
$> zvault info ::system/2017-06-19
Date: Mon, 19 Jun 2017 00:00:01 +0200
Source: desktop:/
Duration: 0:00:46.5
Entries: 435905 files, 56257 dirs
Total backup size: 12.9 GiB
Modified data size: 43.1 MiB
Deduplicated size: 6.8 MiB, 84.2% saved
Compressed size: 1.9 MiB in 2 bundles, 72.3% saved
Chunk count: 497, avg size: 14.0 KiB
The information of the last backup run for `/` looks similar. Out of a total
backup size of 12.9 GiB, only 43.1 MiB were modified; deduplication and
compression reduced that new data to 1.9 MiB, and the backup took less than
one minute.
This data seems representative as other backup runs and other systems yield
similar results.
### Semantic Versioning

chunking/Cargo.lock

@ -1,14 +0,0 @@
[root]
name = "chunking"
version = "0.1.0"
dependencies = [
"quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quick-error"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"

chunking/Cargo.toml

@ -1,7 +0,0 @@
[package]
name = "chunking"
version = "0.1.0"
authors = ["Dennis Schwerdel <schwerdel@googlemail.com>"]
[dependencies]
quick-error = "1.1"


@ -1,119 +0,0 @@
use super::*;
use std::ptr;

// FastCDC
// Paper: "FastCDC: a Fast and Efficient Content-Defined Chunking Approach for Data Deduplication"
// Paper-URL: https://www.usenix.org/system/files/conference/atc16/atc16-paper-xia.pdf
// Presentation: https://www.usenix.org/sites/default/files/conference/protected-files/atc16_slides_xia.pdf

// Creating 256 pseudo-random values (based on Knuth's MMIX)
fn create_gear(seed: u64) -> [u64; 256] {
    let mut table = [0u64; 256];
    let a = 6364136223846793005;
    let c = 1442695040888963407;
    let mut v = seed;
    for t in &mut table.iter_mut() {
        v = v.wrapping_mul(a).wrapping_add(c);
        *t = v;
    }
    table
}

fn get_masks(avg_size: usize, nc_level: usize, seed: u64) -> (u64, u64) {
    let bits = (avg_size.next_power_of_two() - 1).count_ones();
    if bits == 13 {
        // From the paper
        return (0x0003590703530000, 0x0000d90003530000);
    }
    let mut mask = 0u64;
    let mut v = seed;
    let a = 6364136223846793005;
    let c = 1442695040888963407;
    while mask.count_ones() < bits - nc_level as u32 {
        v = v.wrapping_mul(a).wrapping_add(c);
        mask = (mask | 1).rotate_left(v as u32 & 0x3f);
    }
    let mask_long = mask;
    while mask.count_ones() < bits + nc_level as u32 {
        v = v.wrapping_mul(a).wrapping_add(c);
        mask = (mask | 1).rotate_left(v as u32 & 0x3f);
    }
    let mask_short = mask;
    (mask_short, mask_long)
}

pub struct FastCdcChunker {
    buffer: [u8; 4096],
    buffered: usize,
    gear: [u64; 256],
    min_size: usize,
    max_size: usize,
    avg_size: usize,
    mask_long: u64,
    mask_short: u64,
}

impl FastCdcChunker {
    pub fn new(avg_size: usize, seed: u64) -> Self {
        let (mask_short, mask_long) = get_masks(avg_size, 2, seed);
        FastCdcChunker {
            buffer: [0; 4096],
            buffered: 0,
            gear: create_gear(seed),
            min_size: avg_size/4,
            max_size: avg_size*8,
            avg_size: avg_size,
            mask_long: mask_long,
            mask_short: mask_short,
        }
    }
}

impl Chunker for FastCdcChunker {
    #[allow(unknown_lints,explicit_counter_loop,needless_range_loop)]
    fn chunk(&mut self, r: &mut Read, mut w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
        let mut max;
        let mut hash = 0u64;
        let mut pos = 0;
        let gear = &self.gear;
        let buffer = &mut self.buffer;
        let min_size = self.min_size;
        let mask_short = self.mask_short;
        let mask_long = self.mask_long;
        let avg_size = self.avg_size;
        let max_size = self.max_size;
        loop {
            // Fill the buffer, there might be some bytes still in there from last chunk
            max = try!(r.read(&mut buffer[self.buffered..]).map_err(ChunkerError::Read)) + self.buffered;
            // If nothing to do, finish
            if max == 0 {
                return Ok(ChunkerStatus::Finished)
            }
            for i in 0..max {
                if pos >= min_size {
                    // Hash update
                    hash = (hash << 1).wrapping_add(gear[buffer[i] as usize]);
                    // 3 options for break point
                    // 1) mask_short matches and chunk is smaller than average
                    // 2) mask_long matches and chunk is longer or equal to average
                    // 3) chunk reached max_size
                    if pos < avg_size && hash & mask_short == 0
                        || pos >= avg_size && hash & mask_long == 0
                        || pos >= max_size {
                        // Write all bytes from this chunk out to sink and store rest for next chunk
                        try!(w.write_all(&buffer[..i+1]).map_err(ChunkerError::Write));
                        unsafe { ptr::copy(buffer[i+1..].as_ptr(), buffer.as_mut_ptr(), max-i-1) };
                        self.buffered = max-i-1;
                        return Ok(ChunkerStatus::Continue);
                    }
                }
                pos += 1;
            }
            try!(w.write_all(&buffer[..max]).map_err(ChunkerError::Write));
            self.buffered = 0;
        }
    }
}
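To make the interface above concrete, here is a small hypothetical driver for it; this is a sketch only, assuming the crate's `Chunker` trait, `ChunkerStatus` and `ChunkerError` types exactly as used in the code above. Each call to `chunk` writes one chunk to the sink and returns `Continue`; the final call may flush trailing data and returns `Finished`.

```
use std::io::Cursor;

// Hypothetical helper: split `data` into content-defined chunks and return
// their sizes. 16 KiB is the average chunk size, 0 the gear seed.
fn chunk_sizes(data: &[u8]) -> Result<Vec<usize>, ChunkerError> {
    let mut chunker = FastCdcChunker::new(16 * 1024, 0);
    let mut input = Cursor::new(data);
    let mut sizes = Vec::new();
    loop {
        let mut chunk = Vec::new();
        let status = try!(chunker.chunk(&mut input, &mut chunk));
        if !chunk.is_empty() {
            sizes.push(chunk.len());
        }
        if let ChunkerStatus::Finished = status {
            return Ok(sizes);
        }
    }
}
```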


@ -25,6 +25,7 @@ $(PACKAGE)/man/*: ../docs/man/*
$(PACKAGE)/zvault: ../target/release/zvault
cp ../target/release/zvault $(PACKAGE)/zvault
strip -s $(PACKAGE)/zvault
../target/release/zvault: ../src/*.rs ../Cargo.toml
(cd ..; cargo build --release)


@ -1,3 +1,28 @@
zvault (0.4.0) stable; urgency=medium
* [added] Added `copy` subcommand
* [added] Added support for xattrs in fuse mount
* [added] Added support for block/char devices
* [added] Added support for fifo files
* [modified] Reformatted sources using rustfmt
* [modified] Also documenting common flags in subcommands
* [modified] Using repository aliases (**conversion needed**)
* [modified] Remote path must be absolute
* [modified] Not removing prefixes from bundle names anymore
* [fixed] Fixed tarfile import
-- Dennis Schwerdel <schwerdel@informatik.uni-kl.de> Fri, 21 Jul 2017 11:25:32 +0200
zvault (0.3.2) stable; urgency=medium
* [modified] Changed order of arguments in `addkey` to match src-dst scheme
* [modified] Skip root folder on restore
* [fixed] Fixed `addkey` subcommand
* [fixed] Fixed reading tar files from stdin
* [fixed] Fixed exporting files with long names as tar files
-- Dennis Schwerdel <schwerdel@informatik.uni-kl.de> Thu, 11 May 2017 10:52:51 +0200
zvault (0.3.1) stable; urgency=medium
* [added] Derive key pairs from passwords


@ -1,269 +1,396 @@
++ rm -rf repos
++ mkdir repos
++ target/release/zvault init --compression brotli/3 repos/zvault_brotli3
real 0m0.003s
user 0m0.000s
sys 0m0.000s
++ target/release/zvault init --compression brotli/6 repos/zvault_brotli6
real 0m0.004s
user 0m0.000s
sys 0m0.000s
++ target/release/zvault init --compression lzma2/2 repos/zvault_lzma2
real 0m0.004s
user 0m0.000s
sys 0m0.000s
++ mkdir -p repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2
+++ pwd
+++ pwd
++ target/release/zvault init --compression brotli/3 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_brotli3 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: brotli/3
Encryption: none
Hash method: blake2
+++ pwd
+++ pwd
++ target/release/zvault init --compression brotli/6 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_brotli6 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: brotli/6
Encryption: none
Hash method: blake2
+++ pwd
+++ pwd
++ target/release/zvault init --compression lzma2/2 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_lzma2 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: lzma/2
Encryption: none
Hash method: blake2
++ attic init repos/attic
Initializing repository at "repos/attic"
Encryption NOT enabled.
Use the "--encryption=passphrase|keyfile" to enable encryption.
Initializing cache...
real 0m0.147s
user 0m0.116s
sys 0m0.012s
++ borg init -e none repos/borg
real 0m0.403s
user 0m0.336s
sys 0m0.048s
++ borg init -e none repos/borg-zlib
real 0m0.338s
user 0m0.292s
sys 0m0.024s
++ zbackup init --non-encrypted repos/zbackup
++ find test_data/silesia -type f
++ xargs cat
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3::silesia1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:44:37 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:04.0
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 202.3 MiB
Deduplicated size: 202.3 MiB, -0.0%
Compressed size: 64.5 MiB in 4 bundles, -68.1%
Chunk count: 11017, avg size: 18.8 KiB
real 0m0.009s
user 0m0.000s
sys 0m0.000s
++ cat
++ target/release/zvault backup repos/zvault_brotli3::silesia1 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m4.049s
user 0m3.714s
sys 0m0.504s
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3::silesia2
info: Using backup silesia1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:44:41 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:00.0
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m6.034s
user 0m5.508s
sys 0m0.424s
++ target/release/zvault backup repos/zvault_brotli3::silesia2 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.009s
user 0m0.004s
sys 0m0.004s
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6::silesia1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:44:41 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:16.1
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 202.3 MiB
Deduplicated size: 202.3 MiB, -0.0%
Compressed size: 56.9 MiB in 4 bundles, -71.9%
Chunk count: 11017, avg size: 18.8 KiB
real 0m1.425s
user 0m1.348s
sys 0m0.076s
++ target/release/zvault backup repos/zvault_brotli6::silesia1 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m16.100s
user 0m15.441s
sys 0m0.833s
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6::silesia2
info: Using backup silesia1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:44:57 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:00.0
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m23.035s
user 0m22.156s
sys 0m0.692s
++ target/release/zvault backup repos/zvault_brotli6::silesia2 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.008s
user 0m0.000s
sys 0m0.008s
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2::silesia1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:44:57 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:45.1
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 202.3 MiB
Deduplicated size: 202.3 MiB, -0.0%
Compressed size: 53.9 MiB in 4 bundles, -73.3%
Chunk count: 11017, avg size: 18.8 KiB
real 0m1.150s
user 0m1.120s
sys 0m0.024s
++ target/release/zvault backup repos/zvault_lzma2::silesia1 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m45.068s
user 0m44.571s
sys 0m0.628s
+++ pwd
++ target/release/zvault backup test_data/silesia /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2::silesia2
info: Using backup silesia1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:45:42 +0100
Source: lap-it-032:test_data/silesia
Duration: 0:00:00.0
Entries: 12 files, 1 dirs
Total backup size: 202.3 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m54.011s
user 0m53.044s
sys 0m0.728s
++ target/release/zvault backup repos/zvault_lzma2::silesia2 test_data/silesia.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.030s
user 0m0.019s
sys 0m0.011s
++ attic create repos/attic::silesia1 test_data/silesia
real 0m1.157s
user 0m1.108s
sys 0m0.040s
++ attic create repos/attic::silesia1 test_data/silesia.tar
real 0m12.686s
user 0m11.810s
sys 0m0.373s
++ attic create repos/attic::silesia2 test_data/silesia
real 0m13.427s
user 0m12.256s
sys 0m0.476s
++ attic create repos/attic::silesia2 test_data/silesia.tar
real 0m0.265s
user 0m0.185s
sys 0m0.047s
++ borg create -C none repos/borg::silesia1 test_data/silesia
real 0m1.930s
user 0m1.804s
sys 0m0.092s
++ borg create -C none repos/borg::silesia1 test_data/silesia.tar
real 0m4.206s
user 0m2.139s
sys 0m0.870s
++ borg create -C none repos/borg::silesia2 test_data/silesia
real 0m5.246s
user 0m2.516s
sys 0m1.132s
++ borg create -C none repos/borg::silesia2 test_data/silesia.tar
real 0m0.455s
user 0m0.357s
sys 0m0.071s
++ borg create -C zlib repos/borg-zlib::silesia1 test_data/silesia
real 0m3.029s
user 0m2.408s
sys 0m0.428s
++ borg create -C zlib repos/borg-zlib::silesia1 test_data/silesia.tar
real 0m13.184s
user 0m12.293s
sys 0m0.500s
++ borg create -C zlib repos/borg-zlib::silesia2 test_data/silesia
real 0m14.833s
user 0m13.524s
sys 0m0.692s
++ borg create -C zlib repos/borg-zlib::silesia2 test_data/silesia.tar
real 0m2.413s
user 0m1.996s
sys 0m0.368s
real 0m0.416s
user 0m0.335s
sys 0m0.059s
++ tar -c test_data/silesia
++ zbackup backup --non-encrypted repos/zbackup/backups/silesia1
Loading index...
Index loaded.
Using up to 4 thread(s) for compression
real 0m52.613s
user 3m12.460s
sys 0m2.568s
real 0m52.286s
user 2m52.262s
sys 0m3.453s
++ tar -c test_data/silesia
++ zbackup backup --non-encrypted repos/zbackup/backups/silesia2
Loading index...
Loading index file 1e374b3c9ce07b4d9ad4238e35e5834c07d3a4ca984bb842...
Loading index file 6ff054dcc4af8c472a5fbd661a8f61409e44a4fafc287d4d...
Index loaded.
Using up to 4 thread(s) for compression
real 0m2.141s
user 0m2.072s
sys 0m0.064s
real 0m1.983s
user 0m1.844s
sys 0m0.315s
++ du -h test_data/silesia.tar
203M test_data/silesia.tar
++ du -sh repos/zvault_brotli3/bundles repos/zvault_brotli6/bundles repos/zvault_lzma2/bundles repos/attic repos/borg repos/borg-zlib repos/zbackup
66M repos/zvault_brotli3/bundles
58M repos/zvault_brotli6/bundles
55M repos/zvault_lzma2/bundles
68M repos/attic
203M repos/borg
66M repos/borg-zlib
52M repos/zbackup
203M test_data/silesia.tar
++ du -sh repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2 repos/attic repos/borg repos/borg-zlib repos/zbackup
65M repos/remotes/zvault_brotli3
58M repos/remotes/zvault_brotli6
55M repos/remotes/zvault_lzma2
68M repos/attic
203M repos/borg
66M repos/borg-zlib
52M repos/zbackup
++ rm -rf repos
++ mkdir repos
++ target/release/zvault init --compression brotli/3 repos/zvault_brotli3
real 0m0.004s
user 0m0.000s
sys 0m0.000s
++ target/release/zvault init --compression brotli/6 repos/zvault_brotli6
real 0m0.003s
user 0m0.000s
sys 0m0.000s
++ target/release/zvault init --compression lzma2/2 repos/zvault_lzma2
real 0m0.003s
user 0m0.000s
sys 0m0.000s
++ mkdir -p repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2
+++ pwd
+++ pwd
++ target/release/zvault init --compression brotli/3 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_brotli3 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: brotli/3
Encryption: none
Hash method: blake2
+++ pwd
+++ pwd
++ target/release/zvault init --compression brotli/6 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_brotli6 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: brotli/6
Encryption: none
Hash method: blake2
+++ pwd
+++ pwd
++ target/release/zvault init --compression lzma2/2 --remote /home/dschwerdel/shared/projekte/zvault.rs/repos/remotes/zvault_lzma2 /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2
Bundle size: 25.0 MiB
Chunker: fastcdc/16
Compression: lzma/2
Encryption: none
Hash method: blake2
++ attic init repos/attic
Initializing repository at "repos/attic"
Encryption NOT enabled.
Use the "--encryption=passphrase|keyfile" to enable encryption.
Initializing cache...
real 0m0.169s
user 0m0.136s
sys 0m0.012s
++ borg init -e none repos/borg
real 0m0.364s
user 0m0.320s
sys 0m0.020s
++ borg init -e none repos/borg-zlib
real 0m0.393s
user 0m0.352s
sys 0m0.020s
++ zbackup init --non-encrypted repos/zbackup
++ find test_data/ubuntu -type f
++ xargs cat
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3::ubuntu1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:09 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:02.0
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 83.2 MiB
Deduplicated size: 74.7 MiB, -10.2%
Compressed size: 29.6 MiB in 3 bundles, -60.3%
Chunk count: 12038, avg size: 6.4 KiB
real 0m0.003s
user 0m0.000s
sys 0m0.000s
++ cat
++ target/release/zvault backup repos/zvault_brotli3::ubuntu1 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m2.009s
user 0m1.718s
sys 0m0.369s
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli3::ubuntu2
info: Using backup ubuntu1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:11 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:00.1
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m5.496s
user 0m5.000s
sys 0m0.492s
++ target/release/zvault backup repos/zvault_brotli3::ubuntu2 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.112s
user 0m0.032s
sys 0m0.079s
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6::ubuntu1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:11 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:07.6
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 83.2 MiB
Deduplicated size: 74.7 MiB, -10.2%
Compressed size: 24.1 MiB in 2 bundles, -67.7%
Chunk count: 12038, avg size: 6.4 KiB
real 0m1.156s
user 0m1.104s
sys 0m0.048s
++ target/release/zvault backup repos/zvault_brotli6::ubuntu1 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m7.572s
user 0m7.156s
sys 0m0.424s
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_brotli6::ubuntu2
info: Using backup ubuntu1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:19 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:00.1
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m21.012s
user 0m20.524s
sys 0m0.464s
++ target/release/zvault backup repos/zvault_brotli6::ubuntu2 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.127s
user 0m0.058s
sys 0m0.065s
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2::ubuntu1
info: No reference backup found, doing a full scan instead
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:19 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:17.6
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 83.2 MiB
Deduplicated size: 74.7 MiB, -10.2%
Compressed size: 21.6 MiB in 2 bundles, -71.1%
Chunk count: 12038, avg size: 6.4 KiB
real 0m0.999s
user 0m0.964s
sys 0m0.032s
++ target/release/zvault backup repos/zvault_lzma2::ubuntu1 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m17.619s
user 0m17.223s
sys 0m0.376s
+++ pwd
++ target/release/zvault backup test_data/ubuntu /home/dschwerdel/shared/projekte/zvault.rs/repos/zvault_lzma2::ubuntu2
info: Using backup ubuntu1 as reference
info: Backup finished
Date: Sun, 4 Mar 2018 16:47:37 +0100
Source: lap-it-032:test_data/ubuntu
Duration: 0:00:00.1
Entries: 4418 files, 670 dirs
Total backup size: 83.2 MiB
Modified data size: 0 Byte
Deduplicated size: 0 Byte, NaN%
Compressed size: 0 Byte in 0 bundles, NaN%
Chunk count: 0, avg size: 0 Byte
real 0m55.683s
user 0m54.992s
sys 0m0.656s
++ target/release/zvault backup repos/zvault_lzma2::ubuntu2 test_data/ubuntu.tar
WARN - Partial backups are not implemented yet, creating full backup
real 0m0.136s
user 0m0.080s
sys 0m0.056s
++ attic create repos/attic::ubuntu1 test_data/ubuntu
real 0m0.995s
user 0m0.968s
sys 0m0.024s
++ attic create repos/attic::ubuntu1 test_data/ubuntu.tar
real 0m6.915s
user 0m6.175s
sys 0m0.503s
++ attic create repos/attic::ubuntu2 test_data/ubuntu
real 0m13.093s
user 0m11.880s
sys 0m0.512s
++ attic create repos/attic::ubuntu2 test_data/ubuntu.tar
real 0m0.554s
user 0m0.416s
sys 0m0.107s
++ borg create -C none repos/borg::ubuntu1 test_data/ubuntu
real 0m1.722s
user 0m1.620s
sys 0m0.072s
++ borg create -C none repos/borg::ubuntu1 test_data/ubuntu.tar
real 0m3.047s
user 0m1.872s
sys 0m0.576s
++ borg create -C none repos/borg::ubuntu2 test_data/ubuntu
real 0m4.551s
user 0m2.120s
sys 0m1.012s
++ borg create -C none repos/borg::ubuntu2 test_data/ubuntu.tar
real 0m0.929s
user 0m0.695s
sys 0m0.175s
++ borg create -C zlib repos/borg-zlib::ubuntu1 test_data/ubuntu
real 0m2.403s
user 0m1.996s
sys 0m0.308s
++ borg create -C zlib repos/borg-zlib::ubuntu1 test_data/ubuntu.tar
real 0m7.859s
user 0m7.100s
sys 0m0.484s
++ borg create -C zlib repos/borg-zlib::ubuntu2 test_data/ubuntu
real 0m14.114s
user 0m12.768s
sys 0m0.648s
++ borg create -C zlib repos/borg-zlib::ubuntu2 test_data/ubuntu.tar
real 0m2.091s
user 0m1.780s
sys 0m0.280s
real 0m0.955s
user 0m0.720s
sys 0m0.183s
++ tar -c test_data/ubuntu
++ zbackup backup --non-encrypted repos/zbackup/backups/ubuntu1
Loading index...
Index loaded.
Using up to 4 thread(s) for compression
real 0m38.218s
user 2m21.564s
sys 0m3.832s
real 0m17.229s
user 0m58.868s
sys 0m1.395s
++ zbackup backup --non-encrypted repos/zbackup/backups/ubuntu2
++ tar -c test_data/ubuntu
Loading index...
Loading index file 4f106a9d29c26e4132ae67e9528e1ed6f8579fe6ee6fd671...
Loading index file 6429a26e69a74bb1ae139efc7fb1446881a15d3c4170c9b5...
Index loaded.
Using up to 4 thread(s) for compression
real 0m1.755s
user 0m1.728s
sys 0m0.024s
real 0m1.033s
user 0m0.856s
sys 0m0.177s
++ du -h test_data/ubuntu.tar
176M test_data/ubuntu.tar
++ du -sh repos/zvault_brotli3/bundles repos/zvault_brotli6/bundles repos/zvault_lzma2/bundles repos/attic repos/borg repos/borg-zlib repos/zbackup
77M repos/zvault_brotli3/bundles
68M repos/zvault_brotli6/bundles
63M repos/zvault_lzma2/bundles
84M repos/attic
176M repos/borg
83M repos/borg-zlib
64M repos/zbackup
98M test_data/ubuntu.tar
++ du -sh repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2 repos/attic repos/borg repos/borg-zlib repos/zbackup
30M repos/remotes/zvault_brotli3
25M repos/remotes/zvault_brotli6
22M repos/remotes/zvault_lzma2
35M repos/attic
83M repos/borg
36M repos/borg-zlib
24M repos/zbackup

docs/comparison.md

@ -0,0 +1,25 @@
## Silesia corpus
| Tool | 1st run | 2nd run | Repo Size |
| -------------- | -------:| -------:| ---------:|
| zvault/brotli3 | 4.0s | 0.0s | 65 MiB |
| zvault/brotli6 | 16.1s | 0.0s | 58 MiB |
| zvault/lzma2 | 45.1s | 0.0s | 55 MiB |
| attic | 12.7s | 0.3s | 68 MiB |
| borg | 4.2s | 0.5s | 203 MiB |
| borg/zlib | 13.2s | 0.4s | 66 MiB |
| zbackup | 52.3s | 2.0s | 52 MiB |
## Ubuntu 16.04 docker image
| Tool | 1st run | 2nd run | Repo Size |
| -------------- | -------:| -------:| ---------:|
| zvault/brotli3 | 2.0s | 0.1s | 30 MiB |
| zvault/brotli6 | 7.6s | 0.1s | 25 MiB |
| zvault/lzma2 | 17.6s | 0.1s | 22 MiB |
| attic | 6.9s | 0.6s | 35 MiB |
| borg | 3.0s | 0.9s | 83 MiB |
| borg/zlib | 7.9s | 1.0s | 36 MiB |
| zbackup | 17.2s | 1.0s | 24 MiB |


@ -1,8 +1,8 @@
# Mounted locations and pseudo filesystems
/cdrom
/dev
lost+found
/mnt
/dev
/sys
/proc
/run

docs/logo.png (binary image, 11 KiB; not shown)


@ -3,7 +3,7 @@ zvault-addkey(1) -- Add a key pair to the repository
## SYNOPSIS
`zvault addkey [OPTIONS] <REPO> [FILE]`
`zvault addkey [OPTIONS] [FILE] <REPO>`
## DESCRIPTION
@ -14,7 +14,8 @@ If `FILE` is given, the key pair is read from the file and added to the
repository.
If `--generate` is set, a new key pair is generated, printed to console and
added to the repository.
added to the repository. If `--password` is also set, the key pair will be
derived from the given password instead of creating a random one.
If `--default` is set, encryption will be enabled (if not already) and the new
key will be set as default encryption key.
@ -22,27 +23,44 @@ key will be set as default encryption key.
## OPTIONS
* `-g`, `--generate`:
Generate a new key pair
* `-d`, `--default`:
Set the key pair as default
* `-p`, `--password <PASSWORD>`:
Derive the key pair from the given password instead of randomly creating it.
This setting requires that `--generate` is set too.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -62,12 +62,27 @@ The options are exactly the same as for _zvault-init(1)_.
values.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -18,12 +18,27 @@ running _zvault-vacuum(1)_ with different ratios.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -45,77 +45,92 @@ restore and access times change by reading files.
## OPTIONS
* `-e`, `--exclude <PATTERN>...`:
Exclude this path or file pattern. This option can be given multiple times.
Please see *EXCLUDE PATTERNS* for details on patterns.
This option conflicts with `--tar`.
* `--excludes-from <FILE>`:
Read the list of excludes from this file.
Please see *EXCLUDE PATTERNS* for details on patterns.
This option conflicts with `--tar`.
* `--full`:
Create a full backup without using another backup as a reference. This makes
sure that all files in the source path (except excluded files) are fully
read. The file contents will still be deduplicated by using existing backups
but all files are read fully.
This option conflicts with `--ref`.
* `--no-default-excludes`:
Do not load the default `excludes` file from the repository folder.
Those excludes are pre-filled with generic patterns to exclude, like pseudo
filesystems or cache folders.
* `--ref <REF>`:
Base the new backup on this reference backup instead of automatically
selecting a matching one. The backup given as `REF` must be a valid backup
name as listed by zvault-list(1).
This option conflicts with `--full`.
* `--tar`:
Read the source data from a tar archive instead of the filesystem. When this
flag is set, the `SRC` path must specify a valid tar file.
The contents of the archive are then read instead of the filesystem. Note
that the tar file contents are read as files and directories and not just
as a single file (this would happen when `SRC` is a tar file and `--tar` is
not set).
This option can be used to import a backup that has been exported using
zvault-restore(1) with the `--tar` flag.
This flag conflicts with `--exclude` and `--excludes-from`.
* `-x`, `--xdev`:
Allow crossing filesystem boundaries. By default, paths on different
filesystems than the start path will be ignored. If this flag is set,
the scan will also traverse into mounted filesystems.
**Note:** Please use this option with care. Some pseudo filesystems
contain arbitrarily deep nested directories that will send zVault into
an infinite loop. Also, the remote storage should not be included in
the backup.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## EXCLUDE PATTERNS
@ -146,5 +161,5 @@ the case of directories) will be left out of the backup.
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -19,12 +19,27 @@ names on the remote storage that do not relate to the bundle id.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -16,12 +16,27 @@ given its bundle id.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -62,33 +62,48 @@ has become inaccessible.
## OPTIONS
* `-b`, `--bundles`:
Check the integrity of the bundles too.
* `--bundle-data`:
Also check the contents of the bundles by fetching and decompressing them.
Note: This flag causes the check to be much slower.
* `-i`, `--index`:
Also check the integrity of the index and its contents.
* `-r`, `--repair`:
Try to repair broken bundles, backups and rebuild local data when necessary.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -25,53 +25,68 @@ data and can be changed at any time without any drawback.
## OPTIONS
* `--bundle-size <SIZE>`:
Set the target bundle size in MiB (default: 25).
Please see _zvault(1)_ for more information on *bundle size*.
* `--chunker <CHUNKER>`:
Set the chunker algorithm and target chunk size (default: fastcdc/16).
Please see _zvault(1)_ for more information on *chunkers* and possible
values.
* `-c`, `--compression <COMPRESSION>`:
Set the compression method and level (default: brotli/3).
Please see _zvault(1)_ for more information on *compression* and possible
values.
* `-e`, `--encryption <PUBLIC_KEY>`:
Use the given public key for encryption. The key must be a valid public key
encoded as hexadecimal. Please use _zvault-genkey(1)_ to generate keys and
_zvault-addkey(1)_ to add keys to the repository.
If `none` is given as public key, encryption is deactivated.
**Warning:** ZVault does not verify that the matching secret key which is
needed for decryption is known.
Please see _zvault(1)_ for more information on *encryption*.
* `--hash <HASH>`:
Set the hash method (default: blake2).
Please see _zvault(1)_ for more information on *hash methods* and possible
values.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)

docs/man/zvault-copy.1.md

@ -0,0 +1,43 @@
zvault-copy(1) -- Create a copy of a backup
===========================================
## SYNOPSIS
`zvault copy [OPTIONS] <SRC> <DST>`
## DESCRIPTION
This subcommand copies the backup `SRC` to a new name `DST`.
The backups given by `SRC` and `DST` must be in the format
`[repository]::backup_name[::subtree]` as described in _zvault(1)_.
If `repository` is omitted, the default repository location is used instead.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -25,12 +25,27 @@ modified (_mod_).
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -14,16 +14,32 @@ writes it to the given file `FILE`.
## OPTIONS
* `-p`, `--password <PASSWORD>`:
Derive the key pair from the given password instead of randomly creating it.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -25,20 +25,35 @@ imported via _zvault-backup(1)_ also with the `--tar` flag.
## OPTIONS
* `-k`, `--key <FILE>...`:
Add the key pair in the given file to the repository before importing the
remote bundles. This option can be used to add keys that are needed to read
the bundles. If multiple keys are needed, this option can be given multiple
times.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -17,12 +17,27 @@ The repository, backup or backup subtree given by `PATH` must be in the format
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -25,51 +25,72 @@ configuration can be changed by _zvault-config(1)_ later.
## OPTIONS
* `--bundle-size <SIZE>`:
Set the target bundle size in MiB (default: 25).
Please see zvault(1) for more information on *bundle size*.
* `--chunker <CHUNKER>`:
Set the chunker algorithm and target chunk size (default: fastcdc/16).
Please see _zvault(1)_ for more information on *chunkers* and possible
values.
* `-c`, `--compression <COMPRESSION>`:
Set the compression method and level (default: brotli/3).
Please see _zvault(1)_ for more information on *compression* and possible
values.
* `-e`, `--encrypt`:
Generate a keypair and enable encryption.
Please see _zvault(1)_ for more information on *encryption*.
* `--hash <HASH>`:
Set the hash method (default: blake2).
Please see _zvault(1)_ for more information on *hash methods* and possible
values.
* `-r`, `--remote <REMOTE>`:
Set the path to the mounted remote storage. There should be an empty folder
at this location.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -28,12 +28,27 @@ filesystem which is faster than _zvault-list(1)_ for multiple listings.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -30,12 +30,27 @@ this way is slower than using _zvault-restore(1)_.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -52,42 +52,57 @@ data of the deleted backups becomes inaccessible and can not be restored.**
## OPTIONS
* `-p`, `--prefix <PREFIX>`:
Only consider backups starting with this prefix.
* `-d`, `--daily <NUM>`:
Keep the newest backup for each of the last `NUM` days.
* `-w`, `--weekly <NUM>`:
Keep the newest backup for each of the last `NUM` weeks.
* `-m`, `--monthly <NUM>`:
Keep the newest backup for each of the last `NUM` months.
* `-y`, `--yearly <NUM>`:
Keep the newest backup for each of the last `NUM` years.
* `-f`, `--force`:
Actually remove backups instead of displaying what would be removed.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -35,17 +35,32 @@ data of the deleted backups becomes inaccessible and can not be restored.**
## OPTIONS
* `-f`, `--force`:
Remove multiple backups in a backup folder
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -23,21 +23,36 @@ If `--tar` is not set, the data will be written into the existing folder `DST`.
## OPTIONS
* `--tar`:
Write the backup to a tar archive named `DST` instead of creating files and
folders at this location.
This option can be used to export a backup that can be imported again using
zvault-backup(1) with the `--tar` flag.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -43,28 +43,43 @@ should be avoided when the storage space permits it.
## OPTIONS
* `--combine`:
Also combine small bundles into larger ones.
* `-r`, `--ratio <NUM>`:
Do not rewrite bundles with more than `NUM`% of used chunks.
The ratio must be given in whole percentage, e.g. 50 means 50%.
* `-f`, `--force`:
Actually run the vacuum instead of simulating it.
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -20,12 +20,27 @@ earliest backup that version appeared in.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
* `-V`, `--version`:
Prints version information
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -18,6 +18,16 @@ location.
## OPTIONS
* `-q`, `--quiet`:
Print less information
* `-v`, `--verbose`:
Print more information
* `-h`, `--help`:
Prints help information
@ -43,6 +53,7 @@ location.
* `info` Display information on a repository, a backup or a subtree, _zvault-info(1)_
* `mount` Mount the repository, a backup or a subtree, _zvault-mount(1)_
* `remove` Remove a backup or a subtree, _zvault-remove(1)_
* `copy` Create a copy of a backup, _zvault-copy(1)_
* `prune` Remove backups based on age, _zvault-prune(1)_
* `vacuum` Reclaim space by rewriting bundles, _zvault-vacuum(1)_
@ -65,8 +76,10 @@ location.
### Path syntax
Most subcommands work with a repository that has to be specified as a parameter.
If this repository is specified as `::`, the default repository in `~/.zvault`
will be used instead.
If the given repository path is absolute, this path will be used as is.
If the given path is relative, the repository will be located in
`~/.zvault/repos`. If the path is empty (specified as `::`), the default
repository in `~/.zvault/repos/default` will be used.
Some subcommands need to reference a specific backup in the repository. This is
done via the syntax `repository::backup_name` where `repository` is the path to
@ -87,8 +100,8 @@ regarded as not set at all.
Examples:
- `~/.zvault` references the repository in `~/.zvault` and is identical with
`::`.
- `~/.zvault/repos/default` references the repository in
`~/.zvault/repos/default` and is identical with `::`.
- `::backup1` references the backup `backup1` in the default repository
- `::backup1::/` references the root folder of the backup `backup1` in the
default repository
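The resolution rules above can be condensed into a few lines; this is an illustrative sketch of the convention, not the actual zVault implementation:

```
use std::path::{Path, PathBuf};

// Illustrative only: resolve the repository part of a
// `repository::backup_name::subtree` specification per the rules above.
fn resolve_repository(spec: &str, home: &Path) -> PathBuf {
    let repo = spec.split("::").next().unwrap_or("");
    if repo.is_empty() {
        // an empty repository part (`::`) means the default repository
        home.join(".zvault/repos/default")
    } else if repo.starts_with('/') {
        // absolute paths are used as-is
        PathBuf::from(repo)
    } else {
        // relative names live under ~/.zvault/repos
        home.join(".zvault/repos").join(repo)
    }
}
```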
@ -176,7 +189,7 @@ The chunker algorithm and chunk size are configured together in the format
`algorithm/size` where algorithm is one of `rabin`, `ae` and `fastcdc` and size
is the size in KiB e.g. `16`. So the recommended configuration is `fastcdc/16`.
Please not that since the chunker algorithm and chunk size affect the chunks
Please note that since the chunker algorithm and chunk size affect the chunks
created from the input data, any change to those values will make existing
chunks inaccessible for deduplication purposes. The old data is still readable
but new backups will have to store all data again.
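A hypothetical parser for this `algorithm/size` notation makes the convention explicit (a sketch, not the crate's actual code); `parse_chunker("fastcdc/16")` would yield `Some(("fastcdc", 16384))`:

```
// Illustrative only: parse a chunker specification like "fastcdc/16"
// into the algorithm name and the target chunk size in bytes.
fn parse_chunker(spec: &str) -> Option<(&str, usize)> {
    let mut parts = spec.splitn(2, '/');
    let algorithm = parts.next()?;
    let size_kib: usize = parts.next()?.parse().ok()?;
    match algorithm {
        "rabin" | "ae" | "fastcdc" => Some((algorithm, size_kib * 1024)),
        _ => None,
    }
}
```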
@ -185,7 +198,7 @@ but new backups will have to store all data again.
### Compression
ZVault offers different compression algorithms that can be used to compress the
stored data after deduplication. The compression ratio that can be achieved
mostly depends on the input data (test data can be compressed well and media
mostly depends on the input data (text data can be compressed well and media
data like music and videos are already compressed and can not be compressed
significantly).
@ -328,5 +341,5 @@ To reclaim storage space after removing some backups vacuum needs to be run
## COPYRIGHT
Copyright (C) 2017 Dennis Schwerdel
Copyright (C) 2017-2018 Dennis Schwerdel
This software is licensed under GPL-3 or newer (see LICENSE.md)


@ -1,4 +1,4 @@
# ZVault repository
# zVault repository
This folder is a zVault remote repository and contains backup data.
@ -181,11 +181,13 @@ The inode entries are encoded as defined in the appendix as `Inode`. The inode
structure contains all meta information on an inode entry, e.g. its file type,
the data size, modification time, permissions and ownership, etc. Also, the
structure contains optional information that is specific to the file type.
For regular files, the inode structure contains the data of that file either
inline (for very small files) or as a reference via a chunk list.
For directories, the inode structure contains a mapping of child inode entries
with their name as key and a chunk list referring to their encoded `Inode`
structure as value.
For symlinks, the inode structure contains the target in the field
`symlink_target`.
@ -251,10 +253,12 @@ The `BundleMode` describes the contents of the chunks of a bundle.
- `Meta` means that the chunks either contain encoded chunk lists or encoded
inode metadata
```
BundleMode {
    Data => 0,
    Meta => 1
}
```
#### `HashMethod`
@ -266,10 +270,12 @@ chunk data. This is not relevant for reading backups.
https://en.wikipedia.org/wiki/MurmurHash for the x64 architecture and with the
hash length set to 128 bits.
```
HashMethod {
    Blake2 => 1,
    Murmur3 => 2
}
```
#### `EncryptionMethod`
@ -278,9 +284,11 @@ decrypt) data.
- `Sodium` means the `crypto_box_seal` method of `libsodium` as specified at
http://www.libsodium.org as a combination of `X25519` and `XSalsa20-Poly1305`.
EncryptionMethod {
Sodium => 0
}
```
EncryptionMethod {
Sodium => 0
}
```
#### `CompressionMethod`
@ -292,12 +300,14 @@ thus also decompress) data.
http://tukaani.org/xz/
- `Lz4` means the LZ4 method as described at http://www.lz4.org
CompressionMethod {
Deflate => 0,
Brotli => 1,
Lzma => 2,
Lz4 => 3
}
```
CompressionMethod {
Deflate => 0,
Brotli => 1,
Lzma => 2,
Lz4 => 3
}
```
#### `FileType`
@ -306,13 +316,20 @@ The `FileType` describes the type of an inode.
- `Directory` means a directory that does not contain data but might have
children
- `Symlink` means a symlink that points to a target
- `BlockDevice` means a block device
- `CharDevice` means a character device
- `NamedPipe` means a named pipe/fifo
FileType {
File => 0,
Directory => 1,
Symlink => 2
}
```
FileType {
File => 0,
Directory => 1,
Symlink => 2,
BlockDevice => 3,
CharDevice => 4,
NamedPipe => 5
}
```
### Types
The following types are used to simplify the encoding specifications. They can
@ -323,6 +340,7 @@ used in the encoding specifications instead of their definitions.
#### `Encryption`
The `Encryption` is a combination of an `EncryptionMethod` and a key.
The method specifies how the key was used to encrypt the data.
For the `Sodium` method, the key is the public key that the data was encrypted
with. The secret key needed for decryption must correspond to that public key.
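As a hedged sketch of this scheme (assuming the `sodiumoxide` Rust bindings; the spec itself only mandates libsodium's `crypto_box_seal`):

```rust
// Illustrative only: sealed-box encryption as used by the `Sodium` method.
use sodiumoxide::crypto::{box_, sealedbox};

fn main() {
    sodiumoxide::init().unwrap();
    // The public key is stored with the repository; the secret key stays with the user.
    let (pk, sk) = box_::gen_keypair();
    let ciphertext = sealedbox::seal(b"bundle data", &pk);
    // Opening a sealed box requires both the public and the secret key.
    let plaintext = sealedbox::open(&ciphertext, &pk, &sk).unwrap();
    assert_eq!(plaintext, b"bundle data");
}
```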
@ -343,6 +361,7 @@ compression level. The level is only used for compression.
The `BundleHeader` structure contains information on how to decrypt other parts
of a bundle. The structure is encoded using the MessagePack encoding that has
been defined in a previous section.
The `encryption` field contains the information needed to decrypt the rest of
the bundle parts. If the `encryption` option is set, the following parts are
encrypted using the specified method and key, otherwise the parts are not
@ -359,6 +378,7 @@ encrypted. The `info_size` contains the encrypted size of the following
The `BundleInfo` structure contains information on a bundle. The structure is
encoded using the MessagePack encoding that has been defined in a previous
section.
If the `compression` option is set, the chunk data is compressed with the
specified method, otherwise it is uncompressed. The encrypted size of the
following `ChunkList` is stored in the `chunk_list_size` field.
@ -398,20 +418,27 @@ the list in order of appearance in the list.
The `Inode` structure contains information on a backup inode, e.g. a file or
a directory. The structure is encoded using the MessagePack encoding that has
been defined in a previous section.
The `name` field contains the name of this inode which can be concatenated with
the names of all parent inodes (with a platform-dependent separator) to form the
full path of the inode.
The `size` field contains the raw size of the data in
bytes (this is 0 for everything except files).
The `file_type` specifies the type of this inode.
The `mode` field specifies the permissions of the inode as a number which is
normally interpreted as octal.
The `user` and `group` fields specify the ownership of the inode in the form of
user and group id.
The `timestamp` specifies the modification time of the inode in whole seconds
since the UNIX epoch (1970-01-01 00:00 UTC).
The `symlink_target` specifies the target of symlink inodes and is only set for
symlinks.
The `data` specifies the data of a file and is only set for regular files. The
data is specified as a tuple of `nesting` and `bytes`. If `nesting` is `0`,
`bytes` contains the data of the file. This "inline" format is only used for
@ -421,18 +448,24 @@ the data of the file. If `nesting` is `2`, `bytes` is also an encoded
`ChunkList`, but the concatenated data of those chunks form again an encoded
`ChunkList` which in turn contains the chunks with the file data. Thus `nesting`
specifies the number of indirection steps via `ChunkList`s.
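To make the indirection concrete, here is a minimal sketch of resolving the `data` tuple; `fetch_chunks` is a hypothetical callback standing in for decoding an encoded `ChunkList` and concatenating the referenced chunks, not part of the real zVault API.

```rust
// Hypothetical sketch: resolve an Inode `data` field (nesting, bytes) into
// the raw file contents.
fn resolve_file_data<F>(nesting: u8, mut bytes: Vec<u8>, fetch_chunks: F) -> Vec<u8>
where
    F: Fn(&[u8]) -> Vec<u8>, // encoded ChunkList -> concatenated chunk data
{
    // Each nesting level replaces an encoded ChunkList by the data it references.
    for _ in 0..nesting {
        bytes = fetch_chunks(&bytes);
    }
    bytes // with nesting == 0, the bytes already were the inline file contents
}

fn main() {
    // Inline case (nesting == 0): the bytes are the file data themselves.
    let data = resolve_file_data(0, b"hello".to_vec(), |_| unreachable!());
    assert_eq!(data, b"hello");
}
```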
The `children` field specifies the child inodes of a directory and is only set
for directories. It is a mapping from the name of the child entry to the
encoded `ChunkList` that references the child's encoded `Inode` structure. It
is important that the names in the mapping correspond with the names in the
respective child `Inode`s and that the mapping is stored in alphabetical order
of the names.
The `cum_size`, `cum_dirs` and `cum_files` are cumulative values for the inode
as well as the whole subtree (including all children recursively). `cum_size` is
the sum of all inode data sizes plus 1000 bytes for each inode (for encoded
metadata); for example, a directory holding two 500-byte files has a `cum_size`
of 500 + 500 + 3 × 1000 = 4000 bytes. `cum_dirs` and `cum_files` are the counts
of directories and non-directories (symlinks and regular files).
The `xattrs` field contains a mapping of all extended attributes of the inode,
and `device` contains a tuple with the major and minor device ids if the inode
is a block or character device.
Inode {
name: string => 0,
size: int => 1,
@ -447,6 +480,8 @@ non-directories (symlinks and regular files).
cum_size: int => 12,
cum_dirs: int => 13,
cum_files: int => 14,
xattrs: {string => bytes}? => 15,
device: (int, int)? => 16
}
This structure is encoded with the following field default values:
@ -460,6 +495,7 @@ This structure is encoded with the following field default values:
The `BackupHeader` structure contains information on how to decrypt the rest of
the backup file. The structure is encoded using the MessagePack encoding that
has been defined in a previous section.
The `encryption` field contains the information needed to decrypt the rest of
the backup file. If the `encryption` option is set, the rest of the backup file
is encrypted using the specified method and key, otherwise the rest is not
@ -474,8 +510,10 @@ encrypted.
The `Backup` structure contains information on one specific backup and
references the root of the backup file tree. The structure is encoded using the
MessagePack encoding that has been defined in a previous section.
The `root` field contains an encoded `ChunkList` that references the root of the
backup file tree.
The fields `total_data_size`, `changed_data_size`, `deduplicated_data_size` and
`encoded_data_size` list the sizes of the backup in various stages in bytes.
- `total_data_size` gives the cumulative sizes of all entries in the backup.
@ -485,16 +523,21 @@ The fields `total_data_size`, `changed_data_size`, `deduplicated_data_size` and
this backup that have not been stored in the repository yet.
- `encoded_data_size` gives the cumulative encoded (and compressed) size of all
new bundles that have been written specifically to store this backup.
The fields `bundle_count` and `chunk_count` contain the number of new bundles
and chunks that had to be written to store this backup. `avg_chunk_size` is the
average size of new chunks in this backup.
The field `date` specifies the start of the backup run in seconds since the UNIX
epoch and the field `duration` contains the duration of the backup run in
seconds as a floating-point number, so fractions of a second are preserved.
The fields `file_count` and `dir_count` contain the total number of
non-directories and directories in this backup.
The `host` and `path` fields contain the host name and the path on that host
where the root of the backup was located.
The field `config` contains the configuration of zVault during the backup run.
Backup {

index/Cargo.lock (generated, 55 lines deleted)

@ -1,55 +0,0 @@
[root]
name = "index"
version = "0.1.0"
dependencies = [
"mmap 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "libc"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "mmap"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "quick-error"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "rand"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tempdir"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[metadata]
"checksum libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
"checksum libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)" = "88ee81885f9f04bff991e306fea7c1c60a5f0f9e409e99f6b40e3311a3363135"
"checksum mmap 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc85448a6006dd2ba26a385a564a8a0f1f2c7e78c70f1a70b2e0f4af286b823"
"checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
"checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"


@ -1,8 +0,0 @@
[package]
name = "index"
version = "0.1.0"
authors = ["Dennis Schwerdel <schwerdel@googlemail.com>"]
[dependencies]
mmap = "0.1"
quick-error = "1.1"

lang/Makefile (new file, 9 lines)

@ -0,0 +1,9 @@
MO_FILES = de.mo

default: default.pot ${MO_FILES}

default.pot: excluded.po ../src
	find ../src -name '*.rs' | xargs xgettext --debug -L python -n -F -a -E --from-code UTF-8 -x ../lang/excluded.po -o default.pot

%.mo : %.po
	msgfmt $< -o $@

lang/de.po (new file, 2215 lines)

File diff suppressed because it is too large

lang/default.pot (new file, 2208 lines)

File diff suppressed because it is too large

lang/excluded.po (new file, 1657 lines)

File diff suppressed because it is too large

rustfmt.toml (new file, 2 lines)

@ -0,0 +1,2 @@
trailing_semicolon = false
trailing_comma = "Never"


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::path::{Path, PathBuf};
use std::fs::{self, File};
@ -14,33 +14,33 @@ quick_error!{
pub enum BundleCacheError {
Read(err: io::Error) {
cause(err)
description("Failed to read bundle cache")
display("Bundle cache error: failed to read bundle cache\n\tcaused by: {}", err)
description(tr!("Failed to read bundle cache"))
display("{}", tr_format!("Bundle cache error: failed to read bundle cache\n\tcaused by: {}", err))
}
Write(err: io::Error) {
cause(err)
description("Failed to write bundle cache")
display("Bundle cache error: failed to write bundle cache\n\tcaused by: {}", err)
description(tr!("Failed to write bundle cache"))
display("{}", tr_format!("Bundle cache error: failed to write bundle cache\n\tcaused by: {}", err))
}
WrongHeader {
description("Wrong header")
display("Bundle cache error: wrong header on bundle cache")
description(tr!("Wrong header"))
display("{}", tr_format!("Bundle cache error: wrong header on bundle cache"))
}
UnsupportedVersion(version: u8) {
description("Wrong version")
display("Bundle cache error: unsupported version: {}", version)
description(tr!("Wrong version"))
display("{}", tr_format!("Bundle cache error: unsupported version: {}", version))
}
Decode(err: msgpack::DecodeError) {
from()
cause(err)
description("Failed to decode bundle cache")
display("Bundle cache error: failed to decode bundle cache\n\tcaused by: {}", err)
description(tr!("Failed to decode bundle cache"))
display("{}", tr_format!("Bundle cache error: failed to decode bundle cache\n\tcaused by: {}", err))
}
Encode(err: msgpack::EncodeError) {
from()
cause(err)
description("Failed to encode bundle cache")
display("Bundle cache error: failed to encode bundle cache\n\tcaused by: {}", err)
description(tr!("Failed to encode bundle cache"))
display("{}", tr_format!("Bundle cache error: failed to encode bundle cache\n\tcaused by: {}", err))
}
}
}
@ -62,7 +62,11 @@ impl StoredBundle {
self.info.id.clone()
}
pub fn copy_to<P: AsRef<Path>>(&self, base_path: &Path, path: P) -> Result<Self, BundleDbError> {
pub fn copy_to<P: AsRef<Path>>(
&self,
base_path: &Path,
path: P,
) -> Result<Self, BundleDbError> {
let src_path = base_path.join(&self.path);
let dst_path = path.as_ref();
try!(fs::copy(&src_path, dst_path).context(dst_path));
@ -71,7 +75,11 @@ impl StoredBundle {
Ok(bundle)
}
pub fn move_to<P: AsRef<Path>>(&mut self, base_path: &Path, path: P) -> Result<(), BundleDbError> {
pub fn move_to<P: AsRef<Path>>(
&mut self,
base_path: &Path,
path: P,
) -> Result<(), BundleDbError> {
let src_path = base_path.join(&self.path);
let dst_path = path.as_ref();
if fs::rename(&src_path, dst_path).is_err() {
@ -88,11 +96,11 @@ impl StoredBundle {
let mut header = [0u8; 8];
try!(file.read_exact(&mut header).map_err(BundleCacheError::Read));
if header[..CACHE_FILE_STRING.len()] != CACHE_FILE_STRING {
return Err(BundleCacheError::WrongHeader)
return Err(BundleCacheError::WrongHeader);
}
let version = header[CACHE_FILE_STRING.len()];
if version != CACHE_FILE_VERSION {
return Err(BundleCacheError::UnsupportedVersion(version))
return Err(BundleCacheError::UnsupportedVersion(version));
}
Ok(try!(msgpack::decode_from_stream(&mut file)))
}
@ -100,8 +108,12 @@ impl StoredBundle {
pub fn save_list_to<P: AsRef<Path>>(list: &[Self], path: P) -> Result<(), BundleCacheError> {
let path = path.as_ref();
let mut file = BufWriter::new(try!(File::create(path).map_err(BundleCacheError::Write)));
try!(file.write_all(&CACHE_FILE_STRING).map_err(BundleCacheError::Write));
try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(BundleCacheError::Write));
try!(file.write_all(&CACHE_FILE_STRING).map_err(
BundleCacheError::Write
));
try!(file.write_all(&[CACHE_FILE_VERSION]).map_err(
BundleCacheError::Write
));
try!(msgpack::encode_to_stream(&list, &mut file));
Ok(())
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use super::*;
use std::path::{Path, PathBuf};
@ -14,50 +14,56 @@ quick_error!{
pub enum BundleDbError {
ListBundles(err: io::Error) {
cause(err)
description("Failed to list bundles")
display("Bundle db error: failed to list bundles\n\tcaused by: {}", err)
description(tr!("Failed to list bundles"))
display("{}", tr_format!("Bundle db error: failed to list bundles\n\tcaused by: {}", err))
}
Reader(err: BundleReaderError) {
from()
cause(err)
description("Failed to read bundle")
display("Bundle db error: failed to read bundle\n\tcaused by: {}", err)
description(tr!("Failed to read bundle"))
display("{}", tr_format!("Bundle db error: failed to read bundle\n\tcaused by: {}", err))
}
Writer(err: BundleWriterError) {
from()
cause(err)
description("Failed to write bundle")
display("Bundle db error: failed to write bundle\n\tcaused by: {}", err)
description(tr!("Failed to write bundle"))
display("{}", tr_format!("Bundle db error: failed to write bundle\n\tcaused by: {}", err))
}
Cache(err: BundleCacheError) {
from()
cause(err)
description("Failed to read/write bundle cache")
display("Bundle db error: failed to read/write bundle cache\n\tcaused by: {}", err)
description(tr!("Failed to read/write bundle cache"))
display("{}", tr_format!("Bundle db error: failed to read/write bundle cache\n\tcaused by: {}", err))
}
UploadFailed {
description("Uploading a bundle failed")
description(tr!("Uploading a bundle failed"))
}
Io(err: io::Error, path: PathBuf) {
cause(err)
context(path: &'a Path, err: io::Error) -> (err, path.to_path_buf())
description("Io error")
display("Bundle db error: io error on {:?}\n\tcaused by: {}", path, err)
description(tr!("Io error"))
display("{}", tr_format!("Bundle db error: io error on {:?}\n\tcaused by: {}", path, err))
}
NoSuchBundle(bundle: BundleId) {
description("No such bundle")
display("Bundle db error: no such bundle: {:?}", bundle)
description(tr!("No such bundle"))
display("{}", tr_format!("Bundle db error: no such bundle: {:?}", bundle))
}
Remove(err: io::Error, bundle: BundleId) {
cause(err)
description("Failed to remove bundle")
display("Bundle db error: failed to remove bundle {}\n\tcaused by: {}", bundle, err)
description(tr!("Failed to remove bundle"))
display("{}", tr_format!("Bundle db error: failed to remove bundle {}\n\tcaused by: {}", bundle, err))
}
}
}
fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, StoredBundle>, crypto: Arc<Mutex<Crypto>>) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
#[allow(needless_pass_by_value)]
fn load_bundles(
path: &Path,
base: &Path,
bundles: &mut HashMap<BundleId, StoredBundle>,
crypto: Arc<Mutex<Crypto>>,
) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
let mut paths = vec![path.to_path_buf()];
let mut bundle_paths = HashSet::new();
while let Some(path) = paths.pop() {
@ -68,7 +74,7 @@ fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, Stored
paths.push(path);
} else {
if path.extension() != Some("bundle".as_ref()) {
continue
continue;
}
bundle_paths.insert(path.strip_prefix(base).unwrap().to_path_buf());
}
@ -89,10 +95,13 @@ fn load_bundles(path: &Path, base: &Path, bundles: &mut HashMap<BundleId, Stored
Err(err) => {
warn!("Failed to read bundle {:?}\n\tcaused by: {}", path, err);
info!("Ignoring unreadable bundle");
continue
continue;
}
};
let bundle = StoredBundle { info: info, path: path };
let bundle = StoredBundle {
info,
path
};
let id = bundle.info.id.clone();
if !bundles.contains_key(&id) {
new.push(bundle.clone());
@ -120,8 +129,8 @@ pub struct BundleDb {
impl BundleDb {
fn new(layout: RepositoryLayout, crypto: Arc<Mutex<Crypto>>) -> Self {
BundleDb {
layout: layout,
crypto: crypto,
layout,
crypto,
uploader: None,
local_bundles: HashMap::new(),
remote_bundles: HashMap::new(),
@ -129,31 +138,53 @@ impl BundleDb {
}
}
fn load_bundle_list(&mut self) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
fn load_bundle_list(
&mut self,
online: bool
) -> Result<(Vec<StoredBundle>, Vec<StoredBundle>), BundleDbError> {
if let Ok(list) = StoredBundle::read_list_from(&self.layout.local_bundle_cache_path()) {
for bundle in list {
self.local_bundles.insert(bundle.id(), bundle);
}
} else {
warn!("Failed to read local bundle cache, rebuilding cache");
tr_warn!("Failed to read local bundle cache, rebuilding cache");
}
if let Ok(list) = StoredBundle::read_list_from(&self.layout.remote_bundle_cache_path()) {
for bundle in list {
self.remote_bundles.insert(bundle.id(), bundle);
}
} else {
warn!("Failed to read remote bundle cache, rebuilding cache");
tr_warn!("Failed to read remote bundle cache, rebuilding cache");
}
let base_path = self.layout.base_path();
let (new, gone) = try!(load_bundles(&self.layout.local_bundles_path(), base_path, &mut self.local_bundles, self.crypto.clone()));
let (new, gone) = try!(load_bundles(
&self.layout.local_bundles_path(),
base_path,
&mut self.local_bundles,
self.crypto.clone()
));
if !new.is_empty() || !gone.is_empty() {
let bundles: Vec<_> = self.local_bundles.values().cloned().collect();
try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path()));
try!(StoredBundle::save_list_to(
&bundles,
&self.layout.local_bundle_cache_path()
));
}
let (new, gone) = try!(load_bundles(&self.layout.remote_bundles_path(), base_path, &mut self.remote_bundles, self.crypto.clone()));
if !online {
return Ok((vec![], vec![]))
}
let (new, gone) = try!(load_bundles(
&self.layout.remote_bundles_path(),
base_path,
&mut self.remote_bundles,
self.crypto.clone()
));
if !new.is_empty() || !gone.is_empty() {
let bundles: Vec<_> = self.remote_bundles.values().cloned().collect();
try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path()));
try!(StoredBundle::save_list_to(
&bundles,
&self.layout.remote_bundle_cache_path()
));
}
Ok((new, gone))
}
@ -164,9 +195,16 @@ impl BundleDb {
fn save_cache(&self) -> Result<(), BundleDbError> {
let bundles: Vec<_> = self.local_bundles.values().cloned().collect();
try!(StoredBundle::save_list_to(&bundles, &self.layout.local_bundle_cache_path()));
try!(StoredBundle::save_list_to(
&bundles,
&self.layout.local_bundle_cache_path()
));
let bundles: Vec<_> = self.remote_bundles.values().cloned().collect();
Ok(try!(StoredBundle::save_list_to(&bundles, &self.layout.remote_bundle_cache_path())))
try!(StoredBundle::save_list_to(
&bundles,
&self.layout.remote_bundle_cache_path()
));
Ok(())
}
fn update_cache(&mut self) -> Result<(), BundleDbError> {
@ -185,44 +223,80 @@ impl BundleDb {
for id in meta_bundles {
if !self.local_bundles.contains_key(&id) {
let bundle = self.remote_bundles[&id].clone();
debug!("Copying new meta bundle to local cache: {}", bundle.info.id);
tr_debug!("Copying new meta bundle to local cache: {}", bundle.info.id);
try!(self.copy_remote_bundle_to_cache(&bundle));
}
}
let base_path = self.layout.base_path();
for id in remove {
if let Some(bundle) = self.local_bundles.remove(&id) {
try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| BundleDbError::Remove(e, id)))
try!(fs::remove_file(base_path.join(&bundle.path)).map_err(|e| {
BundleDbError::Remove(e, id)
}))
}
}
Ok(())
}
pub fn open(layout: RepositoryLayout, crypto: Arc<Mutex<Crypto>>) -> Result<(Self, Vec<BundleInfo>, Vec<BundleInfo>), BundleDbError> {
pub fn open(
layout: RepositoryLayout,
crypto: Arc<Mutex<Crypto>>,
online: bool
) -> Result<(Self, Vec<BundleInfo>, Vec<BundleInfo>), BundleDbError> {
let mut self_ = Self::new(layout, crypto);
let (new, gone) = try!(self_.load_bundle_list());
let (new, gone) = try!(self_.load_bundle_list(online));
try!(self_.update_cache());
let new = new.into_iter().map(|s| s.info).collect();
let gone = gone.into_iter().map(|s| s.info).collect();
Ok((self_, new, gone))
}
pub fn create(layout: RepositoryLayout) -> Result<(), BundleDbError> {
try!(fs::create_dir_all(layout.remote_bundles_path()).context(&layout.remote_bundles_path() as &Path));
try!(fs::create_dir_all(layout.local_bundles_path()).context(&layout.local_bundles_path() as &Path));
try!(fs::create_dir_all(layout.temp_bundles_path()).context(&layout.temp_bundles_path() as &Path));
try!(StoredBundle::save_list_to(&[], layout.local_bundle_cache_path()));
try!(StoredBundle::save_list_to(&[], layout.remote_bundle_cache_path()));
pub fn create(layout: &RepositoryLayout) -> Result<(), BundleDbError> {
try!(fs::create_dir_all(layout.remote_bundles_path()).context(
&layout.remote_bundles_path() as
&Path
));
try!(fs::create_dir_all(layout.local_bundles_path()).context(
&layout.local_bundles_path() as
&Path
));
try!(fs::create_dir_all(layout.temp_bundles_path()).context(
&layout.temp_bundles_path() as
&Path
));
try!(StoredBundle::save_list_to(
&[],
layout.local_bundle_cache_path()
));
try!(StoredBundle::save_list_to(
&[],
layout.remote_bundle_cache_path()
));
Ok(())
}
#[inline]
pub fn create_bundle(&self, mode: BundleMode, hash_method: HashMethod, compression: Option<Compression>, encryption: Option<Encryption>) -> Result<BundleWriter, BundleDbError> {
Ok(try!(BundleWriter::new(mode, hash_method, compression, encryption, self.crypto.clone())))
pub fn create_bundle(
&self,
mode: BundleMode,
hash_method: HashMethod,
compression: Option<Compression>,
encryption: Option<Encryption>,
) -> Result<BundleWriter, BundleDbError> {
Ok(try!(BundleWriter::new(
mode,
hash_method,
compression,
encryption,
self.crypto.clone()
)))
}
fn get_stored_bundle(&self, bundle_id: &BundleId) -> Result<&StoredBundle, BundleDbError> {
if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| self.remote_bundles.get(bundle_id)) {
if let Some(stored) = self.local_bundles.get(bundle_id).or_else(|| {
self.remote_bundles.get(bundle_id)
})
{
Ok(stored)
} else {
Err(BundleDbError::NoSuchBundle(bundle_id.clone()))
@ -232,21 +306,26 @@ impl BundleDb {
#[inline]
fn get_bundle(&self, stored: &StoredBundle) -> Result<BundleReader, BundleDbError> {
let base_path = self.layout.base_path();
Ok(try!(BundleReader::load(base_path.join(&stored.path), self.crypto.clone())))
Ok(try!(BundleReader::load(
base_path.join(&stored.path),
self.crypto.clone()
)))
}
pub fn get_chunk(&mut self, bundle_id: &BundleId, id: usize) -> Result<Vec<u8>, BundleDbError> {
if let Some(&mut (ref mut bundle, ref data)) = self.bundle_cache.get_mut(bundle_id) {
let (pos, len) = try!(bundle.get_chunk_position(id));
let mut chunk = Vec::with_capacity(len);
chunk.extend_from_slice(&data[pos..pos+len]);
chunk.extend_from_slice(&data[pos..pos + len]);
return Ok(chunk);
}
let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then(|s| self.get_bundle(s)));
let mut bundle = try!(self.get_stored_bundle(bundle_id).and_then(
|s| self.get_bundle(s)
));
let (pos, len) = try!(bundle.get_chunk_position(id));
let mut chunk = Vec::with_capacity(len);
let data = try!(bundle.load_contents());
chunk.extend_from_slice(&data[pos..pos+len]);
chunk.extend_from_slice(&data[pos..pos + len]);
self.bundle_cache.put(bundle_id.clone(), (bundle, data));
Ok(chunk)
}
@ -255,7 +334,10 @@ impl BundleDb {
let id = bundle.id();
let (folder, filename) = self.layout.local_bundle_path(&id, self.local_bundles.len());
try!(fs::create_dir_all(&folder).context(&folder as &Path));
let bundle = try!(bundle.copy_to(self.layout.base_path(), folder.join(filename)));
let bundle = try!(bundle.copy_to(
self.layout.base_path(),
folder.join(filename)
));
self.local_bundles.insert(id, bundle);
Ok(())
}
@ -268,7 +350,10 @@ impl BundleDb {
let (folder, filename) = self.layout.remote_bundle_path(self.remote_bundles.len());
let dst_path = folder.join(filename);
let src_path = self.layout.base_path().join(bundle.path);
bundle.path = dst_path.strip_prefix(self.layout.base_path()).unwrap().to_path_buf();
bundle.path = dst_path
.strip_prefix(self.layout.base_path())
.unwrap()
.to_path_buf();
if self.uploader.is_none() {
self.uploader = Some(BundleUploader::new(5));
}
@ -288,7 +373,9 @@ impl BundleDb {
}
pub fn get_chunk_list(&self, bundle: &BundleId) -> Result<ChunkList, BundleDbError> {
let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| self.get_bundle(stored)));
let mut bundle = try!(self.get_stored_bundle(bundle).and_then(|stored| {
self.get_bundle(stored)
}));
Ok(try!(bundle.get_chunk_list()).clone())
}
@ -305,7 +392,9 @@ impl BundleDb {
pub fn delete_local_bundle(&mut self, bundle: &BundleId) -> Result<(), BundleDbError> {
if let Some(bundle) = self.local_bundles.remove(bundle) {
let path = self.layout.base_path().join(&bundle.path);
try!(fs::remove_file(path).map_err(|e| BundleDbError::Remove(e, bundle.id())))
try!(fs::remove_file(path).map_err(|e| {
BundleDbError::Remove(e, bundle.id())
}))
}
Ok(())
}
@ -322,30 +411,35 @@ impl BundleDb {
pub fn check(&mut self, full: bool, repair: bool) -> Result<bool, BundleDbError> {
let mut to_repair = vec![];
for (id, stored) in ProgressIter::new("checking bundles", self.remote_bundles.len(), self.remote_bundles.iter()) {
for (id, stored) in ProgressIter::new(
tr!("checking bundles"),
self.remote_bundles.len(),
self.remote_bundles.iter()
)
{
let mut bundle = match self.get_bundle(stored) {
Ok(bundle) => bundle,
Err(err) => {
if repair {
to_repair.push(id.clone());
continue
continue;
} else {
return Err(err)
return Err(err);
}
}
};
if let Err(err) = bundle.check(full) {
if repair {
to_repair.push(id.clone());
continue
continue;
} else {
return Err(err.into())
return Err(err.into());
}
}
}
if !to_repair.is_empty() {
for id in ProgressIter::new("repairing bundles", to_repair.len(), to_repair.iter()) {
try!(self.repair_bundle(id.clone()));
for id in ProgressIter::new(tr!("repairing bundles"), to_repair.len(), to_repair.iter()) {
try!(self.repair_bundle(id));
}
try!(self.flush());
}
@ -366,44 +460,61 @@ impl BundleDb {
Ok(())
}
fn repair_bundle(&mut self, id: BundleId) -> Result<(), BundleDbError> {
let stored = self.remote_bundles[&id].clone();
fn repair_bundle(&mut self, id: &BundleId) -> Result<(), BundleDbError> {
let stored = self.remote_bundles[id].clone();
let mut bundle = match self.get_bundle(&stored) {
Ok(bundle) => bundle,
Err(err) => {
warn!("Problem detected: failed to read bundle header: {}\n\tcaused by: {}", id, err);
tr_warn!(
"Problem detected: failed to read bundle header: {}\n\tcaused by: {}",
id,
err
);
return self.evacuate_broken_bundle(stored);
}
};
let chunks = match bundle.get_chunk_list() {
Ok(chunks) => chunks.clone(),
Err(err) => {
warn!("Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}", id, err);
tr_warn!(
"Problem detected: failed to read bundle chunks: {}\n\tcaused by: {}",
id,
err
);
return self.evacuate_broken_bundle(stored);
}
};
let data = match bundle.load_contents() {
Ok(data) => data,
Err(err) => {
warn!("Problem detected: failed to read bundle data: {}\n\tcaused by: {}", id, err);
tr_warn!(
"Problem detected: failed to read bundle data: {}\n\tcaused by: {}",
id,
err
);
return self.evacuate_broken_bundle(stored);
}
};
warn!("Problem detected: bundle data was truncated: {}", id);
info!("Copying readable data into new bundle");
tr_warn!("Problem detected: bundle data was truncated: {}", id);
tr_info!("Copying readable data into new bundle");
let info = stored.info.clone();
let mut new_bundle = try!(self.create_bundle(info.mode, info.hash_method, info.compression, info.encryption));
let mut new_bundle = try!(self.create_bundle(
info.mode,
info.hash_method,
info.compression,
info.encryption
));
let mut pos = 0;
for (hash, mut len) in chunks.into_inner() {
if pos >= data.len() {
break
break;
}
len = min(len, (data.len() - pos) as u32);
try!(new_bundle.add(&data[pos..pos+len as usize], hash));
try!(new_bundle.add(&data[pos..pos + len as usize], hash));
pos += len as usize;
}
let bundle = try!(self.add_bundle(new_bundle));
info!("New bundle id is {}", bundle.id);
tr_info!("New bundle id is {}", bundle.id);
self.evacuate_broken_bundle(stored)
}
@ -412,4 +523,29 @@ impl BundleDb {
self.remote_bundles.len()
}
pub fn statistics(&self) -> BundleStatistics {
let bundles = self.list_bundles();
let bundles_meta: Vec<_> = bundles.iter().filter(|b| b.mode == BundleMode::Meta).collect();
let bundles_data: Vec<_> = bundles.iter().filter(|b| b.mode == BundleMode::Data).collect();
let mut hash_methods = HashMap::new();
let mut compressions = HashMap::new();
let mut encryptions = HashMap::new();
for bundle in &bundles {
*hash_methods.entry(bundle.hash_method).or_insert(0) += 1;
*compressions.entry(bundle.compression.clone()).or_insert(0) += 1;
*encryptions.entry(bundle.encryption.clone()).or_insert(0) += 1;
}
BundleStatistics {
hash_methods, compressions, encryptions,
raw_size: ValueStats::from_iter(|| bundles.iter().map(|b| b.raw_size as f32)),
encoded_size: ValueStats::from_iter(|| bundles.iter().map(|b| b.encoded_size as f32)),
chunk_count: ValueStats::from_iter(|| bundles.iter().map(|b| b.chunk_count as f32)),
raw_size_meta: ValueStats::from_iter(|| bundles_meta.iter().map(|b| b.raw_size as f32)),
encoded_size_meta: ValueStats::from_iter(|| bundles_meta.iter().map(|b| b.encoded_size as f32)),
chunk_count_meta: ValueStats::from_iter(|| bundles_meta.iter().map(|b| b.chunk_count as f32)),
raw_size_data: ValueStats::from_iter(|| bundles_data.iter().map(|b| b.raw_size as f32)),
encoded_size_data: ValueStats::from_iter(|| bundles_data.iter().map(|b| b.encoded_size as f32)),
chunk_count_data: ValueStats::from_iter(|| bundles_data.iter().map(|b| b.chunk_count as f32))
}
}
}


@ -10,9 +10,10 @@ pub use self::reader::{BundleReader, BundleReaderError};
pub use self::db::*;
pub use self::uploader::BundleUploader;
use ::prelude::*;
use prelude::*;
use std::fmt;
use std::collections::HashMap;
use serde;
use rand;
@ -47,7 +48,10 @@ impl BundleId {
#[inline]
pub fn random() -> Self {
BundleId(Hash{high: rand::random(), low: rand::random()})
BundleId(Hash {
high: rand::random(),
low: rand::random()
})
}
}
@ -68,7 +72,8 @@ impl fmt::Debug for BundleId {
#[derive(Eq, Debug, PartialEq, Clone, Copy)]
pub enum BundleMode {
Data, Meta
Data,
Meta
}
serde_impl!(BundleMode(u8) {
Data => 0,
@ -129,3 +134,20 @@ impl Default for BundleInfo {
}
}
}
#[derive(Debug)]
pub struct BundleStatistics {
pub raw_size: ValueStats,
pub encoded_size: ValueStats,
pub chunk_count: ValueStats,
pub raw_size_meta: ValueStats,
pub encoded_size_meta: ValueStats,
pub chunk_count_meta: ValueStats,
pub raw_size_data: ValueStats,
pub encoded_size_data: ValueStats,
pub chunk_count_data: ValueStats,
pub hash_methods: HashMap<HashMethod, usize>,
pub compressions: HashMap<Option<Compression>, usize>,
pub encryptions: HashMap<Option<Encryption>, usize>
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use super::*;
use std::path::{Path, PathBuf};
@ -15,42 +15,42 @@ quick_error!{
Read(err: io::Error, path: PathBuf) {
cause(err)
context(path: &'a Path, err: io::Error) -> (err, path.to_path_buf())
description("Failed to read data from file")
display("Bundle reader error: failed to read data from file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to read data from file"))
display("{}", tr_format!("Bundle reader error: failed to read data from file {:?}\n\tcaused by: {}", path, err))
}
WrongHeader(path: PathBuf) {
description("Wrong header")
display("Bundle reader error: wrong header on bundle {:?}", path)
description(tr!("Wrong header"))
display("{}", tr_format!("Bundle reader error: wrong header on bundle {:?}", path))
}
UnsupportedVersion(path: PathBuf, version: u8) {
description("Wrong version")
display("Bundle reader error: unsupported version on bundle {:?}: {}", path, version)
description(tr!("Wrong version"))
display("{}", tr_format!("Bundle reader error: unsupported version on bundle {:?}: {}", path, version))
}
NoSuchChunk(bundle: BundleId, id: usize) {
description("Bundle has no such chunk")
display("Bundle reader error: bundle {:?} has no chunk with id {}", bundle, id)
description(tr!("Bundle has no such chunk"))
display("{}", tr_format!("Bundle reader error: bundle {:?} has no chunk with id {}", bundle, id))
}
Decode(err: msgpack::DecodeError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: msgpack::DecodeError) -> (err, path.to_path_buf())
description("Failed to decode bundle header")
display("Bundle reader error: failed to decode bundle header of {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to decode bundle header"))
display("{}", tr_format!("Bundle reader error: failed to decode bundle header of {:?}\n\tcaused by: {}", path, err))
}
Decompression(err: CompressionError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: CompressionError) -> (err, path.to_path_buf())
description("Decompression failed")
display("Bundle reader error: decompression failed on bundle {:?}\n\tcaused by: {}", path, err)
description(tr!("Decompression failed"))
display("{}", tr_format!("Bundle reader error: decompression failed on bundle {:?}\n\tcaused by: {}", path, err))
}
Decryption(err: EncryptionError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: EncryptionError) -> (err, path.to_path_buf())
description("Decryption failed")
display("Bundle reader error: decryption failed on bundle {:?}\n\tcaused by: {}", path, err)
description(tr!("Decryption failed"))
display("{}", tr_format!("Bundle reader error: decryption failed on bundle {:?}\n\tcaused by: {}", path, err))
}
Integrity(bundle: BundleId, reason: &'static str) {
description("Bundle has an integrity error")
display("Bundle reader error: bundle {:?} has an integrity error: {}", bundle, reason)
description(tr!("Bundle has an integrity error"))
display("{}", tr_format!("Bundle reader error: bundle {:?} has an integrity error: {}", bundle, reason))
}
}
}
@ -67,14 +67,20 @@ pub struct BundleReader {
}
impl BundleReader {
pub fn new(path: PathBuf, version: u8, content_start: usize, crypto: Arc<Mutex<Crypto>>, info: BundleInfo) -> Self {
pub fn new(
path: PathBuf,
version: u8,
content_start: usize,
crypto: Arc<Mutex<Crypto>>,
info: BundleInfo,
) -> Self {
BundleReader {
info: info,
info,
chunks: None,
version: version,
path: path,
crypto: crypto,
content_start: content_start,
version,
path,
crypto,
content_start,
chunk_positions: None
}
}
@ -84,54 +90,91 @@ impl BundleReader {
self.info.id.clone()
}
fn load_header<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
#[allow(needless_pass_by_value)]
fn load_header<P: AsRef<Path>>(
path: P,
crypto: Arc<Mutex<Crypto>>,
) -> Result<(BundleInfo, u8, usize), BundleReaderError> {
let path = path.as_ref();
let mut file = BufReader::new(try!(File::open(path).context(path)));
let mut header = [0u8; 8];
try!(file.read_exact(&mut header).context(path));
if header[..HEADER_STRING.len()] != HEADER_STRING {
return Err(BundleReaderError::WrongHeader(path.to_path_buf()))
return Err(BundleReaderError::WrongHeader(path.to_path_buf()));
}
let version = header[HEADER_STRING.len()];
if version != HEADER_VERSION {
return Err(BundleReaderError::UnsupportedVersion(path.to_path_buf(), version))
return Err(BundleReaderError::UnsupportedVersion(
path.to_path_buf(),
version
));
}
let header: BundleHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
let mut info_data = Vec::with_capacity(header.info_size);
info_data.resize(header.info_size, 0);
try!(file.read_exact(&mut info_data).context(path));
if let Some(ref encryption) = header.encryption {
info_data = try!(crypto.lock().unwrap().decrypt(encryption, &info_data).context(path));
info_data = try!(
crypto
.lock()
.unwrap()
.decrypt(encryption, &info_data)
.context(path)
);
}
let mut info: BundleInfo = try!(msgpack::decode(&info_data).context(path));
info.encryption = header.encryption;
debug!("Load bundle {}", info.id);
let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize + info.chunk_list_size;
let content_start = file.seek(SeekFrom::Current(0)).unwrap() as usize +
info.chunk_list_size;
Ok((info, version, content_start))
}
#[inline]
pub fn load_info<P: AsRef<Path>>(path: P, crypto: Arc<Mutex<Crypto>>) -> Result<BundleInfo, BundleReaderError> {
pub fn load_info<P: AsRef<Path>>(
path: P,
crypto: Arc<Mutex<Crypto>>,
) -> Result<BundleInfo, BundleReaderError> {
Self::load_header(path, crypto).map(|b| b.0)
}
#[inline]
pub fn load(path: PathBuf, crypto: Arc<Mutex<Crypto>>) -> Result<Self, BundleReaderError> {
let (header, version, content_start) = try!(Self::load_header(&path, crypto.clone()));
Ok(BundleReader::new(path, version, content_start, crypto, header))
Ok(BundleReader::new(
path,
version,
content_start,
crypto,
header
))
}
fn load_chunklist(&mut self) -> Result<(), BundleReaderError> {
debug!("Load bundle chunklist {} ({:?})", self.info.id, self.info.mode);
tr_debug!(
"Load bundle chunklist {} ({:?})",
self.info.id,
self.info.mode
);
let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
let len = self.info.chunk_list_size;
let start = self.content_start - len;
try!(file.seek(SeekFrom::Start(start as u64)).context(&self.path as &Path));
try!(file.seek(SeekFrom::Start(start as u64)).context(
&self.path as &Path
));
let mut chunk_data = Vec::with_capacity(len);
chunk_data.resize(self.info.chunk_list_size, 0);
try!(file.read_exact(&mut chunk_data).context(&self.path as &Path));
try!(file.read_exact(&mut chunk_data).context(
&self.path as &Path
));
if let Some(ref encryption) = self.info.encryption {
chunk_data = try!(self.crypto.lock().unwrap().decrypt(encryption, &chunk_data).context(&self.path as &Path));
chunk_data = try!(
self.crypto
.lock()
.unwrap()
.decrypt(encryption, &chunk_data)
.context(&self.path as &Path)
);
}
let chunks = ChunkList::read_from(&chunk_data);
let mut chunk_positions = Vec::with_capacity(chunks.len());
@ -154,22 +197,33 @@ impl BundleReader {
}
fn load_encoded_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
debug!("Load bundle data {} ({:?})", self.info.id, self.info.mode);
tr_debug!("Load bundle data {} ({:?})", self.info.id, self.info.mode);
let mut file = BufReader::new(try!(File::open(&self.path).context(&self.path as &Path)));
try!(file.seek(SeekFrom::Start(self.content_start as u64)).context(&self.path as &Path));
let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size)+1024);
try!(
file.seek(SeekFrom::Start(self.content_start as u64))
.context(&self.path as &Path)
);
let mut data = Vec::with_capacity(max(self.info.encoded_size, self.info.raw_size) + 1024);
try!(file.read_to_end(&mut data).context(&self.path as &Path));
Ok(data)
}
fn decode_contents(&self, mut data: Vec<u8>) -> Result<Vec<u8>, BundleReaderError> {
if let Some(ref encryption) = self.info.encryption {
data = try!(self.crypto.lock().unwrap().decrypt(encryption, &data).context(&self.path as &Path));
data = try!(
self.crypto
.lock()
.unwrap()
.decrypt(encryption, &data)
.context(&self.path as &Path)
);
}
if let Some(ref compression) = self.info.compression {
let mut stream = try!(compression.decompress_stream().context(&self.path as &Path));
let mut buffer = Vec::with_capacity(self.info.raw_size);
try!(stream.process(&data, &mut buffer).context(&self.path as &Path));
try!(stream.process(&data, &mut buffer).context(
&self.path as &Path
));
try!(stream.finish(&mut buffer).context(&self.path as &Path));
data = buffer;
}
@ -178,12 +232,14 @@ impl BundleReader {
#[inline]
pub fn load_contents(&self) -> Result<Vec<u8>, BundleReaderError> {
self.load_encoded_contents().and_then(|data| self.decode_contents(data))
self.load_encoded_contents().and_then(|data| {
self.decode_contents(data)
})
}
pub fn get_chunk_position(&mut self, id: usize) -> Result<(usize, usize), BundleReaderError> {
if id >= self.info.chunk_count {
return Err(BundleReaderError::NoSuchChunk(self.id(), id))
return Err(BundleReaderError::NoSuchChunk(self.id(), id));
}
if self.chunks.is_none() || self.chunk_positions.is_none() {
try!(self.load_chunklist());
@ -198,40 +254,72 @@ impl BundleReader {
try!(self.load_chunklist());
}
if self.info.chunk_count != self.chunks.as_ref().unwrap().len() {
return Err(BundleReaderError::Integrity(self.id(),
"Chunk list size does not match chunk count"))
return Err(BundleReaderError::Integrity(
self.id(),
tr!("Chunk list size does not match chunk count")
));
}
if self.chunks.as_ref().unwrap().iter().map(|c| c.1 as usize).sum::<usize>() != self.info.raw_size {
return Err(BundleReaderError::Integrity(self.id(),
"Individual chunk sizes do not add up to total size"))
if self.chunks
.as_ref()
.unwrap()
.iter()
.map(|c| c.1 as usize)
.sum::<usize>() != self.info.raw_size
{
return Err(BundleReaderError::Integrity(
self.id(),
tr!("Individual chunk sizes do not add up to total size")
));
}
if !full {
let size = try!(fs::metadata(&self.path).context(&self.path as &Path)).len();
if size as usize != self.info.encoded_size + self.content_start {
return Err(BundleReaderError::Integrity(self.id(),
"File size does not match size in header, truncated file"))
return Err(BundleReaderError::Integrity(
self.id(),
tr!("File size does not match size in header, truncated file")
));
}
return Ok(())
return Ok(());
}
let encoded_contents = try!(self.load_encoded_contents());
if self.info.encoded_size != encoded_contents.len() {
return Err(BundleReaderError::Integrity(self.id(),
"Encoded data size does not match size in header, truncated bundle"))
return Err(BundleReaderError::Integrity(
self.id(),
tr!("Encoded data size does not match size in header, truncated bundle")
));
}
let contents = try!(self.decode_contents(encoded_contents));
if self.info.raw_size != contents.len() {
return Err(BundleReaderError::Integrity(self.id(),
"Raw data size does not match size in header, truncated bundle"))
return Err(BundleReaderError::Integrity(
self.id(),
tr!("Raw data size does not match size in header, truncated bundle")
));
}
let mut pos = 0;
for chunk in self.chunks.as_ref().unwrap().as_ref() {
let data = &contents[pos..pos+chunk.1 as usize];
if self.info.hash_method.hash(data) != chunk.0 {
return Err(BundleReaderError::Integrity(
self.id(),
tr!("Stored hash does not match hash in header, modified data")
));
}
pos += chunk.1 as usize;
}
//TODO: verify checksum
Ok(())
}
}
impl Debug for BundleReader {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)",
self.info.id.to_string(), self.path, self.info.chunk_count, self.info.raw_size,
self.info.encoded_size, self.info.compression)
write!(fmt, "{}",
tr_format!("Bundle(\n\tid: {}\n\tpath: {:?}\n\tchunks: {}\n\tsize: {}, encoded: {}\n\tcompression: {:?}\n)",
self.info.id.to_string(),
self.path,
self.info.chunk_count,
self.info.raw_size,
self.info.encoded_size,
self.info.compression
))
}
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::sync::atomic::{Ordering, AtomicBool, AtomicUsize};
use std::sync::{Mutex, Condvar, Arc};
@ -20,7 +20,7 @@ pub struct BundleUploader {
impl BundleUploader {
pub fn new(capacity: usize) -> Arc<Self> {
let self_ = Arc::new(BundleUploader {
capacity: capacity,
capacity,
error_present: AtomicBool::new(false),
error: Mutex::new(None),
waiting: AtomicUsize::new(0),
@ -28,7 +28,10 @@ impl BundleUploader {
wait: (Condvar::new(), Mutex::new(()))
});
let self2 = self_.clone();
thread::Builder::new().name("uploader".to_string()).spawn(move || self2.worker_thread()).unwrap();
thread::Builder::new()
.name("uploader".to_string())
.spawn(move || self2.worker_thread())
.unwrap();
self_
}
@ -48,10 +51,10 @@ impl BundleUploader {
pub fn queue(&self, local_path: PathBuf, remote_path: PathBuf) -> Result<(), BundleDbError> {
while self.waiting.load(Ordering::SeqCst) >= self.capacity {
debug!("Upload queue is full, waiting for slots");
tr_debug!("Upload queue is full, waiting for slots");
let _ = self.wait.0.wait(self.wait.1.lock().unwrap()).unwrap();
}
trace!("Adding to upload queue: {:?}", local_path);
tr_trace!("Adding to upload queue: {:?}", local_path);
if !self.error_present.load(Ordering::SeqCst) {
self.waiting.fetch_add(1, Ordering::SeqCst);
self.queue.push(Some((local_path, remote_path)));
@ -72,21 +75,21 @@ impl BundleUploader {
fn worker_thread_inner(&self) -> Result<(), BundleDbError> {
while let Some((src_path, dst_path)) = self.queue.pop() {
trace!("Uploading {:?} to {:?}", src_path, dst_path);
tr_trace!("Uploading {:?} to {:?}", src_path, dst_path);
self.waiting.fetch_sub(1, Ordering::SeqCst);
self.wait.0.notify_all();
let folder = dst_path.parent().unwrap();
try!(fs::create_dir_all(&folder).context(folder as &Path));
try!(fs::copy(&src_path, &dst_path).context(&dst_path as &Path));
try!(fs::remove_file(&src_path).context(&src_path as &Path));
debug!("Uploaded {:?} to {:?}", src_path, dst_path);
tr_debug!("Uploaded {:?} to {:?}", src_path, dst_path);
}
Ok(())
}
fn worker_thread(&self) {
if let Err(err) = self.worker_thread_inner() {
debug!("Upload thread failed with error: {}", err);
tr_debug!("Upload thread failed with error: {}", err);
*self.error.lock().unwrap() = Some(err);
self.error_present.store(true, Ordering::SeqCst);
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use super::*;
use std::path::{Path, PathBuf};
@ -14,31 +14,31 @@ quick_error!{
pub enum BundleWriterError {
CompressionSetup(err: CompressionError) {
cause(err)
description("Failed to setup compression")
display("Bundle writer error: failed to setup compression\n\tcaused by: {}", err)
description(tr!("Failed to setup compression"))
display("{}", tr_format!("Bundle writer error: failed to setup compression\n\tcaused by: {}", err))
}
Compression(err: CompressionError) {
cause(err)
description("Failed to compress data")
display("Bundle writer error: failed to compress data\n\tcaused by: {}", err)
description(tr!("Failed to compress data"))
display("{}", tr_format!("Bundle writer error: failed to compress data\n\tcaused by: {}", err))
}
Encryption(err: EncryptionError) {
from()
cause(err)
description("Encryption failed")
display("Bundle writer error: failed to encrypt data\n\tcaused by: {}", err)
description(tr!("Encryption failed"))
display("{}", tr_format!("Bundle writer error: failed to encrypt data\n\tcaused by: {}", err))
}
Encode(err: msgpack::EncodeError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: msgpack::EncodeError) -> (err, path.to_path_buf())
description("Failed to encode bundle header to file")
display("Bundle writer error: failed to encode bundle header to file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to encode bundle header to file"))
display("{}", tr_format!("Bundle writer error: failed to encode bundle header to file {:?}\n\tcaused by: {}", path, err))
}
Write(err: io::Error, path: PathBuf) {
cause(err)
context(path: &'a Path, err: io::Error) -> (err, path.to_path_buf())
description("Failed to write data to file")
display("Bundle writer error: failed to write data to file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to write data to file"))
display("{}", tr_format!("Bundle writer error: failed to write data to file {:?}\n\tcaused by: {}", path, err))
}
}
}
@ -54,23 +54,31 @@ pub struct BundleWriter {
crypto: Arc<Mutex<Crypto>>,
raw_size: usize,
chunk_count: usize,
chunks: ChunkList,
chunks: ChunkList
}
impl BundleWriter {
pub fn new(mode: BundleMode, hash_method: HashMethod, compression: Option<Compression>, encryption: Option<Encryption>, crypto: Arc<Mutex<Crypto>>) -> Result<Self, BundleWriterError> {
pub fn new(
mode: BundleMode,
hash_method: HashMethod,
compression: Option<Compression>,
encryption: Option<Encryption>,
crypto: Arc<Mutex<Crypto>>,
) -> Result<Self, BundleWriterError> {
let compression_stream = match compression {
Some(ref compression) => Some(try!(compression.compress_stream().map_err(BundleWriterError::CompressionSetup))),
None => None
Some(ref compression) => Some(try!(compression.compress_stream().map_err(
BundleWriterError::CompressionSetup
))),
None => None,
};
Ok(BundleWriter {
mode: mode,
hash_method: hash_method,
mode,
hash_method,
data: vec![],
compression: compression,
compression_stream: compression_stream,
encryption: encryption,
crypto: crypto,
compression,
compression_stream,
encryption,
crypto,
raw_size: 0,
chunk_count: 0,
chunks: ChunkList::new()
@ -79,19 +87,23 @@ impl BundleWriter {
pub fn add(&mut self, chunk: &[u8], hash: Hash) -> Result<usize, BundleWriterError> {
if let Some(ref mut stream) = self.compression_stream {
try!(stream.process(chunk, &mut self.data).map_err(BundleWriterError::Compression))
try!(stream.process(chunk, &mut self.data).map_err(
BundleWriterError::Compression
))
} else {
self.data.extend_from_slice(chunk)
}
self.raw_size += chunk.len();
self.chunk_count += 1;
self.chunks.push((hash, chunk.len() as u32));
Ok(self.chunk_count-1)
Ok(self.chunk_count - 1)
}
pub fn finish(mut self, db: &BundleDb) -> Result<StoredBundle, BundleWriterError> {
if let Some(stream) = self.compression_stream {
try!(stream.finish(&mut self.data).map_err(BundleWriterError::Compression))
try!(stream.finish(&mut self.data).map_err(
BundleWriterError::Compression
))
}
if let Some(ref encryption) = self.encryption {
self.data = try!(self.crypto.lock().unwrap().encrypt(encryption, &self.data));
@ -115,7 +127,7 @@ impl BundleWriter {
chunk_count: self.chunk_count,
id: id.clone(),
raw_size: self.raw_size,
encoded_size: encoded_size,
encoded_size,
chunk_list_size: chunk_data.len(),
timestamp: Local::now().timestamp()
};
@ -127,12 +139,19 @@ impl BundleWriter {
encryption: self.encryption,
info_size: info_data.len()
};
try!(msgpack::encode_to_stream(&header, &mut file).context(&path as &Path));
try!(msgpack::encode_to_stream(&header, &mut file).context(
&path as &Path
));
try!(file.write_all(&info_data).context(&path as &Path));
try!(file.write_all(&chunk_data).context(&path as &Path));
try!(file.write_all(&self.data).context(&path as &Path));
path = path.strip_prefix(db.layout.base_path()).unwrap().to_path_buf();
Ok(StoredBundle { path: path, info: info })
path = path.strip_prefix(db.layout.base_path())
.unwrap()
.to_path_buf();
Ok(StoredBundle {
path,
info
})
}
#[inline]


@ -25,13 +25,15 @@ impl ChunkerType {
"rabin" => Ok(ChunkerType::Rabin((avg_size, seed as u32))),
"fastcdc" => Ok(ChunkerType::FastCdc((avg_size, seed))),
"fixed" => Ok(ChunkerType::Fixed(avg_size)),
_ => Err("Unsupported chunker type")
_ => Err(tr!("Unsupported chunker type")),
}
}
pub fn from_string(name: &str) -> Result<Self, &'static str> {
let (name, size) = if let Some(pos) = name.find('/') {
let size = try!(usize::from_str(&name[pos+1..]).map_err(|_| "Chunk size must be a number"));
let size = try!(usize::from_str(&name[pos + 1..]).map_err(
|_| tr!("Chunk size must be a number")
));
let name = &name[..pos];
(name, size)
} else {
@ -62,21 +64,23 @@ impl ChunkerType {
pub fn avg_size(&self) -> usize {
match *self {
ChunkerType::Ae(size) | ChunkerType::Fixed(size) => size,
ChunkerType::Ae(size) |
ChunkerType::Fixed(size) => size,
ChunkerType::Rabin((size, _seed)) => size,
ChunkerType::FastCdc((size, _seed)) => size
ChunkerType::FastCdc((size, _seed)) => size,
}
}
pub fn to_string(&self) -> String {
format!("{}/{}", self.name(), self.avg_size()/1024)
format!("{}/{}", self.name(), self.avg_size() / 1024)
}
pub fn seed(&self) -> u64 {
match *self {
ChunkerType::Ae(_size) | ChunkerType::Fixed(_size) => 0,
ChunkerType::Rabin((_size, seed)) => seed as u64,
ChunkerType::FastCdc((_size, seed)) => seed
ChunkerType::Ae(_size) |
ChunkerType::Fixed(_size) => 0,
ChunkerType::Rabin((_size, seed)) => u64::from(seed),
ChunkerType::FastCdc((_size, seed)) => seed,
}
}
}


@ -7,7 +7,7 @@ use std::ptr;
pub struct AeChunker {
buffer: [u8; 4096],
buffer: [u8; 0x1000],
buffered: usize,
window_size: usize
}
@ -18,16 +18,16 @@ impl AeChunker {
//let window_size = (avg_size as f64 / (consts::E - 1.0)) as usize;
let window_size = avg_size - 256;
AeChunker{
buffer: [0; 4096],
buffer: [0; 0x1000],
buffered: 0,
window_size: window_size,
window_size,
}
}
}
impl Chunker for AeChunker {
#[allow(unknown_lints,explicit_counter_loop)]
fn chunk(&mut self, r: &mut Read, mut w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
let mut max;
let mut pos = 0;
let mut max_pos = 0;


@ -1,8 +1,3 @@
#![feature(test)]
extern crate test;
extern crate chunking;
use chunking::*;
use std::io::{self, Write, Cursor};
@ -26,10 +21,22 @@ fn random_data(seed: u64, size: usize) -> Vec<u8> {
}
struct DevNull;
struct CutPositions(Vec<u64>, u64);
impl Write for DevNull {
impl CutPositions {
pub fn new() -> Self {
CutPositions(vec![], 0)
}
pub fn positions(&self) -> &[u64] {
&self.0
}
}
impl Write for CutPositions {
fn write(&mut self, data: &[u8]) -> Result<usize, io::Error> {
self.1 += data.len() as u64;
self.0.push(self.1);
Ok(data.len())
}
@ -53,7 +60,9 @@ fn test_fixed_8192(b: &mut Bencher) {
b.iter(|| {
let mut chunker = FixedChunker::new(8*1024);
let mut cursor = Cursor::new(&data);
while chunker.chunk(&mut cursor, &mut DevNull).unwrap() == ChunkerStatus::Continue {}
let mut sink = CutPositions::new();
while chunker.chunk(&mut cursor, &mut sink).unwrap() == ChunkerStatus::Continue {};
test::black_box(sink.positions().len())
})
}
@ -72,7 +81,9 @@ fn test_ae_8192(b: &mut Bencher) {
b.iter(|| {
let mut chunker = AeChunker::new(8*1024);
let mut cursor = Cursor::new(&data);
while chunker.chunk(&mut cursor, &mut DevNull).unwrap() == ChunkerStatus::Continue {}
let mut sink = CutPositions::new();
while chunker.chunk(&mut cursor, &mut sink).unwrap() == ChunkerStatus::Continue {};
test::black_box(sink.positions().len())
})
}
@ -91,7 +102,9 @@ fn test_rabin_8192(b: &mut Bencher) {
b.iter(|| {
let mut chunker = RabinChunker::new(8*1024, 0);
let mut cursor = Cursor::new(&data);
while chunker.chunk(&mut cursor, &mut DevNull).unwrap() == ChunkerStatus::Continue {}
let mut sink = CutPositions::new();
while chunker.chunk(&mut cursor, &mut sink).unwrap() == ChunkerStatus::Continue {};
test::black_box(sink.positions().len())
})
}
@ -110,6 +123,8 @@ fn test_fastcdc_8192(b: &mut Bencher) {
b.iter(|| {
let mut chunker = FastCdcChunker::new(8*1024, 0);
let mut cursor = Cursor::new(&data);
while chunker.chunk(&mut cursor, &mut DevNull).unwrap() == ChunkerStatus::Continue {}
let mut sink = CutPositions::new();
while chunker.chunk(&mut cursor, &mut sink).unwrap() == ChunkerStatus::Continue {};
test::black_box(sink.positions().len())
})
}

src/chunking/fastcdc.rs (new file, 130 lines)

@ -0,0 +1,130 @@
use super::*;
use std::ptr;
use std::cmp;
// FastCDC
// Paper: "FastCDC: a Fast and Efficient Content-Defined Chunking Approach for Data Deduplication"
// Paper-URL: https://www.usenix.org/system/files/conference/atc16/atc16-paper-xia.pdf
// Presentation: https://www.usenix.org/sites/default/files/conference/protected-files/atc16_slides_xia.pdf
// Creating 256 pseudo-random values (based on Knuth's MMIX)
fn create_gear(seed: u64) -> [u64; 256] {
let mut table = [0u64; 256];
let a = 6_364_136_223_846_793_005;
let c = 1_442_695_040_888_963_407;
let mut v = seed;
for t in &mut table.iter_mut() {
v = v.wrapping_mul(a).wrapping_add(c);
*t = v;
}
table
}
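The constants a and c are the parameters of Knuth's MMIX linear congruential generator, so the gear table is a pure function of the seed and cut points stay reproducible across runs. A quick sanity sketch (with seed 0, the first LCG step yields exactly the additive constant c):

let table = create_gear(0);
assert_eq!(table[0], 1_442_695_040_888_963_407);
assert_eq!(&create_gear(0)[..], &create_gear(0)[..]); // deterministic
assert_ne!(&create_gear(0)[..], &create_gear(1)[..]); // seed-dependent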
fn get_masks(avg_size: usize, nc_level: usize, seed: u64) -> (u64, u64) {
let bits = (avg_size.next_power_of_two() - 1).count_ones();
if bits == 13 {
// From the paper
return (0x0003_5907_0353_0000, 0x0000_d900_0353_0000);
}
let mut mask = 0u64;
let mut v = seed;
let a = 6_364_136_223_846_793_005;
let c = 1_442_695_040_888_963_407;
while mask.count_ones() < bits - nc_level as u32 {
v = v.wrapping_mul(a).wrapping_add(c);
mask = (mask | 1).rotate_left(v as u32 & 0x3f);
}
let mask_long = mask;
while mask.count_ones() < bits + nc_level as u32 {
v = v.wrapping_mul(a).wrapping_add(c);
mask = (mask | 1).rotate_left(v as u32 & 0x3f);
}
let mask_short = mask;
(mask_short, mask_long)
}
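This implements FastCDC's normalized chunking: with bits = log2(avg_size), mask_short ends up with bits + nc_level set bits (used below the average size, where a cut should be less likely) and mask_long with bits - nc_level set bits (used above it, where a cut should be more likely). For the 8 KiB case the paper's masks are returned verbatim, and their popcounts line up with nc_level = 2:

let (mask_short, mask_long) = get_masks(8192, 2, 0); // bits == 13, paper masks
assert_eq!(mask_short.count_ones(), 13 + 2); // 15 set bits
assert_eq!(mask_long.count_ones(), 13 - 2);  // 11 set bits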
pub struct FastCdcChunker {
buffer: [u8; 0x1000],
buffered: usize,
gear: [u64; 256],
min_size: usize,
max_size: usize,
avg_size: usize,
mask_long: u64,
mask_short: u64,
}
impl FastCdcChunker {
pub fn new(avg_size: usize, seed: u64) -> Self {
let (mask_short, mask_long) = get_masks(avg_size, 2, seed);
FastCdcChunker {
buffer: [0; 0x1000],
buffered: 0,
gear: create_gear(seed),
min_size: avg_size/4,
max_size: avg_size*8,
avg_size,
mask_long,
mask_short,
}
}
}
impl FastCdcChunker {
fn write_output(&mut self, w: &mut Write, pos: usize, max: usize) -> Result<ChunkerStatus, ChunkerError> {
debug_assert!(max <= self.buffer.len());
debug_assert!(pos <= self.buffer.len());
try!(w.write_all(&self.buffer[..pos]).map_err(ChunkerError::Write));
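// ptr::copy has memmove semantics, so the overlap that occurs when moving the
// unconsumed tail to the front of the buffer is fine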
unsafe { ptr::copy(self.buffer[pos..].as_ptr(), self.buffer.as_mut_ptr(), max-pos) };
self.buffered = max-pos;
Ok(ChunkerStatus::Continue)
}
}
impl Chunker for FastCdcChunker {
#[allow(unknown_lints,explicit_counter_loop,needless_range_loop)]
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
let mut max;
let mut hash = 0u64;
let mut pos = 0;
loop {
// Fill the buffer; there might be some bytes still in there from the last chunk
max = try!(r.read(&mut self.buffer[self.buffered..]).map_err(ChunkerError::Read)) + self.buffered;
// If nothing to do, finish
if max == 0 {
return Ok(ChunkerStatus::Finished)
}
let min_size_p = cmp::min(max, cmp::max(self.min_size as isize - pos as isize, 0) as usize);
let avg_size_p = cmp::min(max, cmp::max(self.avg_size as isize - pos as isize, 0) as usize);
let max_size_p = cmp::min(max, cmp::max(self.max_size as isize - pos as isize, 0) as usize);
// Skipping the first min_size bytes. This is ok, as the same data still results in the same hash.
if self.avg_size > pos {
for i in min_size_p..avg_size_p {
hash = (hash << 1).wrapping_add(self.gear[self.buffer[i] as usize]);
if hash & self.mask_short == 0 {
return self.write_output(w, i + 1, max);
}
}
}
if self.max_size > pos {
for i in avg_size_p..max_size_p {
hash = (hash << 1).wrapping_add(self.gear[self.buffer[i] as usize]);
if hash & self.mask_long == 0 {
return self.write_output(w, i+1, max);
}
}
}
if max + pos >= self.max_size {
return self.write_output(w, max_size_p, max);
}
pos += max;
try!(w.write_all(&self.buffer[..max]).map_err(ChunkerError::Write));
self.buffered = 0;
}
}
}
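A hedged usage sketch, mirroring the tests in this diff: the chunker is pull-driven and emits one chunk per call into any Write sink until it reports Finished.

use std::io::Cursor;

let data = vec![0u8; 1 << 20];
let mut chunker = FastCdcChunker::new(8 * 1024, 0);
let mut cursor = Cursor::new(&data);
let mut chunks: Vec<Vec<u8>> = Vec::new();
loop {
    let mut chunk = Vec::new();
    let status = chunker.chunk(&mut cursor, &mut chunk).unwrap();
    if !chunk.is_empty() {
        chunks.push(chunk);
    }
    if status == ChunkerStatus::Finished {
        break;
    }
}
// every input byte lands in exactly one chunk, in order
assert_eq!(chunks.iter().map(|c| c.len()).sum::<usize>(), data.len());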


@ -4,14 +4,14 @@ use std::cmp::min;
pub struct FixedChunker {
buffer: [u8; 4096],
buffer: [u8; 0x1000],
size: usize
}
impl FixedChunker {
pub fn new(avg_size: usize) -> FixedChunker {
FixedChunker{
buffer: [0; 4096],
buffer: [0; 0x1000],
size: avg_size,
}
}
@ -19,7 +19,7 @@ impl FixedChunker {
impl Chunker for FixedChunker {
#[allow(unknown_lints,explicit_counter_loop)]
fn chunk(&mut self, r: &mut Read, mut w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
let mut todo = self.size;
loop {
// Fill the buffer; there might be some bytes still in there from the last chunk


@ -1,11 +1,11 @@
#[macro_use] extern crate quick_error;
use std::io::{self, Write, Read};
mod fixed;
mod ae;
mod rabin;
mod fastcdc;
#[cfg(test)] mod test;
#[cfg(feature = "bench")] mod benches;
pub use self::fixed::FixedChunker;
pub use self::ae::AeChunker;
@ -25,18 +25,18 @@ quick_error!{
pub enum ChunkerError {
Read(err: io::Error) {
cause(err)
description("Failed to read input")
display("Chunker error: failed to read input\n\tcaused by: {}", err)
description(tr!("Failed to read input"))
display("{}", tr_format!("Chunker error: failed to read input\n\tcaused by: {}", err))
}
Write(err: io::Error) {
cause(err)
description("Failed to write to output")
display("Chunker error: failed to write to output\n\tcaused by: {}", err)
description(tr!("Failed to write to output"))
display("{}", tr_format!("Chunker error: failed to write to output\n\tcaused by: {}", err))
}
Custom(reason: &'static str) {
from()
description("Custom error")
display("Chunker error: {}", reason)
description(tr!("Custom error"))
display("{}", tr_format!("Chunker error: {}", reason))
}
}
}


@ -34,7 +34,7 @@ fn create_table(alpha: u32, window_size: usize) -> [u32; 256] {
pub struct RabinChunker {
buffer: [u8; 4096],
buffer: [u8; 0x1000],
buffered: usize,
seed: u32,
alpha: u32,
@ -50,24 +50,24 @@ impl RabinChunker {
pub fn new(avg_size: usize, seed: u32) -> Self {
let chunk_mask = (avg_size as u32).next_power_of_two() - 1;
let window_size = avg_size/4-1;
let alpha = 1664525;//153191;
let alpha = 1_664_525;//153191;
RabinChunker {
buffer: [0; 4096],
buffer: [0; 0x1000],
buffered: 0,
table: create_table(alpha, window_size),
alpha: alpha,
seed: seed,
alpha,
seed,
min_size: avg_size/4,
max_size: avg_size*4,
window_size: window_size,
chunk_mask: chunk_mask,
window_size,
chunk_mask,
}
}
}
impl Chunker for RabinChunker {
#[allow(unknown_lints,explicit_counter_loop)]
fn chunk(&mut self, r: &mut Read, mut w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
fn chunk(&mut self, r: &mut Read, w: &mut Write) -> Result<ChunkerStatus, ChunkerError> {
let mut max;
let mut hash = 0u32;
let mut pos = 0;
@ -88,7 +88,7 @@ impl Chunker for RabinChunker {
return Ok(ChunkerStatus::Continue);
}
// Hash update
hash = hash.wrapping_mul(self.alpha).wrapping_add(val as u32);
hash = hash.wrapping_mul(self.alpha).wrapping_add(u32::from(val));
if pos >= self.window_size {
let take = window.pop_front().unwrap();
hash = hash.wrapping_sub(self.table[take as usize]);


@ -1,6 +1,4 @@
extern crate chunking;
use chunking::*;
use super::*;
use std::io::Cursor;
@ -21,7 +19,7 @@ fn random_data(seed: u64, size: usize) -> Vec<u8> {
data
}
fn test_chunking(chunker: &mut Chunker, data: &[u8]) -> usize {
fn test_chunking(chunker: &mut Chunker, data: &[u8], chunk_lens: Option<&[usize]>) -> usize {
let mut cursor = Cursor::new(&data);
let mut chunks = vec![];
let mut chunk = vec![];
@ -36,6 +34,12 @@ fn test_chunking(chunker: &mut Chunker, data: &[u8]) -> usize {
assert_eq!(&data[pos..pos+chunk.len()], chunk as &[u8]);
pos += chunk.len();
}
if let Some(chunk_lens) = chunk_lens {
assert_eq!(chunk_lens.len(), chunks.len());
for (i, chunk) in chunks.iter().enumerate() {
assert_eq!(chunk.len(), chunk_lens[i]);
}
}
assert_eq!(pos, data.len());
chunks.len()
}
@ -43,10 +47,13 @@ fn test_chunking(chunker: &mut Chunker, data: &[u8]) -> usize {
#[test]
fn test_fixed() {
test_chunking(&mut FixedChunker::new(8192), &random_data(0, 128*1024),
Some(&[8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
8192, 8192, 8192, 8192, 8192, 8192, 0]));
let data = random_data(0, 10*1024*1024);
for n in &[1usize,2,4,8,16,32,64,128,256,512,1024] {
let mut chunker = FixedChunker::new(1024*n);
let len = test_chunking(&mut chunker, &data);
let len = test_chunking(&mut chunker, &data, None);
assert!(len >= data.len()/n/1024/4);
assert!(len <= data.len()/n/1024*4);
}
@ -54,10 +61,13 @@ fn test_fixed() {
#[test]
fn test_ae() {
test_chunking(&mut AeChunker::new(8192), &random_data(0, 128*1024),
Some(&[7979, 8046, 7979, 8192, 8192, 8192, 7965, 8158, 8404, 8241,
8011, 8302, 8120, 8335, 8192, 8192, 572]));
let data = random_data(0, 10*1024*1024);
for n in &[1usize,2,4,8,16,32,64,128,256,512,1024] {
let mut chunker = AeChunker::new(1024*n);
let len = test_chunking(&mut chunker, &data);
let len = test_chunking(&mut chunker, &data, None);
assert!(len >= data.len()/n/1024/4);
assert!(len <= data.len()/n/1024*4);
}
@ -65,10 +75,13 @@ fn test_ae() {
#[test]
fn test_rabin() {
test_chunking(&mut RabinChunker::new(8192, 0), &random_data(0, 128*1024),
Some(&[8604, 4190, 32769, 3680, 26732, 3152, 9947, 6487, 25439, 3944,
6128]));
let data = random_data(0, 10*1024*1024);
for n in &[1usize,2,4,8,16,32,64,128,256,512,1024] {
let mut chunker = RabinChunker::new(1024*n, 0);
let len = test_chunking(&mut chunker, &data);
let len = test_chunking(&mut chunker, &data, None);
assert!(len >= data.len()/n/1024/4);
assert!(len <= data.len()/n/1024*4);
}
@ -76,10 +89,13 @@ fn test_rabin() {
#[test]
fn test_fastcdc() {
test_chunking(&mut FastCdcChunker::new(8192, 0), &random_data(0, 128*1024),
Some(&[8712, 8018, 2847, 9157, 8997, 8581, 8867, 5422, 5412, 9478,
11553, 9206, 4606, 8529, 3821, 11342, 6524]));
let data = random_data(0, 10*1024*1024);
for n in &[1usize,2,4,8,16,32,64,128,256,512,1024] {
let mut chunker = FastCdcChunker::new(1024*n, 0);
let len = test_chunking(&mut chunker, &data);
let len = test_chunking(&mut chunker, &data, None);
assert!(len >= data.len()/n/1024/4);
assert!(len <= data.len()/n/1024*4);
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::io::{self, Cursor, Read, Write};
use std::fs::File;
@ -41,51 +41,80 @@ fn chunk(data: &[u8], mut chunker: Box<Chunker>, sink: &mut ChunkSink) {
}
#[allow(dead_code)]
pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Option<Compression>, encrypt: bool,hash: HashMethod) {
pub fn run(
path: &str,
bundle_size: usize,
chunker: ChunkerType,
compression: Option<Compression>,
encrypt: bool,
hash: HashMethod,
) {
let mut total_write_time = 0.0;
let mut total_read_time = 0.0;
println!("Reading input file ...");
tr_println!("Reading input file ...");
let mut file = File::open(path).unwrap();
let total_size = file.metadata().unwrap().len();
let mut size = total_size;
let mut data = Vec::with_capacity(size as usize);
let read_time = Duration::span(|| {
file.read_to_end(&mut data).unwrap();
}).num_milliseconds() as f32 / 1_000.0;
println!("- {}, {}", to_duration(read_time), to_speed(size, read_time));
let read_time = Duration::span(|| { file.read_to_end(&mut data).unwrap(); })
.num_milliseconds() as f32 / 1_000.0;
println!(
"- {}, {}",
to_duration(read_time),
to_speed(size, read_time)
);
println!();
println!("Chunking data with {}, avg chunk size {} ...", chunker.name(), to_file_size(chunker.avg_size() as u64));
tr_println!(
"Chunking data with {}, avg chunk size {} ...",
chunker.name(),
to_file_size(chunker.avg_size() as u64)
);
let mut chunk_sink = ChunkSink {
chunks: Vec::with_capacity(2*size as usize/chunker.avg_size()),
chunks: Vec::with_capacity(2 * size as usize / chunker.avg_size()),
written: 0,
pos: 0
};
let chunker = chunker.create();
let chunk_time = Duration::span(|| {
chunk(&data, chunker, &mut chunk_sink)
}).num_milliseconds() as f32 / 1_000.0;
let chunk_time = Duration::span(|| chunk(&data, chunker, &mut chunk_sink))
.num_milliseconds() as f32 / 1_000.0;
total_write_time += chunk_time;
println!("- {}, {}", to_duration(chunk_time), to_speed(size, chunk_time));
println!(
"- {}, {}",
to_duration(chunk_time),
to_speed(size, chunk_time)
);
let mut chunks = chunk_sink.chunks;
assert_eq!(chunks.iter().map(|c| c.1).sum::<usize>(), size as usize);
let chunk_size_avg = size as f32 / chunks.len() as f32;
let chunk_size_stddev = (chunks.iter().map(|c| (c.1 as f32 - chunk_size_avg).powi(2)).sum::<f32>() / (chunks.len() as f32 - 1.0)).sqrt();
println!("- {} chunks, avg size: {} ±{}", chunks.len(), to_file_size(chunk_size_avg as u64), to_file_size(chunk_size_stddev as u64));
let chunk_size_stddev = (chunks
.iter()
.map(|c| (c.1 as f32 - chunk_size_avg).powi(2))
.sum::<f32>() /
(chunks.len() as f32 - 1.0))
.sqrt();
tr_println!(
"- {} chunks, avg size: {} ±{}",
chunks.len(),
to_file_size(chunk_size_avg as u64),
to_file_size(chunk_size_stddev as u64)
);
println!();
println!("Hashing chunks with {} ...", hash.name());
tr_println!("Hashing chunks with {} ...", hash.name());
let mut hashes = Vec::with_capacity(chunks.len());
let hash_time = Duration::span(|| {
for &(pos, len) in &chunks {
hashes.push(hash.hash(&data[pos..pos+len]))
}
let hash_time = Duration::span(|| for &(pos, len) in &chunks {
hashes.push(hash.hash(&data[pos..pos + len]))
}).num_milliseconds() as f32 / 1_000.0;
total_write_time += hash_time;
println!("- {}, {}", to_duration(hash_time), to_speed(size, hash_time));
println!(
"- {}, {}",
to_duration(hash_time),
to_speed(size, hash_time)
);
let mut seen_hashes = HashSet::with_capacity(hashes.len());
let mut dups = Vec::new();
for (i, hash) in hashes.into_iter().enumerate() {
@ -99,7 +128,12 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
let (_, len) = chunks.remove(*i);
dup_size += len;
}
println!("- {} duplicate chunks, {}, {:.1}% saved", dups.len(), to_file_size(dup_size as u64), dup_size as f32 / size as f32*100.0);
tr_println!(
"- {} duplicate chunks, {}, {:.1}% saved by internal deduplication",
dups.len(),
to_file_size(dup_size as u64),
dup_size as f32 / size as f32 * 100.0
);
size -= dup_size as u64;
let mut bundles = Vec::new();
@ -107,16 +141,16 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
if let Some(compression) = compression.clone() {
println!();
println!("Compressing chunks with {} ...", compression.to_string());
tr_println!("Compressing chunks with {} ...", compression.to_string());
let compress_time = Duration::span(|| {
let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
let mut c = compression.compress_stream().unwrap();
for &(pos, len) in &chunks {
c.process(&data[pos..pos+len], &mut bundle).unwrap();
c.process(&data[pos..pos + len], &mut bundle).unwrap();
if bundle.len() >= bundle_size {
c.finish(&mut bundle).unwrap();
bundles.push(bundle);
bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
c = compression.compress_stream().unwrap();
}
}
@ -124,17 +158,26 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
bundles.push(bundle);
}).num_milliseconds() as f32 / 1_000.0;
total_write_time += compress_time;
println!("- {}, {}", to_duration(compress_time), to_speed(size, compress_time));
println!(
"- {}, {}",
to_duration(compress_time),
to_speed(size, compress_time)
);
let compressed_size = bundles.iter().map(|b| b.len()).sum::<usize>();
println!("- {} bundles, {}, {:.1}% saved", bundles.len(), to_file_size(compressed_size as u64), (size as f32 - compressed_size as f32)/size as f32*100.0);
tr_println!(
"- {} bundles, {}, {:.1}% saved",
bundles.len(),
to_file_size(compressed_size as u64),
(size as f32 - compressed_size as f32) / size as f32 * 100.0
);
size = compressed_size as u64;
} else {
let mut bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
let mut bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
for &(pos, len) in &chunks {
bundle.extend_from_slice(&data[pos..pos+len]);
bundle.extend_from_slice(&data[pos..pos + len]);
if bundle.len() >= bundle_size {
bundles.push(bundle);
bundle = Vec::with_capacity(bundle_size + 2*chunk_size_avg as usize);
bundle = Vec::with_capacity(bundle_size + 2 * chunk_size_avg as usize);
}
}
bundles.push(bundle);
@ -148,49 +191,70 @@ pub fn run(path: &str, bundle_size: usize, chunker: ChunkerType, compression: Op
crypto.add_secret_key(public, secret);
let encryption = (EncryptionMethod::Sodium, public[..].to_vec().into());
println!("Encrypting bundles...");
tr_println!("Encrypting bundles...");
let mut encrypted_bundles = Vec::with_capacity(bundles.len());
let encrypt_time = Duration::span(|| {
for bundle in bundles {
encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
}
let encrypt_time = Duration::span(|| for bundle in bundles {
encrypted_bundles.push(crypto.encrypt(&encryption, &bundle).unwrap());
}).num_milliseconds() as f32 / 1_000.0;
println!("- {}, {}", to_duration(encrypt_time), to_speed(size, encrypt_time));
println!(
"- {}, {}",
to_duration(encrypt_time),
to_speed(size, encrypt_time)
);
total_write_time += encrypt_time;
println!();
println!("Decrypting bundles...");
tr_println!("Decrypting bundles...");
bundles = Vec::with_capacity(encrypted_bundles.len());
let decrypt_time = Duration::span(|| {
for bundle in encrypted_bundles {
bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
}
let decrypt_time = Duration::span(|| for bundle in encrypted_bundles {
bundles.push(crypto.decrypt(&encryption, &bundle).unwrap());
}).num_milliseconds() as f32 / 1_000.0;
println!("- {}, {}", to_duration(decrypt_time), to_speed(size, decrypt_time));
println!(
"- {}, {}",
to_duration(decrypt_time),
to_speed(size, decrypt_time)
);
total_read_time += decrypt_time;
}
if let Some(compression) = compression {
println!();
println!("Decompressing bundles with {} ...", compression.to_string());
let mut dummy = ChunkSink { chunks: vec![], written: 0, pos: 0 };
let decompress_time = Duration::span(|| {
for bundle in &bundles {
let mut c = compression.decompress_stream().unwrap();
c.process(bundle, &mut dummy).unwrap();
c.finish(&mut dummy).unwrap();
}
tr_println!("Decompressing bundles with {} ...", compression.to_string());
let mut dummy = ChunkSink {
chunks: vec![],
written: 0,
pos: 0
};
let decompress_time = Duration::span(|| for bundle in &bundles {
let mut c = compression.decompress_stream().unwrap();
c.process(bundle, &mut dummy).unwrap();
c.finish(&mut dummy).unwrap();
}).num_milliseconds() as f32 / 1_000.0;
println!("- {}, {}", to_duration(decompress_time), to_speed(size, decompress_time));
println!(
"- {}, {}",
to_duration(decompress_time),
to_speed(total_size - dup_size as u64, decompress_time)
);
total_read_time += decompress_time;
}
println!();
println!("Total storage size: {} / {}, ratio: {:.1}%", to_file_size(size as u64), to_file_size(total_size as u64), size as f32/total_size as f32*100.0);
println!("Total processing speed: {}", to_speed(total_size, total_write_time));
println!("Total read speed: {}", to_speed(total_size, total_read_time));
tr_println!(
"Total storage size: {} / {}, ratio: {:.1}%",
to_file_size(size as u64),
to_file_size(total_size as u64),
size as f32 / total_size as f32 * 100.0
);
tr_println!(
"Total processing speed: {}",
to_speed(total_size, total_write_time)
);
tr_println!(
"Total read speed: {}",
to_speed(total_size, total_read_time)
);
}

File diff suppressed because it is too large


@ -1,40 +1,45 @@
use log::{self, LogRecord, LogLevel, LogMetadata};
use log;
pub use log::SetLoggerError;
use ansi_term::{Color, Style};
use std::io::Write;
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
struct Logger(LogLevel);
struct Logger(log::Level);
impl log::Log for Logger {
fn enabled(&self, metadata: &LogMetadata) -> bool {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= self.0
}
fn log(&self, record: &LogRecord) {
fn flush(&self) {}
fn log(&self, record: &log::Record) {
if self.enabled(record.metadata()) {
match record.level() {
LogLevel::Error => println_stderr!("{}: {}", Color::Red.bold().paint("error"), record.args()),
LogLevel::Warn => println_stderr!("{}: {}", Color::Yellow.bold().paint("warning"), record.args()),
LogLevel::Info => println_stderr!("{}: {}", Color::Green.bold().paint("info"), record.args()),
LogLevel::Debug => println_stderr!("{}: {}", Style::new().bold().paint("debug"), record.args()),
LogLevel::Trace => println_stderr!("{}: {}", "trace", record.args())
log::Level::Error => {
eprintln!("{}: {}", Color::Red.bold().paint("error"), record.args())
}
log::Level::Warn => {
eprintln!(
"{}: {}",
Color::Yellow.bold().paint("warning"),
record.args()
)
}
log::Level::Info => {
eprintln!("{}: {}", Color::Green.bold().paint("info"), record.args())
}
log::Level::Debug => {
eprintln!("{}: {}", Style::new().bold().paint("debug"), record.args())
}
log::Level::Trace => eprintln!("{}: {}", "trace", record.args()),
}
}
}
}
pub fn init(level: LogLevel) -> Result<(), SetLoggerError> {
log::set_logger(|max_log_level| {
max_log_level.set(level.to_log_level_filter());
Box::new(Logger(level))
})
pub fn init(level: log::Level) -> Result<(), SetLoggerError> {
let logger = Logger(level);
log::set_max_level(level.to_level_filter());
log::set_boxed_logger(Box::new(logger))
}
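This hunk is the log 0.3 → 0.4 migration: instead of handing log::set_logger a closure, the logger is installed with log::set_boxed_logger, and the level cap is set separately via log::set_max_level. A hedged call-site sketch (assuming the module is mounted as logger; the macros come from the log crate):

fn main() {
    logger::init(log::Level::Info).expect("logger can only be installed once");
    info!("repository opened");
    debug!("filtered out: Debug is above the Info cap");
}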

File diff suppressed because it is too large


@ -1,6 +1,3 @@
extern crate mmap;
#[macro_use] extern crate quick_error;
use std::path::Path;
use std::fs::{File, OpenOptions};
use std::mem;
@ -11,6 +8,7 @@ use std::os::unix::io::AsRawFd;
use mmap::{MemoryMap, MapOption, MapError};
use ::prelude::*;
pub const MAX_USAGE: f64 = 0.9;
pub const MIN_USAGE: f64 = 0.35;
@ -23,30 +21,30 @@ quick_error!{
Io(err: io::Error) {
from()
cause(err)
description("Failed to open index file")
display("Index error: failed to open the index file\n\tcaused by: {}", err)
description(tr!("Failed to open index file"))
display("{}", tr_format!("Index error: failed to open the index file\n\tcaused by: {}", err))
}
Mmap(err: MapError) {
from()
cause(err)
description("Failed to memory-map the index file")
display("Index error: failed to memory-map the index file\n\tcaused by: {}", err)
description(tr!("Failed to memory-map the index file"))
display("{}", tr_format!("Index error: failed to memory-map the index file\n\tcaused by: {}", err))
}
WrongMagic {
description("Wrong header")
display("Index error: file has the wrong magic header")
description(tr!("Wrong header"))
display("{}", tr!("Index error: file has the wrong magic header"))
}
UnsupportedVersion(version: u8) {
description("Unsupported version")
display("Index error: index file has unsupported version: {}", version)
description(tr!("Unsupported version"))
display("{}", tr_format!("Index error: index file has unsupported version: {}", version))
}
WrongPosition(should: usize, is: LocateResult) {
description("Key at wrong position")
display("Index error: key has wrong position, expected at: {}, but is at: {:?}", should, is)
description(tr!("Key at wrong position"))
display("{}", tr_format!("Index error: key has wrong position, expected at: {}, but is at: {:?}", should, is))
}
WrongEntryCount(header: usize, actual: usize) {
description("Wrong entry count")
display("Index error: index has wrong entry count, expected {}, but is {}", header, actual)
description(tr!("Wrong entry count"))
display("{}", tr_format!("Index error: index has wrong entry count, expected {}, but is {}", header, actual))
}
}
}
@ -61,35 +59,66 @@ pub struct Header {
}
pub trait Key: Clone + Eq + Copy + Default {
pub trait Key: Eq + Copy + Default {
fn hash(&self) -> u64;
fn is_used(&self) -> bool;
fn clear(&mut self);
}
pub trait Value: Clone + Copy + Default {}
pub trait Value: Copy + Default {}
#[repr(packed)]
#[derive(Clone, Default)]
#[derive(Default)]
pub struct Entry<K, V> {
pub key: K,
pub data: V
key: K,
data: V
}
impl<K: Key, V> Entry<K, V> {
#[inline]
fn is_used(&self) -> bool {
self.key.is_used()
unsafe { self.key.is_used() }
}
#[inline]
fn clear(&mut self) {
self.key.clear()
unsafe { self.key.clear() }
}
#[inline]
fn get(&self) -> (&K, &V) {
unsafe { (&self.key, &self.data) }
}
#[inline]
fn get_mut(&mut self) -> (&K, &mut V) {
unsafe { (&self.key, &mut self.data) }
}
#[inline]
fn get_key(&self) -> &K {
unsafe { &self.key }
}
#[inline]
fn get_mut_key(&mut self) -> &mut K {
unsafe { &mut self.key }
}
#[inline]
fn get_data(&self) -> &V {
unsafe { &self.data }
}
#[inline]
fn get_mut_data(&mut self) -> &mut V {
unsafe { &mut self.data }
}
}
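The new unsafe accessors exist because Entry is #[repr(packed)]: its fields may be misaligned, and taking an ordinary reference to a misaligned field is undefined behavior, so every field borrow is funneled through this one place. A minimal illustration of the hazard (not from this codebase):

#[repr(packed)]
struct P { a: u8, b: u32 }

let p = P { a: 1, b: 2 };
let b = p.b;     // copying the field out by value is fine
// let r = &p.b; // a direct reference may be misaligned -- the UB these
//               // unsafe getters are meant to contain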
#[derive(Debug)]
pub enum LocateResult {
Found(usize), // Found the key at this position
@ -106,13 +135,14 @@ impl<'a, K: Key, V> Iterator for Iter<'a, K, V> {
while let Some((first, rest)) = self.0.split_first() {
self.0 = rest;
if first.is_used() {
return Some((&first.key, &first.data));
return Some(first.get())
}
}
None
}
}
#[allow(dead_code)]
pub struct IterMut<'a, K: 'static, V: 'static> (&'a mut [Entry<K, V>]);
impl<'a, K: Key, V> Iterator for IterMut<'a, K, V> {
@ -125,7 +155,7 @@ impl<'a, K: Key, V> Iterator for IterMut<'a, K, V> {
Some((first, rest)) => {
self.0 = rest;
if first.is_used() {
return Some((&first.key, &mut first.data))
return Some(first.get_mut())
}
}
}
@ -137,7 +167,7 @@ impl<'a, K: Key, V> Iterator for IterMut<'a, K, V> {
/// This method is unsafe as it potentially creates references to uninitialized memory
unsafe fn mmap_as_ref<K, V>(mmap: &MemoryMap, len: usize) -> (&'static mut Header, &'static mut [Entry<K, V>]) {
if mmap.len() < mem::size_of::<Header>() + len * mem::size_of::<Entry<K, V>>() {
panic!("Memory map too small");
tr_panic!("Memory map too small");
}
let header = &mut *(mmap.data() as *mut Header);
let ptr = mmap.data().offset(mem::size_of::<Header>() as isize) as *mut Entry<K, V>;
@ -192,12 +222,12 @@ impl<K: Key, V: Value> Index<K, V> {
max_entries: (header.capacity as f64 * MAX_USAGE) as usize,
min_entries: (header.capacity as f64 * MIN_USAGE) as usize,
entries: header.entries as usize,
fd: fd,
mmap: mmap,
data: data,
header: header
fd,
mmap,
data,
header
};
debug_assert!(index.check().is_ok(), "Inconsistent after creation");
debug_assert!(index.check().is_ok(), tr!("Inconsistent after creation"));
Ok(index)
}
@ -238,6 +268,7 @@ impl<K: Key, V: Value> Index<K, V> {
self.max_entries = (capacity as f64 * MAX_USAGE) as usize;
}
#[allow(redundant_field_names)]
fn reinsert(&mut self, start: usize, end: usize) -> Result<(), IndexError> {
for pos in start..end {
let key;
@ -302,7 +333,7 @@ impl<K: Key, V: Value> Index<K, V> {
continue;
}
entries += 1;
match self.locate(&entry.key) {
match self.locate(entry.get_key()) {
LocateResult::Found(p) if p == pos => true,
found => return Err(IndexError::WrongPosition(pos, found))
};
@ -335,6 +366,11 @@ impl<K: Key, V: Value> Index<K, V> {
self.header.capacity = self.capacity as u64;
}
#[inline]
fn get_displacement(&self, entry: &Entry<K, V>, pos: usize) -> usize {
(pos + self.capacity - (entry.get_key().hash() as usize & self.mask)) & self.mask
}
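The displacement is the robin-hood probe distance: how far an entry sits from the slot its hash points at, with wrap-around handled by adding the capacity before masking (the capacity is a power of two, so mask = capacity - 1). A standalone sketch of the same arithmetic:

fn displacement(pos: usize, ideal: usize, capacity: usize) -> usize {
    let mask = capacity - 1; // capacity must be a power of two
    (pos + capacity - ideal) & mask
}

assert_eq!(displacement(1, 6, 8), 3); // wrapped past the end: 6 -> 7 -> 0 -> 1
assert_eq!(displacement(6, 6, 8), 0); // entry sits in its ideal slot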
/// Finds the position for this key
/// If the key is in the table, it will be the position of the key,
/// otherwise it will be the position where this key should be inserted
@ -346,10 +382,10 @@ impl<K: Key, V: Value> Index<K, V> {
if !entry.is_used() {
return LocateResult::Hole(pos);
}
if entry.key == *key {
if entry.get_key() == key {
return LocateResult::Found(pos);
}
let odist = (pos + self.capacity - (entry.key.hash() as usize & self.mask)) & self.mask;
let odist = self.get_displacement(entry, pos);
if dist > odist {
return LocateResult::Steal(pos);
}
@ -372,12 +408,12 @@ impl<K: Key, V: Value> Index<K, V> {
// we found a hole, stop shifting here
break;
}
if entry.key.hash() as usize & self.mask == pos {
if (entry.get_key().hash() as usize & self.mask) == pos {
// we found an entry at the right position, stop shifting here
break;
}
}
self.data[last_pos] = self.data[pos].clone();
self.data.swap(last_pos, pos);
}
self.data[last_pos].clear();
}
@ -388,7 +424,7 @@ impl<K: Key, V: Value> Index<K, V> {
match self.locate(key) {
LocateResult::Found(pos) => {
let mut old = *data;
mem::swap(&mut old, &mut self.data[pos].data);
mem::swap(&mut old, self.data[pos].get_mut_data());
Ok(Some(old))
},
LocateResult::Hole(pos) => {
@ -415,8 +451,8 @@ impl<K: Key, V: Value> Index<K, V> {
cur_pos = (cur_pos + 1) & self.mask;
let entry = &mut self.data[cur_pos];
if entry.is_used() {
mem::swap(&mut stolen_key, &mut entry.key);
mem::swap(&mut stolen_data, &mut entry.data);
mem::swap(&mut stolen_key, entry.get_mut_key());
mem::swap(&mut stolen_data, entry.get_mut_data());
} else {
entry.key = stolen_key;
entry.data = stolen_data;
@ -431,7 +467,7 @@ impl<K: Key, V: Value> Index<K, V> {
#[inline]
pub fn contains(&self, key: &K) -> bool {
debug_assert!(self.check().is_ok(), "Inconsistent before get");
debug_assert!(self.check().is_ok(), tr!("Inconsistent before get"));
match self.locate(key) {
LocateResult::Found(_) => true,
_ => false
@ -440,7 +476,7 @@ impl<K: Key, V: Value> Index<K, V> {
#[inline]
pub fn pos(&self, key: &K) -> Option<usize> {
debug_assert!(self.check().is_ok(), "Inconsistent before get");
debug_assert!(self.check().is_ok(), tr!("Inconsistent before get"));
match self.locate(key) {
LocateResult::Found(pos) => Some(pos),
_ => None
@ -449,7 +485,7 @@ impl<K: Key, V: Value> Index<K, V> {
#[inline]
pub fn get(&self, key: &K) -> Option<V> {
debug_assert!(self.check().is_ok(), "Inconsistent before get");
debug_assert!(self.check().is_ok(), tr!("Inconsistent before get"));
match self.locate(key) {
LocateResult::Found(pos) => Some(self.data[pos].data),
_ => None
@ -457,11 +493,12 @@ impl<K: Key, V: Value> Index<K, V> {
}
#[inline]
#[allow(dead_code)]
pub fn modify<F>(&mut self, key: &K, mut f: F) -> bool where F: FnMut(&mut V) {
debug_assert!(self.check().is_ok(), "Inconsistent before get");
debug_assert!(self.check().is_ok(), tr!("Inconsistent before get"));
match self.locate(key) {
LocateResult::Found(pos) => {
f(&mut self.data[pos].data);
f(self.data[pos].get_mut_data());
true
},
_ => false
@ -487,7 +524,7 @@ impl<K: Key, V: Value> Index<K, V> {
while pos < self.capacity {
{
let entry = &mut self.data[pos];
if !entry.is_used() || f(&entry.key, &entry.data) {
if !entry.is_used() || f(entry.get_key(), entry.get_data()) {
pos += 1;
continue;
}
@ -507,6 +544,7 @@ impl<K: Key, V: Value> Index<K, V> {
}
#[inline]
#[allow(dead_code)]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut(self.data)
}
@ -522,6 +560,7 @@ impl<K: Key, V: Value> Index<K, V> {
}
#[inline]
#[allow(dead_code)]
pub fn is_empty(&self) -> bool {
self.entries == 0
}
@ -538,4 +577,26 @@ impl<K: Key, V: Value> Index<K, V> {
}
self.entries = 0;
}
#[allow(dead_code)]
pub fn statistics(&self) -> IndexStatistics {
IndexStatistics {
count: self.entries,
capacity: self.capacity,
size: self.size(),
displacement: ValueStats::from_iter(|| self.data.iter().enumerate().filter(
|&(_, entry)| entry.is_used()).map(
|(index, entry)| self.get_displacement(entry, index) as f32))
}
}
}
#[derive(Debug)]
pub struct IndexStatistics {
pub count: usize,
pub capacity: usize,
pub size: usize,
pub displacement: ValueStats
}
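Since IndexStatistics derives Debug, callers can dump it directly; the displacement distribution is the interesting part, as a growing mean displacement signals a crowded table. Given an open index (sketch):

let stats = index.statistics();
println!("{:?}", stats);
assert!(stats.count <= stats.capacity);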


@ -1,24 +1,32 @@
#![recursion_limit="128"]
#![allow(unknown_lints, float_cmp)]
#![cfg_attr(feature = "bench", feature(test))]
#[cfg(feature = "bench")]
extern crate test;
extern crate serde;
extern crate serde_bytes;
extern crate rmp_serde;
#[macro_use] extern crate serde_utils;
#[macro_use]
extern crate serde_utils;
extern crate squash_sys as squash;
extern crate blake2_rfc as blake2;
extern crate murmurhash3;
extern crate serde_yaml;
#[macro_use] extern crate quick_error;
#[macro_use]
extern crate quick_error;
extern crate chrono;
#[macro_use] extern crate clap;
#[macro_use] extern crate log;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate log;
extern crate byteorder;
extern crate sodiumoxide;
extern crate libsodium_sys;
extern crate ansi_term;
extern crate filetime;
extern crate regex;
#[macro_use] extern crate lazy_static;
#[macro_use]
extern crate lazy_static;
extern crate fuse;
extern crate rand;
extern crate time;
@ -28,9 +36,12 @@ extern crate pbr;
extern crate users;
extern crate libc;
extern crate tar;
extern crate index;
extern crate chunking;
#[macro_use]
extern crate runtime_fmt;
extern crate locale_config;
extern crate mmap;
#[macro_use] mod translation;
pub mod util;
mod bundledb;
mod repository;
@ -38,12 +49,14 @@ mod cli;
mod prelude;
mod mount;
mod chunker;
mod chunking;
mod index;
use std::process::exit;
fn main() {
match cli::run() {
Ok(()) => exit(0),
Err(code) => exit(code.code())
Err(code) => exit(code.code()),
}
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::path::Path;
use std::ffi::OsStr;
@ -69,6 +69,9 @@ fn convert_file_type(kind: FileType) -> fuse::FileType {
FileType::Directory => fuse::FileType::Directory,
FileType::File => fuse::FileType::RegularFile,
FileType::Symlink => fuse::FileType::Symlink,
FileType::BlockDevice => fuse::FileType::BlockDevice,
FileType::CharDevice => fuse::FileType::CharDevice,
FileType::NamedPipe => fuse::FileType::NamedPipe,
}
}
@ -110,18 +113,21 @@ impl FuseInode {
kind: convert_file_type(self.inode.file_type),
perm: self.inode.mode as u16,
nlink: 1,
uid: uid,
gid: gid,
rdev: 0,
uid,
gid,
rdev: self.inode.device.map_or(
0,
|(major, minor)| (major << 8) + minor
),
flags: 0
}
}
pub fn dir_list(&self) -> Option<Vec<(u64, fuse::FileType, String)>> {
if self.inode.file_type != FileType::Directory {
return None
return None;
}
let mut list = Vec::with_capacity(self.children.len()+2);
let mut list = Vec::with_capacity(self.children.len() + 2);
list.push((self.num, fuse::FileType::Directory, ".".to_string()));
if let Some(ref parent) = self.parent {
let parent = parent.borrow();
@ -131,7 +137,11 @@ impl FuseInode {
}
for ch in self.children.values() {
let child = ch.borrow();
list.push((child.num, convert_file_type(child.inode.file_type), child.inode.name.clone()));
list.push((
child.num,
convert_file_type(child.inode.file_type),
child.inode.name.clone()
));
}
Some(list)
}
@ -148,16 +158,19 @@ impl<'a> FuseFilesystem<'a> {
pub fn new(repository: &'a mut Repository) -> Result<Self, RepositoryError> {
Ok(FuseFilesystem {
next_id: 1,
repository: repository,
repository,
inodes: HashMap::new()
})
}
pub fn from_repository(repository: &'a mut Repository, path: Option<&str>) -> Result<Self, RepositoryError> {
pub fn from_repository(
repository: &'a mut Repository,
path: Option<&str>,
) -> Result<Self, RepositoryError> {
let mut backups = vec![];
let backup_map = match path {
Some(path) => try!(repository.get_backups(path)),
None => try!(repository.get_all_backups())
None => try!(repository.get_all_backups()),
};
for (name, backup) in backup_map {
let inode = try!(repository.get_inode(&backup.root));
@ -170,7 +183,7 @@ impl<'a> FuseFilesystem<'a> {
for part in name.split('/') {
parent = match fs.get_child(&parent, part).unwrap() {
Some(child) => child,
None => fs.add_virtual_directory(part.to_string(), Some(parent))
None => fs.add_virtual_directory(part.to_string(), Some(parent)),
};
}
let mut parent_mut = parent.borrow_mut();
@ -182,30 +195,52 @@ impl<'a> FuseFilesystem<'a> {
Ok(fs)
}
pub fn from_backup(repository: &'a mut Repository, backup: Backup) -> Result<Self, RepositoryError> {
pub fn from_backup(
repository: &'a mut Repository,
backup: Backup,
) -> Result<Self, RepositoryError> {
let inode = try!(repository.get_inode(&backup.root));
let mut fs = try!(FuseFilesystem::new(repository));
fs.add_inode(inode, None, backup.user_names, backup.group_names);
Ok(fs)
}
pub fn from_inode(repository: &'a mut Repository, backup: Backup, inode: Inode) -> Result<Self, RepositoryError> {
pub fn from_inode(
repository: &'a mut Repository,
backup: Backup,
inode: Inode,
) -> Result<Self, RepositoryError> {
let mut fs = try!(FuseFilesystem::new(repository));
fs.add_inode(inode, None, backup.user_names, backup.group_names);
Ok(fs)
}
pub fn add_virtual_directory(&mut self, name: String, parent: Option<FuseInodeRef>) -> FuseInodeRef {
self.add_inode(Inode {
name: name,
file_type: FileType::Directory,
..Default::default()
}, parent, HashMap::default(), HashMap::default())
pub fn add_virtual_directory(
&mut self,
name: String,
parent: Option<FuseInodeRef>,
) -> FuseInodeRef {
self.add_inode(
Inode {
name,
file_type: FileType::Directory,
..Default::default()
},
parent,
HashMap::default(),
HashMap::default()
)
}
pub fn add_inode(&mut self, inode: Inode, parent: Option<FuseInodeRef>, user_names: HashMap<u32, String>, group_names: HashMap<u32, String>) -> FuseInodeRef {
pub fn add_inode(
&mut self,
inode: Inode,
parent: Option<FuseInodeRef>,
user_names: HashMap<u32, String>,
group_names: HashMap<u32, String>,
) -> FuseInodeRef {
let inode = FuseInode {
inode: inode,
inode,
num: self.next_id,
parent: parent.clone(),
chunks: None,
@ -225,22 +260,31 @@ impl<'a> FuseFilesystem<'a> {
}
pub fn mount<P: AsRef<Path>>(self, mountpoint: P) -> Result<(), RepositoryError> {
Ok(try!(fuse::mount(self, &mountpoint, &[
OsStr::new("default_permissions"),
OsStr::new("kernel_cache"),
OsStr::new("auto_cache"),
OsStr::new("readonly")
])))
try!(fuse::mount(
self,
&mountpoint,
&[
OsStr::new("default_permissions"),
OsStr::new("kernel_cache"),
OsStr::new("auto_cache"),
OsStr::new("readonly"),
]
));
Ok(())
}
pub fn get_inode(&mut self, num: u64) -> Option<FuseInodeRef> {
self.inodes.get(&num).cloned()
}
pub fn get_child(&mut self, parent: &FuseInodeRef, name: &str) -> Result<Option<FuseInodeRef>, RepositoryError> {
pub fn get_child(
&mut self,
parent: &FuseInodeRef,
name: &str,
) -> Result<Option<FuseInodeRef>, RepositoryError> {
let mut parent_mut = parent.borrow_mut();
if let Some(child) = parent_mut.children.get(name) {
return Ok(Some(child.clone()))
return Ok(Some(child.clone()));
}
let child;
if let Some(chunks) = parent_mut.inode.children.as_ref().and_then(|c| c.get(name)) {
@ -255,9 +299,9 @@ impl<'a> FuseFilesystem<'a> {
name_cache: parent_mut.name_cache.clone()
}));
self.inodes.insert(self.next_id, child.clone());
self.next_id +=1;
self.next_id += 1;
} else {
return Ok(None)
return Ok(None);
}
parent_mut.children.insert(name.to_string(), child.clone());
Ok(Some(child))
@ -281,7 +325,7 @@ impl<'a> FuseFilesystem<'a> {
name_cache: parent_mut.name_cache.clone()
}));
self.inodes.insert(self.next_id, child.clone());
self.next_id +=1;
self.next_id += 1;
parent_children.insert(name.clone(), child);
}
}
@ -294,10 +338,11 @@ impl<'a> FuseFilesystem<'a> {
let mut inode = inode.borrow_mut();
let mut chunks = None;
match inode.inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(ref c)) => {
chunks = Some(c.clone());
},
}
Some(FileData::ChunkedIndirect(ref c)) => {
let chunk_data = try!(self.repository.get_data(c));
chunks = Some(ChunkList::read_from(&chunk_data));
@ -310,9 +355,8 @@ impl<'a> FuseFilesystem<'a> {
impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// Look up a directory entry by name and get its attributes.
fn lookup (&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) {
fn lookup(&mut self, _req: &fuse::Request, parent: u64, name: &OsStr, reply: fuse::ReplyEntry) {
let sname = str!(name, reply);
let parent = inode!(self, parent, reply);
let child = lookup!(self, &parent, sname, reply);
@ -321,7 +365,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
reply.entry(&ttl, &attrs, 0)
}
fn destroy (&mut self, _req: &fuse::Request) {
fn destroy(&mut self, _req: &fuse::Request) {
info!("destroy");
}
@ -332,66 +376,131 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// each forget. The filesystem may ignore forget calls, if the inodes don't need to
/// have a limited lifetime. On unmount it is not guaranteed that all referenced
/// inodes will receive a forget message.
fn forget (&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) {
fn forget(&mut self, _req: &fuse::Request, ino: u64, _nlookup: u64) {
info!("forget {:?}", ino);
//self.fs.forget(ino).unwrap();
}
/// Get file attributes
fn getattr (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) {
fn getattr(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyAttr) {
let inode = inode!(self, ino, reply);
let ttl = Timespec::new(60, 0);
reply.attr(&ttl, &inode.borrow().to_attrs());
}
/// Set file attributes
fn setattr (&mut self, _req: &fuse::Request, _ino: u64, _mode: Option<u32>, _uid: Option<u32>, _gid: Option<u32>, _size: Option<u64>, _atime: Option<Timespec>, _mtime: Option<Timespec>, _fh: Option<u64>, _crtime: Option<Timespec>, _chgtime: Option<Timespec>, _bkuptime: Option<Timespec>, _flags: Option<u32>, reply: fuse::ReplyAttr) {
fn setattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_mode: Option<u32>,
_uid: Option<u32>,
_gid: Option<u32>,
_size: Option<u64>,
_atime: Option<Timespec>,
_mtime: Option<Timespec>,
_fh: Option<u64>,
_crtime: Option<Timespec>,
_chgtime: Option<Timespec>,
_bkuptime: Option<Timespec>,
_flags: Option<u32>,
reply: fuse::ReplyAttr,
) {
reply.error(libc::EROFS)
}
/// Read symbolic link
fn readlink (&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) {
fn readlink(&mut self, _req: &fuse::Request, ino: u64, reply: fuse::ReplyData) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
match inode.inode.symlink_target {
None => reply.error(libc::EINVAL),
Some(ref link) => reply.data(link.as_bytes())
Some(ref link) => reply.data(link.as_bytes()),
}
}
/// Create a hard link
fn link (&mut self, _req: &fuse::Request, _ino: u64, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEntry) {
fn link(
&mut self,
_req: &fuse::Request,
_ino: u64,
_newparent: u64,
_newname: &OsStr,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}
/// Create file node
/// Create a regular file, character device, block device, fifo or socket node.
fn mknod (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _rdev: u32, reply: fuse::ReplyEntry) {
fn mknod(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
_rdev: u32,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}
/// Create a directory
fn mkdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, reply: fuse::ReplyEntry) {
fn mkdir(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}
/// Remove a file
fn unlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn unlink(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}
/// Remove a directory
fn rmdir (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn rmdir(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}
/// Create a symbolic link
fn symlink (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _link: &Path, reply: fuse::ReplyEntry) {
fn symlink(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_link: &Path,
reply: fuse::ReplyEntry,
) {
reply.error(libc::EROFS)
}
/// Rename a file
fn rename (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _newparent: u64, _newname: &OsStr, reply: fuse::ReplyEmpty) {
fn rename(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_newparent: u64,
_newname: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}
@ -403,7 +512,7 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// anything in fh. There are also some flags (direct_io, keep_cache) which the
/// filesystem may set, to change the way the file is opened. See fuse_file_info
/// structure in <fuse_common.h> for more details.
fn open (&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) {
fn open(&mut self, _req: &fuse::Request, ino: u64, flags: u32, reply: fuse::ReplyOpen) {
if (flags & (libc::O_WRONLY | libc::O_RDWR | libc::O_TRUNC) as u32) != 0 {
return reply.error(libc::EROFS);
}
@ -415,33 +524,48 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// Read data
/// Read should send exactly the number of bytes requested except on EOF or error,
/// otherwise the rest of the data will be substituted with zeroes. An exception to
/// this is when the file has been opened in 'direct_io' mode, in which case the
/// this is when the file has been opened in 'direct_io' mode, in which case the
/// return value of the read system call will reflect the return value of this
/// operation. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value.
fn read (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, mut offset: u64, mut size: u32, reply: fuse::ReplyData) {
/// if the open method didn't set any value.
fn read(
&mut self,
_req: &fuse::Request,
ino: u64,
_fh: u64,
mut offset: i64,
mut size: u32,
reply: fuse::ReplyData,
) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
match inode.inode.data {
None => return reply.data(&[]),
Some(FileData::Inline(ref data)) => return reply.data(&data[min(offset as usize, data.len())..min(offset as usize+size as usize, data.len())]),
_ => ()
Some(FileData::Inline(ref data)) => {
return reply.data(
&data[min(offset as usize, data.len())..
min(offset as usize + size as usize, data.len())]
)
}
_ => (),
}
if let Some(ref chunks) = inode.chunks {
let mut data = Vec::with_capacity(size as usize);
for &(hash, len) in chunks.iter() {
if len as u64 <= offset {
offset -= len as u64;
continue
if i64::from(len) <= offset {
offset -= i64::from(len);
continue;
}
let chunk = match fuse_try!(self.repository.get_chunk(hash), reply) {
Some(chunk) => chunk,
None => return reply.error(libc::EIO)
None => return reply.error(libc::EIO),
};
assert_eq!(chunk.len() as u32, len);
data.extend_from_slice(&chunk[offset as usize..min(offset as usize + size as usize, len as usize)]);
data.extend_from_slice(
&chunk[offset as usize..min(offset as usize + size as usize, len as usize)]
);
if len - offset as u32 >= size {
break
break;
}
size -= len - offset as u32;
offset = 0;
@ -453,12 +577,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}
/// Write data
fn write (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _offset: u64, _data: &[u8], _flags: u32, reply: fuse::ReplyWrite) {
fn write(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_offset: i64,
_data: &[u8],
_flags: u32,
reply: fuse::ReplyWrite,
) {
reply.error(libc::EROFS)
}
/// Flush method
fn flush (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, reply: fuse::ReplyEmpty) {
fn flush(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}
@ -468,9 +608,18 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// call there will be exactly one release call. The filesystem may reply with an
/// error, but error values are not returned to close() or munmap() which triggered
/// the release. fh will contain the value set by the open method, or will be undefined
/// if the open method didn't set any value. flags will contain the same flags as for
/// if the open method didn't set any value. flags will contain the same flags as for
/// open.
fn release (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, _lock_owner: u64, _flush: bool, reply: fuse::ReplyEmpty) {
fn release(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_flags: u32,
_lock_owner: u64,
_flush: bool,
reply: fuse::ReplyEmpty,
) {
/*if self.read_fds.remove(&fh).is_some() || self.write_fds.remove(&fh).is_some() {
reply.ok();
} else {
@ -480,28 +629,42 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}
/// Synchronize file contents
fn fsync (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) {
fn fsync(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_datasync: bool,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}
/// Open a directory, finished
fn opendir (&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) {
fn opendir(&mut self, _req: &fuse::Request, ino: u64, _flags: u32, reply: fuse::ReplyOpen) {
let dir = inode!(self, ino, reply);
fuse_try!(self.fetch_children(&dir), reply);
reply.opened(ino, 0);
}
/// Read directory, finished
fn readdir (&mut self, _req: &fuse::Request, ino: u64, _fh: u64, offset: u64, mut reply: fuse::ReplyDirectory) {
fn readdir(
&mut self,
_req: &fuse::Request,
ino: u64,
_fh: u64,
offset: i64,
mut reply: fuse::ReplyDirectory,
) {
let dir = inode!(self, ino, reply);
let dir = dir.borrow();
if let Some(entries) = dir.dir_list() {
for (i, (num, file_type, name)) in entries.into_iter().enumerate() {
if i < offset as usize {
continue
continue;
}
if reply.add(num, i as u64 +1, file_type, &Path::new(&name)) {
break
if reply.add(num, i as i64 + 1, file_type, &Path::new(&name)) {
break;
}
}
reply.ok()
@ -511,20 +674,34 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}
/// Release an open directory, finished
fn releasedir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _flags: u32, reply: fuse::ReplyEmpty) {
fn releasedir(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_flags: u32,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}
/// Synchronize directory contents, finished
fn fsyncdir (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _datasync: bool, reply: fuse::ReplyEmpty) {
fn fsyncdir(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_datasync: bool,
reply: fuse::ReplyEmpty,
) {
reply.ok()
}
/// Get file system statistics
fn statfs (&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) {
fn statfs(&mut self, _req: &fuse::Request, _ino: u64, reply: fuse::ReplyStatfs) {
let info = self.repository.info();
reply.statfs(
info.raw_data_size/512 as u64, //total blocks
info.raw_data_size / 512 as u64, //total blocks
0, //free blocks for admin
0, //free blocks for users
0,
@ -536,12 +713,28 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}
/// Set an extended attribute
fn setxattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, _value: &[u8], _flags: u32, _position: u32, reply: fuse::ReplyEmpty) {
fn setxattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_name: &OsStr,
_value: &[u8],
_flags: u32,
_position: u32,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}
/// Get an extended attribute
fn getxattr (&mut self, _req: &fuse::Request, ino: u64, name: &OsStr, size: u32, reply: fuse::ReplyXattr) {
fn getxattr(
&mut self,
_req: &fuse::Request,
ino: u64,
name: &OsStr,
size: u32,
reply: fuse::ReplyXattr,
) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
if let Some(val) = inode.inode.xattrs.get(&name.to_string_lossy() as &str) {
@ -558,14 +751,31 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
}
/// List extended attribute names
fn listxattr (&mut self, _req: &fuse::Request, _ino: u64, _size: u32, reply: fuse::ReplyXattr) {
// #FIXME:20 If arg.size is zero, the size of the attribute list should be sent with fuse_getxattr_out
// #FIXME:10 If arg.size is non-zero, send the attribute list if it fits, or ERANGE otherwise
reply.error(libc::ENOSYS);
fn listxattr(&mut self, _req: &fuse::Request, ino: u64, size: u32, reply: fuse::ReplyXattr) {
let inode = inode!(self, ino, reply);
let inode = inode.borrow();
let mut names_str = String::new();
for name in inode.inode.xattrs.keys() {
names_str.push_str(name);
names_str.push('\0');
}
if size == 0 {
return reply.size(names_str.len() as u32);
}
if size < names_str.len() as u32 {
return reply.error(libc::ERANGE);
}
reply.data(names_str.as_bytes());
}
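The two early returns implement the standard xattr size-probe protocol, sketched here from the caller's side in syscall terms:

// 1) listxattr(path, NULL, 0)    -> returns the byte count of all names
// 2) listxattr(path, buf, count) -> fills buf with NUL-terminated names,
//    or fails with ERANGE if the buffer is smaller than the list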
/// Remove an extended attribute
fn removexattr (&mut self, _req: &fuse::Request, _ino: u64, _name: &OsStr, reply: fuse::ReplyEmpty) {
fn removexattr(
&mut self,
_req: &fuse::Request,
_ino: u64,
_name: &OsStr,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::EROFS)
}
@ -573,28 +783,65 @@ impl<'a> fuse::Filesystem for FuseFilesystem<'a> {
/// This will be called for the access() system call. If the 'default_permissions'
/// mount option is given, this method is not called. This method is not called
/// under Linux kernel versions 2.4.x
fn access (&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) {
fn access(&mut self, _req: &fuse::Request, _ino: u64, _mask: u32, reply: fuse::ReplyEmpty) {
reply.error(libc::ENOSYS);
}
/// Create and open a file
fn create (&mut self, _req: &fuse::Request, _parent: u64, _name: &OsStr, _mode: u32, _flags: u32, reply: fuse::ReplyCreate) {
fn create(
&mut self,
_req: &fuse::Request,
_parent: u64,
_name: &OsStr,
_mode: u32,
_flags: u32,
reply: fuse::ReplyCreate,
) {
reply.error(libc::EROFS)
}
/// Test for a POSIX file lock
fn getlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, reply: fuse::ReplyLock) {
fn getlk(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
_start: u64,
_end: u64,
_typ: u32,
_pid: u32,
reply: fuse::ReplyLock,
) {
reply.error(libc::ENOSYS);
}
/// Acquire, modify or release a POSIX file lock
fn setlk (&mut self, _req: &fuse::Request, _ino: u64, _fh: u64, _lock_owner: u64, _start: u64, _end: u64, _typ: u32, _pid: u32, _sleep: bool, reply: fuse::ReplyEmpty) {
fn setlk(
&mut self,
_req: &fuse::Request,
_ino: u64,
_fh: u64,
_lock_owner: u64,
_start: u64,
_end: u64,
_typ: u32,
_pid: u32,
_sleep: bool,
reply: fuse::ReplyEmpty,
) {
reply.error(libc::ENOSYS);
}
/// Map block index within file to block index within device
fn bmap (&mut self, _req: &fuse::Request, _ino: u64, _blocksize: u32, _idx: u64, reply: fuse::ReplyBmap) {
fn bmap(
&mut self,
_req: &fuse::Request,
_ino: u64,
_blocksize: u32,
_idx: u64,
reply: fuse::ReplyBmap,
) {
reply.error(libc::ENOSYS);
}
}


@ -1,9 +1,14 @@
pub use ::util::*;
pub use ::bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError, BundleDb, BundleWriterError, StoredBundle};
pub use ::chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use ::repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType, IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis, FileData, DiffType, InodeError, RepositoryLayout, Location};
pub use ::index::{Index, IndexError};
pub use ::mount::FuseFilesystem;
pub use util::*;
pub use bundledb::{BundleReader, BundleMode, BundleWriter, BundleInfo, BundleId, BundleDbError,
BundleDb, BundleWriterError, StoredBundle, BundleStatistics};
pub use chunker::{ChunkerType, Chunker, ChunkerStatus, ChunkerError};
pub use repository::{Repository, Backup, Config, RepositoryError, RepositoryInfo, Inode, FileType,
IntegrityError, BackupFileError, BackupError, BackupOptions, BundleAnalysis,
FileData, DiffType, InodeError, RepositoryLayout, Location,
RepositoryStatistics};
pub use index::{Index, IndexError, IndexStatistics};
pub use mount::FuseFilesystem;
pub use translation::CowStr;
pub use serde::{Serialize, Deserialize};


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::fs;
use std::path::{self, Path, PathBuf};
@ -15,12 +15,12 @@ quick_error!{
#[allow(unknown_lints,large_enum_variant)]
pub enum BackupError {
FailedPaths(backup: Backup, failed: Vec<PathBuf>) {
description("Some paths could not be backed up")
display("Backup error: some paths could not be backed up")
description(tr!("Some paths could not be backed up"))
display("{}", tr_format!("Backup error: some paths could not be backed up"))
}
RemoveRoot {
description("The root of a backup can not be removed")
display("Backup error: the root of a backup can not be removed")
description(tr!("The root of a backup can not be removed"))
display("{}", tr_format!("Backup error: the root of a backup can not be removed"))
}
}
}
@ -33,17 +33,28 @@ pub struct BackupOptions {
pub enum DiffType {
Add, Mod, Del
Add,
Mod,
Del
}
impl Repository {
pub fn get_all_backups(&self) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.layout.backups_path())))
Ok(try!(Backup::get_all_from(
&self.crypto.lock().unwrap(),
self.layout.backups_path()
)))
}
pub fn get_backups<P: AsRef<Path>>(&self, path: P) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(&self.crypto.lock().unwrap(), self.layout.backups_path().join(path))))
pub fn get_backups<P: AsRef<Path>>(
&self,
path: P,
) -> Result<HashMap<String, Backup>, RepositoryError> {
Ok(try!(Backup::get_all_from(
&self.crypto.lock().unwrap(),
self.layout.backups_path().join(path)
)))
}
#[inline]
@ -52,14 +63,22 @@ impl Repository {
}
pub fn get_backup(&self, name: &str) -> Result<Backup, RepositoryError> {
Ok(try!(Backup::read_from(&self.crypto.lock().unwrap(), self.layout.backup_path(name))))
Ok(try!(Backup::read_from(
&self.crypto.lock().unwrap(),
self.layout.backup_path(name)
)))
}
pub fn save_backup(&mut self, backup: &Backup, name: &str) -> Result<(), RepositoryError> {
try!(self.write_mode());
let path = self.layout.backup_path(name);
try!(fs::create_dir_all(path.parent().unwrap()));
Ok(try!(backup.save_to(&self.crypto.lock().unwrap(), self.config.encryption.clone(), path)))
try!(backup.save_to(
&self.crypto.lock().unwrap(),
self.config.encryption.clone(),
path
));
Ok(())
}
pub fn delete_backup(&mut self, name: &str) -> Result<(), RepositoryError> {
@ -69,23 +88,32 @@ impl Repository {
loop {
path = path.parent().unwrap().to_owned();
if path == self.layout.backups_path() || fs::remove_dir(&path).is_err() {
break
break;
}
}
Ok(())
}
pub fn prune_backups(&mut self, prefix: &str, daily: usize, weekly: usize, monthly: usize, yearly: usize, force: bool) -> Result<(), RepositoryError> {
pub fn prune_backups(
&mut self,
prefix: &str,
daily: usize,
weekly: usize,
monthly: usize,
yearly: usize,
force: bool,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
let mut backups = Vec::new();
let backup_map = match self.get_all_backups() {
Ok(backup_map) => backup_map,
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
warn!("Some backups could not be read, ignoring them");
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
_failed))) => {
tr_warn!("Some backups could not be read, ignoring them");
backup_map
},
Err(err) => return Err(err)
}
Err(err) => return Err(err),
};
for (name, backup) in backup_map {
if name.starts_with(prefix) {
@ -96,7 +124,12 @@ impl Repository {
backups.sort_by_key(|backup| -backup.2.timestamp);
let mut keep = Bitmap::new(backups.len());
fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(backups: &[(String, DateTime<Local>, Backup)], keep: &mut Bitmap, max: usize, keyfn: F) {
fn mark_needed<K: Eq, F: Fn(&DateTime<Local>) -> K>(
backups: &[(String, DateTime<Local>, Backup)],
keep: &mut Bitmap,
max: usize,
keyfn: F,
) {
let mut kept = 0;
let mut last = None;
for (i, backup) in backups.iter().enumerate() {
@ -104,7 +137,7 @@ impl Repository {
let cur = Some(val);
if cur != last {
if kept >= max {
break
break;
}
last = cur;
keep.set(i);
@ -119,10 +152,18 @@ impl Repository {
mark_needed(&backups, &mut keep, monthly, |d| (d.year(), d.month()));
}
if weekly > 0 {
mark_needed(&backups, &mut keep, weekly, |d| (d.isoweekdate().0, d.isoweekdate().1));
mark_needed(&backups, &mut keep, weekly, |d| {
let week = d.iso_week();
(week.year(), week.week())
});
}
if daily > 0 {
mark_needed(&backups, &mut keep, daily, |d| (d.year(), d.month(), d.day()));
mark_needed(
&backups,
&mut keep,
daily,
|d| (d.year(), d.month(), d.day())
);
}
let mut remove = Vec::new();
println!("Removing the following backups");
@ -140,30 +181,43 @@ impl Repository {
Ok(())
}
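
Note: the prune_backups rewrite above keeps its retention logic in mark_needed. Backups are sorted newest first, and for each bucket (year, month, ISO week, day) the first backup of each new key is marked until max keys have been kept. A self-contained sketch of that marking pass, assuming a newest-first slice and a plain bool slice instead of the project's Bitmap:

/// Mark up to `max` entries to keep, one per distinct key, newest first.
fn mark_needed<T, K: Eq>(items: &[T], keep: &mut [bool], max: usize, key: impl Fn(&T) -> K) {
    let mut kept = 0;
    let mut last: Option<K> = None;
    for (i, item) in items.iter().enumerate() {
        let cur = Some(key(item));
        // Only the first (newest) entry of each key bucket is kept.
        if cur != last {
            if kept >= max {
                break;
            }
            last = cur;
            keep[i] = true;
            kept += 1;
        }
    }
}

With key = |d| (d.year(), d.month(), d.day()) and max = 7, this keeps the newest backup from each of the last seven days that have backups; anything left unmarked by all buckets becomes a removal candidate.
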
pub fn restore_inode_tree<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, path: P) -> Result<(), RepositoryError> {
pub fn restore_inode_tree<P: AsRef<Path>>(
&mut self,
backup: &Backup,
inode: Inode,
path: P,
) -> Result<(), RepositoryError> {
let _lock = try!(self.lock(false));
let mut queue = VecDeque::new();
queue.push_back((path.as_ref().to_owned(), inode));
let cache = users::UsersCache::new();
let mut is_root = true;
while let Some((path, mut inode)) = queue.pop_front() {
if let Some(name) = backup.user_names.get(&inode.user) {
if let Some(user) = cache.get_user_by_name(name) {
inode.user = user.uid();
if inode.file_type != FileType::Directory || !is_root {
if let Some(name) = backup.user_names.get(&inode.user) {
if let Some(user) = cache.get_user_by_name(name) {
inode.user = user.uid();
}
}
}
if let Some(name) = backup.group_names.get(&inode.group) {
if let Some(group) = cache.get_group_by_name(name) {
inode.group = group.gid();
if let Some(name) = backup.group_names.get(&inode.group) {
if let Some(group) = cache.get_group_by_name(name) {
inode.group = group.gid();
}
}
try!(self.save_inode_at(&inode, &path));
}
try!(self.save_inode_at(&inode, &path));
if inode.file_type == FileType::Directory {
let path = path.join(inode.name);
let path = if is_root {
path.to_path_buf()
} else {
path.join(inode.name)
};
for chunks in inode.children.unwrap().values() {
let inode = try!(self.get_inode(chunks));
queue.push_back((path.clone(), inode));
}
}
is_root = false;
}
Ok(())
}
@ -174,22 +228,28 @@ impl Repository {
reference: Option<&Inode>,
options: &BackupOptions,
backup: &mut Backup,
failed_paths: &mut Vec<PathBuf>
failed_paths: &mut Vec<PathBuf>,
) -> Result<Inode, RepositoryError> {
let path = path.as_ref();
let mut inode = try!(self.create_inode(path, reference));
if !backup.user_names.contains_key(&inode.user) {
if let Some(user) = users::get_user_by_uid(inode.user) {
backup.user_names.insert(inode.user, user.name().to_string());
backup.user_names.insert(
inode.user,
user.name().to_string()
);
} else {
warn!("Failed to retrieve name of user {}", inode.user);
tr_warn!("Failed to retrieve name of user {}", inode.user);
}
}
if !backup.group_names.contains_key(&inode.group) {
if let Some(group) = users::get_group_by_gid(inode.group) {
backup.group_names.insert(inode.group, group.name().to_string());
backup.group_names.insert(
inode.group,
group.name().to_string()
);
} else {
warn!("Failed to retrieve name of group {}", inode.group);
tr_warn!("Failed to retrieve name of group {}", inode.group);
}
}
let mut meta_size = 0;
@ -204,33 +264,42 @@ impl Repository {
if options.same_device {
let child_dev = try!(child.metadata()).st_dev();
if child_dev != parent_dev {
continue
continue;
}
}
if let Some(ref excludes) = options.excludes {
let child_path_str = child_path.to_string_lossy();
if excludes.is_match(&child_path_str) {
continue
continue;
}
}
let name = child.file_name().to_string_lossy().to_string();
let ref_child = reference.as_ref()
let ref_child = reference
.as_ref()
.and_then(|inode| inode.children.as_ref())
.and_then(|map| map.get(&name))
.and_then(|chunks| self.get_inode(chunks).ok());
let child_inode = match self.create_backup_recurse(&child_path, ref_child.as_ref(), options, backup, failed_paths) {
let child_inode = match self.create_backup_recurse(
&child_path,
ref_child.as_ref(),
options,
backup,
failed_paths
) {
Ok(inode) => inode,
Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
Err(RepositoryError::Inode(_)) |
Err(RepositoryError::Chunker(_)) |
Err(RepositoryError::Io(_)) => {
info!("Failed to backup {:?}", child_path);
failed_paths.push(child_path);
continue
},
Err(err) => return Err(err)
continue;
}
Err(err) => return Err(err),
};
let chunks = try!(self.put_inode(&child_inode));
inode.cum_size += child_inode.cum_size;
for &(_, len) in chunks.iter() {
meta_size += len as u64;
meta_size += u64::from(len);
}
inode.cum_dirs += child_inode.cum_dirs;
inode.cum_files += child_inode.cum_files;
@ -241,7 +310,7 @@ impl Repository {
inode.cum_files = 1;
if let Some(FileData::ChunkedIndirect(ref chunks)) = inode.data {
for &(_, len) in chunks.iter() {
meta_size += len as u64;
meta_size += u64::from(len);
}
}
}
@ -256,11 +325,16 @@ impl Repository {
Ok(inode)
}
pub fn create_backup_recursively<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Backup>, options: &BackupOptions) -> Result<Backup, RepositoryError> {
pub fn create_backup_recursively<P: AsRef<Path>>(
&mut self,
path: P,
reference: Option<&Backup>,
options: &BackupOptions,
) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let reference_inode = reference.and_then(|b| self.get_inode(&b.root).ok());
@ -271,14 +345,20 @@ impl Repository {
let info_before = self.info();
let start = Local::now();
let mut failed_paths = vec![];
let root_inode = try!(self.create_backup_recurse(path, reference_inode.as_ref(), options, &mut backup, &mut failed_paths));
let root_inode = try!(self.create_backup_recurse(
path,
reference_inode.as_ref(),
options,
&mut backup,
&mut failed_paths
));
backup.root = try!(self.put_inode(&root_inode));
try!(self.flush());
let elapsed = Local::now().signed_duration_since(start);
backup.timestamp = start.timestamp();
backup.total_data_size = root_inode.cum_size;
for &(_, len) in backup.root.iter() {
backup.total_data_size += len as u64;
backup.total_data_size += u64::from(len);
}
backup.file_count = root_inode.cum_files;
backup.dir_count = root_inode.cum_dirs;
@ -297,20 +377,29 @@ impl Repository {
}
}
pub fn remove_backup_path<P: AsRef<Path>>(&mut self, backup: &mut Backup, path: P) -> Result<(), RepositoryError> {
pub fn remove_backup_path<P: AsRef<Path>>(
&mut self,
backup: &mut Backup,
path: P,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
let mut inodes = try!(self.get_backup_path(backup, path));
let to_remove = inodes.pop().unwrap();
let mut remove_from = match inodes.pop() {
Some(inode) => inode,
None => return Err(BackupError::RemoveRoot.into())
None => return Err(BackupError::RemoveRoot.into()),
};
remove_from.children.as_mut().unwrap().remove(&to_remove.name);
remove_from.children.as_mut().unwrap().remove(
&to_remove.name
);
let mut last_inode_chunks = try!(self.put_inode(&remove_from));
let mut last_inode_name = remove_from.name;
while let Some(mut inode) = inodes.pop() {
inode.children.as_mut().unwrap().insert(last_inode_name, last_inode_chunks);
inode.children.as_mut().unwrap().insert(
last_inode_name,
last_inode_chunks
);
last_inode_chunks = try!(self.put_inode(&inode));
last_inode_name = inode.name;
}
@ -319,20 +408,32 @@ impl Repository {
Ok(())
}
pub fn get_backup_path<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Vec<Inode>, RepositoryError> {
pub fn get_backup_path<P: AsRef<Path>>(
&mut self,
backup: &Backup,
path: P,
) -> Result<Vec<Inode>, RepositoryError> {
let mut inodes = vec![];
let mut inode = try!(self.get_inode(&backup.root));
for c in path.as_ref().components() {
if let path::Component::Normal(name) = c {
let name = name.to_string_lossy();
if inodes.is_empty() && inode.file_type != FileType::Directory && inode.name == name {
if inodes.is_empty() && inode.file_type != FileType::Directory &&
inode.name == name
{
return Ok(vec![inode]);
}
if let Some(chunks) = inode.children.as_mut().and_then(|c| c.remove(&name as &str)) {
if let Some(chunks) = inode.children.as_mut().and_then(
|c| c.remove(&name as &str)
)
{
inodes.push(inode);
inode = try!(self.get_inode(&chunks));
} else {
return Err(RepositoryError::NoSuchFileInBackup(backup.clone(), path.as_ref().to_owned()));
return Err(RepositoryError::NoSuchFileInBackup(
backup.clone(),
path.as_ref().to_owned()
));
}
}
}
@ -341,20 +442,32 @@ impl Repository {
}
#[inline]
pub fn get_backup_inode<P: AsRef<Path>>(&mut self, backup: &Backup, path: P) -> Result<Inode, RepositoryError> {
self.get_backup_path(backup, path).map(|mut inodes| inodes.pop().unwrap())
pub fn get_backup_inode<P: AsRef<Path>>(
&mut self,
backup: &Backup,
path: P,
) -> Result<Inode, RepositoryError> {
self.get_backup_path(backup, path).map(|mut inodes| {
inodes.pop().unwrap()
})
}
pub fn find_versions<P: AsRef<Path>>(&mut self, path: P) -> Result<Vec<(String, Inode)>, RepositoryError> {
pub fn find_versions<P: AsRef<Path>>(
&mut self,
path: P,
) -> Result<Vec<(String, Inode)>, RepositoryError> {
let path = path.as_ref();
let mut versions = HashMap::new();
for (name, backup) in try!(self.get_all_backups()) {
match self.get_backup_inode(&backup, path) {
Ok(inode) => {
versions.insert((inode.file_type, inode.timestamp, inode.size), (name, inode));
},
versions.insert(
(inode.file_type, inode.timestamp, inode.size),
(name, inode)
);
}
Err(RepositoryError::NoSuchFileInBackup(..)) => continue,
Err(err) => return Err(err)
Err(err) => return Err(err),
}
}
let mut versions: Vec<_> = versions.into_iter().map(|(_, v)| v).collect();
@ -362,7 +475,14 @@ impl Repository {
Ok(versions)
}
fn find_differences_recurse(&mut self, inode1: &Inode, inode2: &Inode, path: PathBuf, diffs: &mut Vec<(DiffType, PathBuf)>) -> Result<(), RepositoryError> {
#[allow(needless_pass_by_value)]
fn find_differences_recurse(
&mut self,
inode1: &Inode,
inode2: &Inode,
path: PathBuf,
diffs: &mut Vec<(DiffType, PathBuf)>,
) -> Result<(), RepositoryError> {
if !inode1.is_same_meta(inode2) || inode1.data != inode2.data {
diffs.push((DiffType::Mod, path.clone()));
}
@ -386,7 +506,12 @@ impl Repository {
if chunks1 != chunks2 {
let inode1 = try!(self.get_inode(chunks1));
let inode2 = try!(self.get_inode(chunks2));
try!(self.find_differences_recurse(&inode1, &inode2, path.join(name), diffs));
try!(self.find_differences_recurse(
&inode1,
&inode2,
path.join(name),
diffs
));
}
} else {
diffs.push((DiffType::Add, path.join(name)));
@ -402,10 +527,64 @@ impl Repository {
}
#[inline]
pub fn find_differences(&mut self, inode1: &Inode, inode2: &Inode) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
pub fn find_differences(
&mut self,
inode1: &Inode,
inode2: &Inode,
) -> Result<Vec<(DiffType, PathBuf)>, RepositoryError> {
let mut diffs = vec![];
let path = PathBuf::from("/");
try!(self.find_differences_recurse(inode1, inode2, path, &mut diffs));
try!(self.find_differences_recurse(
inode1,
inode2,
path,
&mut diffs
));
Ok(diffs)
}
fn count_sizes_recursive(&mut self, inode: &Inode, sizes: &mut HashMap<u64, usize>, min_size: u64) -> Result<(), RepositoryError> {
if inode.size >= min_size {
*sizes.entry(inode.size).or_insert(0) += 1;
}
if let Some(ref children) = inode.children {
for chunks in children.values() {
let ch = try!(self.get_inode(chunks));
try!(self.count_sizes_recursive(&ch, sizes, min_size));
}
}
Ok(())
}
fn find_duplicates_recursive(&mut self, inode: &Inode, path: &Path, sizes: &HashMap<u64, usize>, hashes: &mut HashMap<Hash, (Vec<PathBuf>, u64)>) -> Result<(), RepositoryError> {
let path = path.join(&inode.name);
if sizes.get(&inode.size).cloned().unwrap_or(0) > 1 {
if let Some(ref data) = inode.data {
let chunk_data = try!(msgpack::encode(data).map_err(InodeError::from));
let hash = HashMethod::Blake2.hash(&chunk_data);
hashes.entry(hash).or_insert((Vec::new(), inode.size)).0.push(path.clone());
}
}
if let Some(ref children) = inode.children {
for chunks in children.values() {
let ch = try!(self.get_inode(chunks));
try!(self.find_duplicates_recursive(&ch, &path, sizes, hashes));
}
}
Ok(())
}
pub fn find_duplicates(&mut self, inode: &Inode, min_size: u64) -> Result<Vec<(Vec<PathBuf>, u64)>, RepositoryError> {
let mut sizes = HashMap::new();
try!(self.count_sizes_recursive(inode, &mut sizes, min_size));
let mut hashes = HashMap::new();
if let Some(ref children) = inode.children {
for chunks in children.values() {
let ch = try!(self.get_inode(chunks));
try!(self.find_duplicates_recursive(&ch, Path::new(""), &sizes, &mut hashes));
}
}
let dups = hashes.into_iter().map(|(_,v)| v).filter(|&(ref v, _)| v.len() > 1).collect();
Ok(dups)
}
}
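
Note: the find_duplicates routine added above uses a two-pass scheme: first count how often each file size occurs, then hash only the contents of files whose size occurs more than once, so unique-sized files never need to be read. A minimal standalone sketch of the same idea over plain filesystem paths (a hypothetical helper, using std's DefaultHasher for self-containment where the repository uses Blake2 over the encoded chunk list):

use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::fs;
use std::hash::Hasher;
use std::io;
use std::path::PathBuf;

/// Group the given files into duplicate sets (same size and same content hash).
fn find_duplicate_files(paths: &[PathBuf]) -> io::Result<Vec<Vec<PathBuf>>> {
    // Pass 1: count sizes; only sizes seen more than once can contain duplicates.
    let mut sizes: HashMap<u64, usize> = HashMap::new();
    for path in paths {
        *sizes.entry(fs::metadata(path)?.len()).or_insert(0) += 1;
    }
    // Pass 2: hash only the candidate files and group them by (size, hash).
    let mut groups: HashMap<(u64, u64), Vec<PathBuf>> = HashMap::new();
    for path in paths {
        let size = fs::metadata(path)?.len();
        if sizes[&size] > 1 {
            let mut hasher = DefaultHasher::new();
            hasher.write(&fs::read(path)?);
            groups.entry((size, hasher.finish())).or_default().push(path.clone());
        }
    }
    // As in find_duplicates above, only groups with more than one member count.
    Ok(groups.into_values().filter(|g| g.len() > 1).collect())
}
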

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::io::{self, BufReader, BufWriter, Read, Write};
use std::fs::{self, File};
@ -15,49 +15,49 @@ quick_error!{
pub enum BackupFileError {
Read(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to read backup")
display("Backup file error: failed to read backup file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to read backup"))
display("{}", tr_format!("Backup file error: failed to read backup file {:?}\n\tcaused by: {}", path, err))
}
Write(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to write backup")
display("Backup file error: failed to write backup file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to write backup"))
display("{}", tr_format!("Backup file error: failed to write backup file {:?}\n\tcaused by: {}", path, err))
}
Decode(err: msgpack::DecodeError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: msgpack::DecodeError) -> (err, path.to_path_buf())
description("Failed to decode backup")
display("Backup file error: failed to decode backup of {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to decode backup"))
display("{}", tr_format!("Backup file error: failed to decode backup of {:?}\n\tcaused by: {}", path, err))
}
Encode(err: msgpack::EncodeError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: msgpack::EncodeError) -> (err, path.to_path_buf())
description("Failed to encode backup")
display("Backup file error: failed to encode backup of {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to encode backup"))
display("{}", tr_format!("Backup file error: failed to encode backup of {:?}\n\tcaused by: {}", path, err))
}
WrongHeader(path: PathBuf) {
description("Wrong header")
display("Backup file error: wrong header on backup {:?}", path)
description(tr!("Wrong header"))
display("{}", tr_format!("Backup file error: wrong header on backup {:?}", path))
}
UnsupportedVersion(path: PathBuf, version: u8) {
description("Wrong version")
display("Backup file error: unsupported version on backup {:?}: {}", path, version)
description(tr!("Wrong version"))
display("{}", tr_format!("Backup file error: unsupported version on backup {:?}: {}", path, version))
}
Decryption(err: EncryptionError, path: PathBuf) {
cause(err)
context(path: &'a Path, err: EncryptionError) -> (err, path.to_path_buf())
description("Decryption failed")
display("Backup file error: decryption failed on backup {:?}\n\tcaused by: {}", path, err)
description(tr!("Decryption failed"))
display("{}", tr_format!("Backup file error: decryption failed on backup {:?}\n\tcaused by: {}", path, err))
}
Encryption(err: EncryptionError) {
from()
cause(err)
description("Encryption failed")
display("Backup file error: encryption failed\n\tcaused by: {}", err)
description(tr!("Encryption failed"))
display("{}", tr_format!("Backup file error: encryption failed\n\tcaused by: {}", err))
}
PartialBackupsList(partial: HashMap<String, Backup>, failed: Vec<PathBuf>) {
description("Some backups could not be loaded")
display("Backup file error: some backups could not be loaded: {:?}", failed)
description(tr!("Some backups could not be loaded"))
display("{}", tr_format!("Backup file error: some backups could not be loaded: {:?}", failed))
}
}
}
@ -116,52 +116,80 @@ serde_impl!(Backup(u8?) {
impl Backup {
pub fn read_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<Self, BackupFileError> {
let path = path.as_ref();
let mut file = BufReader::new(try!(File::open(path).map_err(|err| BackupFileError::Read(err, path.to_path_buf()))));
let mut file = BufReader::new(try!(File::open(path).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
})));
let mut header = [0u8; 8];
try!(file.read_exact(&mut header).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
try!(file.read_exact(&mut header).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
}));
if header[..HEADER_STRING.len()] != HEADER_STRING {
return Err(BackupFileError::WrongHeader(path.to_path_buf()))
return Err(BackupFileError::WrongHeader(path.to_path_buf()));
}
let version = header[HEADER_STRING.len()];
if version != HEADER_VERSION {
return Err(BackupFileError::UnsupportedVersion(path.to_path_buf(), version))
return Err(BackupFileError::UnsupportedVersion(
path.to_path_buf(),
version
));
}
let header: BackupHeader = try!(msgpack::decode_from_stream(&mut file).context(path));
let mut data = Vec::new();
try!(file.read_to_end(&mut data).map_err(|err| BackupFileError::Read(err, path.to_path_buf())));
try!(file.read_to_end(&mut data).map_err(|err| {
BackupFileError::Read(err, path.to_path_buf())
}));
if let Some(ref encryption) = header.encryption {
data = try!(crypto.decrypt(encryption, &data));
}
Ok(try!(msgpack::decode(&data).context(path)))
}
pub fn save_to<P: AsRef<Path>>(&self, crypto: &Crypto, encryption: Option<Encryption>, path: P) -> Result<(), BackupFileError> {
pub fn save_to<P: AsRef<Path>>(
&self,
crypto: &Crypto,
encryption: Option<Encryption>,
path: P,
) -> Result<(), BackupFileError> {
let path = path.as_ref();
let mut data = try!(msgpack::encode(self).context(path));
if let Some(ref encryption) = encryption {
data = try!(crypto.encrypt(encryption, &data));
}
let mut file = BufWriter::new(try!(File::create(path).map_err(|err| BackupFileError::Write(err, path.to_path_buf()))));
try!(file.write_all(&HEADER_STRING).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
try!(file.write_all(&[HEADER_VERSION]).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
let header = BackupHeader { encryption: encryption };
let mut file = BufWriter::new(try!(File::create(path).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
})));
try!(file.write_all(&HEADER_STRING).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
try!(file.write_all(&[HEADER_VERSION]).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
let header = BackupHeader { encryption };
try!(msgpack::encode_to_stream(&header, &mut file).context(path));
try!(file.write_all(&data).map_err(|err| BackupFileError::Write(err, path.to_path_buf())));
try!(file.write_all(&data).map_err(|err| {
BackupFileError::Write(err, path.to_path_buf())
}));
Ok(())
}
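
Note: the read_from/save_to pair above fixes the on-disk backup file layout: a magic string, one version byte, a msgpack-encoded header naming the encryption method, then the (optionally encrypted) msgpack body. A minimal sketch of just the framing check; the magic bytes and version below are placeholders, not the real HEADER_STRING and HEADER_VERSION:

use std::io::{self, Read};

const MAGIC: &[u8] = b"zvault\x01"; // assumed 7-byte magic; the real value differs
const VERSION: u8 = 1;              // assumed current format version

/// Validate the fixed 8-byte prologue and return the remaining payload.
fn read_framed<R: Read>(mut file: R) -> io::Result<Vec<u8>> {
    let mut header = [0u8; 8];
    file.read_exact(&mut header)?;
    if &header[..MAGIC.len()] != MAGIC {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "wrong header"));
    }
    if header[MAGIC.len()] != VERSION {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "unsupported version"));
    }
    // Everything after the prologue is the msgpack header plus the
    // (possibly encrypted) msgpack body.
    let mut rest = Vec::new();
    file.read_to_end(&mut rest)?;
    Ok(rest)
}
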
pub fn get_all_from<P: AsRef<Path>>(crypto: &Crypto, path: P) -> Result<HashMap<String, Backup>, BackupFileError> {
pub fn get_all_from<P: AsRef<Path>>(
crypto: &Crypto,
path: P,
) -> Result<HashMap<String, Backup>, BackupFileError> {
let mut backups = HashMap::new();
let base_path = path.as_ref();
let path = path.as_ref();
if !path.exists() {
debug!("Backup root folder does not exist");
tr_debug!("Backup root folder does not exist");
return Ok(backups);
}
let mut paths = vec![path.to_path_buf()];
let mut failed_paths = vec![];
while let Some(path) = paths.pop() {
for entry in try!(fs::read_dir(&path).map_err(|e| BackupFileError::Read(e, path.clone()))) {
for entry in try!(fs::read_dir(&path).map_err(|e| {
BackupFileError::Read(e, path.clone())
}))
{
let entry = try!(entry.map_err(|e| BackupFileError::Read(e, path.clone())));
let path = entry.path();
if path.is_dir() {
@ -169,9 +197,12 @@ impl Backup {
} else {
let relpath = path.strip_prefix(&base_path).unwrap();
if relpath.extension() != Some("backup".as_ref()) {
continue
continue;
}
let name = relpath.with_file_name(relpath.file_stem().unwrap()).to_string_lossy().to_string();
let name = relpath
.with_file_name(relpath.file_stem().unwrap())
.to_string_lossy()
.to_string();
if let Ok(backup) = Backup::read_from(crypto, &path) {
backups.insert(name, backup);
} else {

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::mem;
use std::cmp::min;
@ -16,7 +16,7 @@ pub struct ChunkReader<'a> {
impl<'a> ChunkReader<'a> {
pub fn new(repo: &'a mut Repository, chunks: ChunkList) -> Self {
ChunkReader {
repo: repo,
repo,
chunks: chunks.into_inner().into(),
data: vec![],
pos: 0
@ -25,26 +25,31 @@ impl<'a> ChunkReader<'a> {
}
impl<'a> Read for ChunkReader<'a> {
fn read(&mut self, mut buf: &mut [u8]) -> Result<usize, io::Error> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut bpos = 0;
loop {
if buf.len() == bpos {
break
break;
}
if self.data.len() == self.pos {
if let Some(chunk) = self.chunks.pop_front() {
self.data = match self.repo.get_chunk(chunk.0) {
Ok(Some(data)) => data,
Ok(None) => return Err(io::Error::new(io::ErrorKind::Other, IntegrityError::MissingChunk(chunk.0))),
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err))
Ok(None) => {
return Err(io::Error::new(
io::ErrorKind::Other,
IntegrityError::MissingChunk(chunk.0)
))
}
Err(err) => return Err(io::Error::new(io::ErrorKind::Other, err)),
};
self.pos = 0;
} else {
break
break;
}
}
let l = min(self.data.len()-self.pos, buf.len() - bpos);
buf[bpos..bpos+l].copy_from_slice(&self.data[self.pos..self.pos+l]);
let l = min(self.data.len() - self.pos, buf.len() - bpos);
buf[bpos..bpos + l].copy_from_slice(&self.data[self.pos..self.pos + l]);
bpos += l;
self.pos += l;
}
@ -56,7 +61,9 @@ impl<'a> Read for ChunkReader<'a> {
impl Repository {
#[inline]
pub fn get_bundle_id(&self, id: u32) -> Result<BundleId, RepositoryError> {
self.bundle_map.get(id).ok_or_else(|| IntegrityError::MissingBundleId(id).into())
self.bundle_map.get(id).ok_or_else(|| {
IntegrityError::MissingBundleId(id).into()
})
}
pub fn get_chunk(&mut self, hash: Hash) -> Result<Option<Vec<u8>>, RepositoryError> {
@ -64,27 +71,39 @@ impl Repository {
let found = if let Some(found) = self.index.get(&hash) {
found
} else {
return Ok(None)
return Ok(None);
};
// Lookup bundle id from map
let bundle_id = try!(self.get_bundle_id(found.bundle));
// Get chunk from bundle
Ok(Some(try!(self.bundles.get_chunk(&bundle_id, found.chunk as usize))))
Ok(Some(try!(
self.bundles.get_chunk(&bundle_id, found.chunk as usize)
)))
}
#[inline]
pub fn put_chunk(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
pub fn put_chunk(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
// If this chunk is in the index, ignore it
if self.index.contains(&hash) {
return Ok(())
return Ok(());
}
self.put_chunk_override(mode, hash, data)
}
fn write_chunk_to_bundle_and_index(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
fn write_chunk_to_bundle_and_index(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
// ...allocate one if needed
if writer.is_none() {
@ -101,10 +120,13 @@ impl Repository {
let chunk_id = try!(writer_obj.add(data, hash));
let bundle_id = match mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
// Add location to the index
try!(self.index.set(&hash, &Location::new(bundle_id, chunk_id as u32)));
try!(self.index.set(
&hash,
&Location::new(bundle_id, chunk_id as u32)
));
Ok(())
}
@ -113,14 +135,14 @@ impl Repository {
let next_free_bundle_id = self.next_free_bundle_id();
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
if writer.is_none() {
return Ok(())
return Ok(());
}
let bundle_id = match mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
let mut finished = None;
mem::swap(writer, &mut finished);
@ -139,12 +161,12 @@ impl Repository {
let (size, raw_size) = {
let writer = match mode {
BundleMode::Data => &mut self.data_bundle,
BundleMode::Meta => &mut self.meta_bundle
BundleMode::Meta => &mut self.meta_bundle,
};
if let Some(ref writer) = *writer {
(writer.estimate_final_size(), writer.raw_size())
} else {
return Ok(())
return Ok(());
}
};
if size >= self.config.bundle_size || raw_size >= 4 * self.config.bundle_size {
@ -158,18 +180,31 @@ impl Repository {
}
#[inline]
pub fn put_chunk_override(&mut self, mode: BundleMode, hash: Hash, data: &[u8]) -> Result<(), RepositoryError> {
pub fn put_chunk_override(
&mut self,
mode: BundleMode,
hash: Hash,
data: &[u8],
) -> Result<(), RepositoryError> {
try!(self.write_chunk_to_bundle_and_index(mode, hash, data));
self.finish_bundle_if_needed(mode)
}
#[inline]
pub fn put_data(&mut self, mode: BundleMode, data: &[u8]) -> Result<ChunkList, RepositoryError> {
pub fn put_data(
&mut self,
mode: BundleMode,
data: &[u8],
) -> Result<ChunkList, RepositoryError> {
let mut input = Cursor::new(data);
self.put_stream(mode, &mut input)
}
pub fn put_stream<R: Read>(&mut self, mode: BundleMode, data: &mut R) -> Result<ChunkList, RepositoryError> {
pub fn put_stream<R: Read>(
&mut self,
mode: BundleMode,
data: &mut R,
) -> Result<ChunkList, RepositoryError> {
let avg_size = self.config.chunker.avg_size();
let mut chunks = Vec::new();
let mut chunk = Vec::with_capacity(avg_size * 2);
@ -182,14 +217,15 @@ impl Repository {
try!(self.put_chunk(mode, hash, &chunk));
chunks.push((hash, chunk.len() as u32));
if res == ChunkerStatus::Finished {
break
break;
}
}
Ok(chunks.into())
}
pub fn get_data(&mut self, chunks: &[Chunk]) -> Result<Vec<u8>, RepositoryError> {
let mut data = Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::<u32>() as usize);
let mut data =
Vec::with_capacity(chunks.iter().map(|&(_, size)| size).sum::<u32>() as usize);
try!(self.get_stream(chunks, &mut data));
Ok(data)
}
@ -199,9 +235,15 @@ impl Repository {
ChunkReader::new(self, chunks)
}
pub fn get_stream<W: Write>(&mut self, chunks: &[Chunk], w: &mut W) -> Result<(), RepositoryError> {
pub fn get_stream<W: Write>(
&mut self,
chunks: &[Chunk],
w: &mut W,
) -> Result<(), RepositoryError> {
for &(ref hash, len) in chunks {
let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| IntegrityError::MissingChunk(*hash)));
let data = try!(try!(self.get_chunk(*hash)).ok_or_else(|| {
IntegrityError::MissingChunk(*hash)
}));
debug_assert_eq!(data.len() as u32, len);
try!(w.write_all(&data));
}
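
Note: ChunkReader above implements std::io::Read over a queue of chunks, refilling an internal buffer from the next chunk whenever it runs dry and copying at most min(remaining, buf) bytes per step. A self-contained sketch of the same pattern over in-memory buffers, without the repository lookup:

use std::cmp::min;
use std::collections::VecDeque;
use std::io::{self, Read};

/// Reads transparently across a queue of byte buffers.
struct QueueReader {
    chunks: VecDeque<Vec<u8>>,
    data: Vec<u8>,
    pos: usize,
}

impl Read for QueueReader {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let mut bpos = 0;
        while bpos < buf.len() {
            // Refill the current buffer from the queue when it is exhausted.
            if self.pos == self.data.len() {
                match self.chunks.pop_front() {
                    Some(chunk) => {
                        self.data = chunk;
                        self.pos = 0;
                    }
                    None => break, // no more chunks: EOF after what was copied
                }
            }
            let l = min(self.data.len() - self.pos, buf.len() - bpos);
            buf[bpos..bpos + l].copy_from_slice(&self.data[self.pos..self.pos + l]);
            bpos += l;
            self.pos += l;
        }
        Ok(bpos)
    }
}
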

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::collections::HashMap;
use std::path::Path;
@ -16,24 +16,24 @@ quick_error!{
Io(err: io::Error) {
from()
cause(err)
description("Failed to read/write bundle map")
description(tr!("Failed to read/write bundle map"))
}
Decode(err: msgpack::DecodeError) {
from()
cause(err)
description("Failed to decode bundle map")
description(tr!("Failed to decode bundle map"))
}
Encode(err: msgpack::EncodeError) {
from()
cause(err)
description("Failed to encode bundle map")
description(tr!("Failed to encode bundle map"))
}
WrongHeader {
description("Wrong header")
description(tr!("Wrong header"))
}
WrongVersion(version: u8) {
description("Wrong version")
display("Wrong version: {}", version)
description(tr!("Wrong version"))
display("{}", tr_format!("Wrong version: {}", version))
}
}
}
@ -51,11 +51,11 @@ impl BundleMap {
let mut header = [0u8; 8];
try!(file.read_exact(&mut header));
if header[..HEADER_STRING.len()] != HEADER_STRING {
return Err(BundleMapError::WrongHeader)
return Err(BundleMapError::WrongHeader);
}
let version = header[HEADER_STRING.len()];
if version != HEADER_VERSION {
return Err(BundleMapError::WrongVersion(version))
return Err(BundleMapError::WrongVersion(version));
}
Ok(BundleMap(try!(msgpack::decode_from_stream(&mut file))))
}
@ -80,7 +80,7 @@ impl BundleMap {
pub fn find(&self, bundle: &BundleId) -> Option<u32> {
for (id, bundle_id) in &self.0 {
if bundle == bundle_id {
return Some(*id)
return Some(*id);
}
}
None
@ -92,7 +92,10 @@ impl BundleMap {
}
pub fn bundles(&self) -> Vec<(u32, BundleId)> {
self.0.iter().map(|(id, bundle)| (*id, bundle.clone())).collect()
self.0
.iter()
.map(|(id, bundle)| (*id, bundle.clone()))
.collect()
}
#[inline]

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use serde_yaml;
@ -16,22 +16,22 @@ quick_error!{
}
Parse(reason: &'static str) {
from()
description("Failed to parse config")
display("Failed to parse config: {}", reason)
description(tr!("Failed to parse config"))
display("{}", tr_format!("Failed to parse config: {}", reason))
}
Yaml(err: serde_yaml::Error) {
from()
cause(err)
description("Yaml format error")
display("Yaml format error: {}", err)
description(tr!("Yaml format error"))
display("{}", tr_format!("Yaml format error: {}", err))
}
}
}
impl HashMethod {
fn from_yaml(yaml: String) -> Result<Self, ConfigError> {
HashMethod::from(&yaml).map_err(ConfigError::Parse)
fn from_yaml(yaml: &str) -> Result<Self, ConfigError> {
HashMethod::from(yaml).map_err(ConfigError::Parse)
}
fn to_yaml(&self) -> String {
@ -49,7 +49,7 @@ impl Default for ChunkerYaml {
fn default() -> Self {
ChunkerYaml {
method: "fastcdc".to_string(),
avg_size: 16*1024,
avg_size: 16 * 1024,
seed: 0
}
}
@ -61,7 +61,7 @@ serde_impl!(ChunkerYaml(String) {
});
impl ChunkerType {
fn from_yaml(yaml: ChunkerYaml) -> Result<Self, ConfigError> {
fn from_yaml(yaml: &ChunkerYaml) -> Result<Self, ConfigError> {
ChunkerType::from(&yaml.method, yaml.avg_size, yaml.seed).map_err(ConfigError::Parse)
}
@ -78,8 +78,8 @@ impl ChunkerType {
impl Compression {
#[inline]
fn from_yaml(yaml: String) -> Result<Self, ConfigError> {
Compression::from_string(&yaml).map_err(|_| ConfigError::Parse("Invalid codec"))
fn from_yaml(yaml: &str) -> Result<Self, ConfigError> {
Compression::from_string(yaml).map_err(|_| ConfigError::Parse(tr!("Invalid codec")))
}
#[inline]
@ -91,8 +91,8 @@ impl Compression {
impl EncryptionMethod {
#[inline]
fn from_yaml(yaml: String) -> Result<Self, ConfigError> {
EncryptionMethod::from_string(&yaml).map_err(|_| ConfigError::Parse("Invalid codec"))
fn from_yaml(yaml: &str) -> Result<Self, ConfigError> {
EncryptionMethod::from_string(yaml).map_err(|_| ConfigError::Parse(tr!("Invalid codec")))
}
#[inline]
@ -126,14 +126,14 @@ struct ConfigYaml {
encryption: Option<EncryptionYaml>,
bundle_size: usize,
chunker: ChunkerYaml,
hash: String,
hash: String
}
impl Default for ConfigYaml {
fn default() -> Self {
ConfigYaml {
compression: Some("brotli/5".to_string()),
encryption: None,
bundle_size: 25*1024*1024,
bundle_size: 25 * 1024 * 1024,
chunker: ChunkerYaml::default(),
hash: "blake2".to_string()
}
@ -162,7 +162,7 @@ impl Default for Config {
Config {
compression: Some(Compression::from_string("brotli/3").unwrap()),
encryption: None,
bundle_size: 25*1024*1024,
bundle_size: 25 * 1024 * 1024,
chunker: ChunkerType::from_string("fastcdc/16").unwrap(),
hash: HashMethod::Blake2
}
@ -179,30 +179,37 @@ serde_impl!(Config(u64) {
impl Config {
fn from_yaml(yaml: ConfigYaml) -> Result<Self, ConfigError> {
let compression = if let Some(c) = yaml.compression {
Some(try!(Compression::from_yaml(c)))
Some(try!(Compression::from_yaml(&c)))
} else {
None
};
let encryption = if let Some(e) = yaml.encryption {
let method = try!(EncryptionMethod::from_yaml(e.method));
let key = try!(parse_hex(&e.key).map_err(|_| ConfigError::Parse("Invalid public key")));
let method = try!(EncryptionMethod::from_yaml(&e.method));
let key = try!(parse_hex(&e.key).map_err(|_| {
ConfigError::Parse(tr!("Invalid public key"))
}));
Some((method, key.into()))
} else {
None
};
Ok(Config{
compression: compression,
encryption: encryption,
Ok(Config {
compression,
encryption,
bundle_size: yaml.bundle_size,
chunker: try!(ChunkerType::from_yaml(yaml.chunker)),
hash: try!(HashMethod::from_yaml(yaml.hash))
chunker: try!(ChunkerType::from_yaml(&yaml.chunker)),
hash: try!(HashMethod::from_yaml(&yaml.hash))
})
}
fn to_yaml(&self) -> ConfigYaml {
ConfigYaml {
compression: self.compression.as_ref().map(|c| c.to_yaml()),
encryption: self.encryption.as_ref().map(|e| EncryptionYaml{method: e.0.to_yaml(), key: to_hex(&e.1[..])}),
encryption: self.encryption.as_ref().map(|e| {
EncryptionYaml {
method: e.0.to_yaml(),
key: to_hex(&e.1[..])
}
}),
bundle_size: self.bundle_size,
chunker: self.chunker.to_yaml(),
hash: self.hash.to_yaml()

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::io;
use std::path::PathBuf;
@ -15,95 +15,95 @@ quick_error!{
#[allow(unknown_lints,large_enum_variant)]
pub enum RepositoryError {
NoRemote {
description("Remote storage not found")
display("Repository error: The remote storage has not been found, may be it needs to be mounted?")
description(tr!("Remote storage not found"))
display("{}", tr_format!("Repository error: The remote storage has not been found, may be it needs to be mounted?"))
}
Index(err: IndexError) {
from()
cause(err)
description("Index error")
display("Repository error: index error\n\tcaused by: {}", err)
description(tr!("Index error"))
display("{}", tr_format!("Repository error: index error\n\tcaused by: {}", err))
}
BundleDb(err: BundleDbError) {
from()
cause(err)
description("Bundle error")
display("Repository error: bundle db error\n\tcaused by: {}", err)
description(tr!("Bundle error"))
display("{}", tr_format!("Repository error: bundle db error\n\tcaused by: {}", err))
}
BundleWriter(err: BundleWriterError) {
from()
cause(err)
description("Bundle write error")
display("Repository error: failed to write to new bundle\n\tcaused by: {}", err)
description(tr!("Bundle write error"))
display("{}", tr_format!("Repository error: failed to write to new bundle\n\tcaused by: {}", err))
}
BackupFile(err: BackupFileError) {
from()
cause(err)
description("Backup file error")
display("Repository error: backup file error\n\tcaused by: {}", err)
description(tr!("Backup file error"))
display("{}", tr_format!("Repository error: backup file error\n\tcaused by: {}", err))
}
Chunker(err: ChunkerError) {
from()
cause(err)
description("Chunker error")
display("Repository error: failed to chunk data\n\tcaused by: {}", err)
description(tr!("Chunker error"))
display("{}", tr_format!("Repository error: failed to chunk data\n\tcaused by: {}", err))
}
Config(err: ConfigError) {
from()
cause(err)
description("Configuration error")
display("Repository error: configuration error\n\tcaused by: {}", err)
description(tr!("Configuration error"))
display("{}", tr_format!("Repository error: configuration error\n\tcaused by: {}", err))
}
Inode(err: InodeError) {
from()
cause(err)
description("Inode error")
display("Repository error: inode error\n\tcaused by: {}", err)
description(tr!("Inode error"))
display("{}", tr_format!("Repository error: inode error\n\tcaused by: {}", err))
}
LoadKeys(err: EncryptionError) {
from()
cause(err)
description("Failed to load keys")
display("Repository error: failed to load keys\n\tcaused by: {}", err)
description(tr!("Failed to load keys"))
display("{}", tr_format!("Repository error: failed to load keys\n\tcaused by: {}", err))
}
BundleMap(err: BundleMapError) {
from()
cause(err)
description("Bundle map error")
display("Repository error: bundle map error\n\tcaused by: {}", err)
description(tr!("Bundle map error"))
display("{}", tr_format!("Repository error: bundle map error\n\tcaused by: {}", err))
}
Integrity(err: IntegrityError) {
from()
cause(err)
description("Integrity error")
display("Repository error: integrity error\n\tcaused by: {}", err)
description(tr!("Integrity error"))
display("{}", tr_format!("Repository error: integrity error\n\tcaused by: {}", err))
}
Dirty {
description("Dirty repository")
display("The repository is dirty, please run a check")
description(tr!("Dirty repository"))
display("{}", tr_format!("The repository is dirty, please run a check"))
}
Backup(err: BackupError) {
from()
cause(err)
description("Failed to create a backup")
display("Repository error: failed to create backup\n\tcaused by: {}", err)
description(tr!("Failed to create a backup"))
display("{}", tr_format!("Repository error: failed to create backup\n\tcaused by: {}", err))
}
Lock(err: LockError) {
from()
cause(err)
description("Failed to obtain lock")
display("Repository error: failed to obtain lock\n\tcaused by: {}", err)
description(tr!("Failed to obtain lock"))
display("{}", tr_format!("Repository error: failed to obtain lock\n\tcaused by: {}", err))
}
Io(err: io::Error) {
from()
cause(err)
description("IO error")
display("IO error: {}", err)
description(tr!("IO error"))
display("{}", tr_format!("IO error: {}", err))
}
NoSuchFileInBackup(backup: Backup, path: PathBuf) {
description("No such file in backup")
display("The backup does not contain the file {:?}", path)
description(tr!("No such file in backup"))
display("{}", tr_format!("The backup does not contain the file {:?}", path))
}
}
}
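
Note: the tr! and tr_format! calls introduced throughout these error types come from the new translation module (see the CowStr re-export added to the prelude). Their exact implementation is not shown in this diff; a minimal sketch of what such a lookup can look like, assuming a process-global catalog and a Cow-based return type in the spirit of CowStr:

use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::OnceLock;

/// Owned-or-borrowed string, mirroring the CowStr idea from the prelude.
type CowStr = Cow<'static, str>;

// Hypothetical global catalog; the real code would populate it from
// a loaded translation file for the current locale.
static TABLE: OnceLock<HashMap<&'static str, String>> = OnceLock::new();

/// Look up a translated message, falling back to the original text.
fn tr(msg: &'static str) -> CowStr {
    match TABLE.get_or_init(HashMap::new).get(msg) {
        Some(translated) => Cow::Owned(translated.clone()),
        None => Cow::Borrowed(msg),
    }
}
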

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::collections::{HashMap, VecDeque};
@ -39,12 +39,24 @@ pub struct RepositoryInfo {
}
#[derive(Debug)]
pub struct RepositoryStatistics {
pub index: IndexStatistics,
pub bundles: BundleStatistics
}
impl Repository {
fn mark_used(&self, bundles: &mut HashMap<u32, BundleAnalysis>, chunks: &[Chunk]) -> Result<bool, RepositoryError> {
fn mark_used(
&self,
bundles: &mut HashMap<u32, BundleAnalysis>,
chunks: &[Chunk],
) -> Result<bool, RepositoryError> {
let mut new = false;
for &(hash, len) in chunks {
if let Some(pos) = self.index.get(&hash) {
if let Some(bundle) = bundles.get_mut(&pos.bundle) {
let bundle = pos.bundle;
if let Some(bundle) = bundles.get_mut(&bundle) {
if !bundle.chunk_usage.get(pos.chunk as usize) {
new = true;
bundle.chunk_usage.set(pos.chunk as usize);
@ -62,17 +74,22 @@ impl Repository {
pub fn analyze_usage(&mut self) -> Result<HashMap<u32, BundleAnalysis>, RepositoryError> {
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let mut usage = HashMap::new();
for (id, bundle) in self.bundle_map.bundles() {
let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| IntegrityError::MissingBundle(bundle)));
usage.insert(id, BundleAnalysis {
chunk_usage: Bitmap::new(bundle.info.chunk_count),
info: bundle.info.clone(),
used_raw_size: 0
});
let bundle = try!(self.bundles.get_bundle_info(&bundle).ok_or_else(|| {
IntegrityError::MissingBundle(bundle)
}));
usage.insert(
id,
BundleAnalysis {
chunk_usage: Bitmap::new(bundle.info.chunk_count),
info: bundle.info.clone(),
used_raw_size: 0
}
);
}
let backups = try!(self.get_all_backups());
let mut todo = VecDeque::new();
@ -81,15 +98,16 @@ impl Repository {
}
while let Some(chunks) = todo.pop_back() {
if !try!(self.mark_used(&mut usage, &chunks)) {
continue
continue;
}
let inode = try!(self.get_inode(&chunks));
// Mark the content chunks as used
match inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(chunks)) => {
try!(self.mark_used(&mut usage, &chunks));
},
}
Some(FileData::ChunkedIndirect(chunks)) => {
if try!(self.mark_used(&mut usage, &chunks)) {
let chunk_data = try!(self.get_data(&chunks));
@ -126,9 +144,9 @@ impl Repository {
let chunk_count = bundles.iter().map(|b| b.chunk_count).sum();
RepositoryInfo {
bundle_count: bundles.len(),
chunk_count: chunk_count,
encoded_data_size: encoded_data_size,
raw_data_size: raw_data_size,
chunk_count,
encoded_data_size,
raw_data_size,
compression_ratio: encoded_data_size as f32 / raw_data_size as f32,
avg_chunk_size: raw_data_size as f32 / chunk_count as f32,
index_size: self.index.size(),
@ -136,4 +154,12 @@ impl Repository {
index_entries: self.index.len()
}
}
#[allow(dead_code)]
pub fn statistics(&self) -> RepositoryStatistics {
RepositoryStatistics {
index: self.index.statistics(),
bundles: self.bundles.statistics()
}
}
}
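
Note: mark_used above (and the integrity checks in the next file) rely on a Bitmap to record which chunk positions have already been visited, so each chunk list is followed only once. A minimal sketch of such a bitmap over 64-bit words; the project's own Bitmap type may differ in detail:

/// Fixed-capacity bitmap backed by 64-bit words.
struct Bitmap(Vec<u64>);

impl Bitmap {
    fn new(bits: usize) -> Self {
        Bitmap(vec![0u64; (bits + 63) / 64])
    }

    fn get(&self, i: usize) -> bool {
        self.0[i / 64] & (1 << (i % 64)) != 0
    }

    /// Set bit i, returning true if it was newly set.
    fn set(&mut self, i: usize) -> bool {
        let (word, mask) = (i / 64, 1u64 << (i % 64));
        let new = self.0[word] & mask == 0;
        self.0[word] |= mask;
        new
    }
}
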

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use super::*;
@ -12,36 +12,36 @@ quick_error!{
#[derive(Debug)]
pub enum IntegrityError {
MissingChunk(hash: Hash) {
description("Missing chunk")
display("Missing chunk: {}", hash)
description(tr!("Missing chunk"))
display("{}", tr_format!("Missing chunk: {}", hash))
}
MissingBundleId(id: u32) {
description("Missing bundle")
display("Missing bundle: {}", id)
description(tr!("Missing bundle"))
display("{}", tr_format!("Missing bundle: {}", id))
}
MissingBundle(id: BundleId) {
description("Missing bundle")
display("Missing bundle: {}", id)
description(tr!("Missing bundle"))
display("{}", tr_format!("Missing bundle: {}", id))
}
NoSuchChunk(bundle: BundleId, chunk: u32) {
description("No such chunk")
display("Bundle {} does not contain the chunk {}", bundle, chunk)
description(tr!("No such chunk"))
display("{}", tr_format!("Bundle {} does not contain the chunk {}", bundle, chunk))
}
RemoteBundlesNotInMap {
description("Remote bundles missing from map")
description(tr!("Remote bundles missing from map"))
}
MapContainsDuplicates {
description("Map contains duplicates")
description(tr!("Map contains duplicates"))
}
BrokenInode(path: PathBuf, err: Box<RepositoryError>) {
cause(err)
description("Broken inode")
display("Broken inode: {:?}\n\tcaused by: {}", path, err)
description(tr!("Broken inode"))
display("{}", tr_format!("Broken inode: {:?}\n\tcaused by: {}", path, err))
}
MissingInodeData(path: PathBuf, err: Box<RepositoryError>) {
cause(err)
description("Missing inode data")
display("Missing inode data in: {:?}\n\tcaused by: {}", path, err)
description(tr!("Missing inode data"))
display("{}", tr_format!("Missing inode data in: {:?}\n\tcaused by: {}", path, err))
}
}
}
@ -49,32 +49,39 @@ quick_error!{
impl Repository {
fn check_index_chunks(&self) -> Result<(), RepositoryError> {
let mut progress = ProgressBar::new(self.index.len() as u64);
progress.message("checking index: ");
progress.message(tr!("checking index: "));
progress.set_max_refresh_rate(Some(Duration::from_millis(100)));
for (count,(_hash, location)) in self.index.iter().enumerate() {
for (count, (_hash, location)) in self.index.iter().enumerate() {
// Lookup bundle id from map
let bundle_id = try!(self.get_bundle_id(location.bundle));
// Get bundle object from bundledb
let bundle = if let Some(bundle) = self.bundles.get_bundle_info(&bundle_id) {
bundle
} else {
progress.finish_print("checking index: done.");
return Err(IntegrityError::MissingBundle(bundle_id.clone()).into())
progress.finish_print(tr!("checking index: done."));
return Err(IntegrityError::MissingBundle(bundle_id.clone()).into());
};
// Get chunk from bundle
if bundle.info.chunk_count <= location.chunk as usize {
progress.finish_print("checking index: done.");
return Err(IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into())
progress.finish_print(tr!("checking index: done."));
return Err(
IntegrityError::NoSuchChunk(bundle_id.clone(), location.chunk).into()
);
}
if count % 1000 == 0 {
progress.set(count as u64);
}
}
progress.finish_print("checking index: done.");
progress.finish_print(tr!("checking index: done."));
Ok(())
}
fn check_chunks(&self, checked: &mut Bitmap, chunks: &[Chunk], mark: bool) -> Result<bool, RepositoryError> {
fn check_chunks(
&self,
checked: &mut Bitmap,
chunks: &[Chunk],
mark: bool,
) -> Result<bool, RepositoryError> {
let mut new = false;
for &(hash, _len) in chunks {
if let Some(pos) = self.index.pos(&hash) {
@ -83,47 +90,63 @@ impl Repository {
checked.set(pos);
}
} else {
return Err(IntegrityError::MissingChunk(hash).into())
return Err(IntegrityError::MissingChunk(hash).into());
}
}
Ok(new)
}
fn check_inode_contents(&mut self, inode: &Inode, checked: &mut Bitmap) -> Result<(), RepositoryError> {
fn check_inode_contents(
&mut self,
inode: &Inode,
checked: &mut Bitmap,
) -> Result<(), RepositoryError> {
match inode.data {
None | Some(FileData::Inline(_)) => (),
None |
Some(FileData::Inline(_)) => (),
Some(FileData::ChunkedDirect(ref chunks)) => {
try!(self.check_chunks(checked, chunks, true));
},
}
Some(FileData::ChunkedIndirect(ref chunks)) => {
if try!(self.check_chunks(checked, chunks, true)) {
if try!(self.check_chunks(checked, chunks, false)) {
let chunk_data = try!(self.get_data(chunks));
let chunks = ChunkList::read_from(&chunk_data);
try!(self.check_chunks(checked, &chunks, true));
let chunks2 = ChunkList::read_from(&chunk_data);
try!(self.check_chunks(checked, &chunks2, true));
try!(self.check_chunks(checked, chunks, true));
}
}
}
Ok(())
}
fn check_subtree(&mut self, path: PathBuf, chunks: &[Chunk], checked: &mut Bitmap, repair: bool) -> Result<Option<ChunkList>, RepositoryError> {
fn check_subtree(
&mut self,
path: PathBuf,
chunks: &[Chunk],
checked: &mut Bitmap,
repair: bool,
) -> Result<Option<ChunkList>, RepositoryError> {
let mut modified = false;
match self.check_chunks(checked, chunks, false) {
Ok(false) => return Ok(None),
Ok(true) => (),
Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into())
Err(err) => return Err(IntegrityError::BrokenInode(path, Box::new(err)).into()),
}
let mut inode = try!(self.get_inode(chunks));
// Mark the content chunks as used
if let Err(err) = self.check_inode_contents(&inode, checked) {
if repair {
warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err);
info!("Removing inode data");
tr_warn!(
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
path,
err
);
tr_info!("Removing inode data");
inode.data = Some(FileData::Inline(vec![].into()));
inode.size = 0;
modified = true;
} else {
return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into())
return Err(IntegrityError::MissingInodeData(path, Box::new(err)).into());
}
}
// Put children in todo
@ -135,14 +158,20 @@ impl Repository {
Ok(Some(c)) => {
*chunks = c;
modified = true;
},
Err(err) => if repair {
warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err)
}
Err(err) => {
if repair {
tr_warn!(
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
path.join(name),
err
);
tr_info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err);
}
}
}
}
@ -159,7 +188,10 @@ impl Repository {
}
fn evacuate_broken_backup(&self, name: &str) -> Result<(), RepositoryError> {
warn!("The backup {} was corrupted and needed to be modified.", name);
tr_warn!(
"The backup {} was corrupted and needed to be modified.",
name
);
let src = self.layout.backup_path(name);
let mut dst = src.with_extension("backup.broken");
let mut num = 1;
@ -171,21 +203,31 @@ impl Repository {
try!(fs::copy(&src, &dst));
try!(fs::remove_file(&src));
}
info!("The original backup was renamed to {:?}", dst);
tr_info!("The original backup was renamed to {:?}", dst);
Ok(())
}
#[inline]
pub fn check_backup(&mut self, name: &str, backup: &mut Backup, repair: bool) -> Result<(), RepositoryError> {
pub fn check_backup(
&mut self,
name: &str,
backup: &mut Backup,
repair: bool,
) -> Result<(), RepositoryError> {
let _lock = if repair {
try!(self.write_mode());
Some(self.lock(false))
} else {
None
};
info!("Checking backup...");
tr_info!("Checking backup...");
let mut checked = Bitmap::new(self.index.capacity());
match self.check_subtree(Path::new("").to_path_buf(), &backup.root, &mut checked, repair) {
match self.check_subtree(
Path::new("").to_path_buf(),
&backup.root,
&mut checked,
repair
) {
Ok(None) => (),
Ok(Some(chunks)) => {
try!(self.flush());
@ -193,38 +235,56 @@ impl Repository {
backup.modified = true;
try!(self.evacuate_broken_backup(name));
try!(self.save_backup(backup, name));
},
Err(err) => if repair {
warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err);
try!(self.evacuate_broken_backup(name));
} else {
return Err(err)
}
Err(err) => {
if repair {
tr_warn!(
"The root of the backup {} has been corrupted\n\tcaused by: {}",
name,
err
);
try!(self.evacuate_broken_backup(name));
} else {
return Err(err);
}
}
}
Ok(())
}
pub fn check_backup_inode(&mut self, name: &str, backup: &mut Backup, path: &Path, repair: bool) -> Result<(), RepositoryError> {
pub fn check_backup_inode(
&mut self,
name: &str,
backup: &mut Backup,
path: &Path,
repair: bool,
) -> Result<(), RepositoryError> {
let _lock = if repair {
try!(self.write_mode());
Some(self.lock(false))
} else {
None
};
info!("Checking inode...");
tr_info!("Checking inode...");
let mut checked = Bitmap::new(self.index.capacity());
let mut inodes = try!(self.get_backup_path(backup, path));
let mut inode = inodes.pop().unwrap();
let mut modified = false;
if let Err(err) = self.check_inode_contents(&inode, &mut checked) {
if repair {
warn!("Problem detected: data of {:?} is corrupt\n\tcaused by: {}", path, err);
info!("Removing inode data");
tr_warn!(
"Problem detected: data of {:?} is corrupt\n\tcaused by: {}",
path,
err
);
tr_info!("Removing inode data");
inode.data = Some(FileData::Inline(vec![].into()));
inode.size = 0;
modified = true;
} else {
return Err(IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into())
return Err(
IntegrityError::MissingInodeData(path.to_path_buf(), Box::new(err)).into()
);
}
}
if let Some(ref mut children) = inode.children {
@ -235,14 +295,20 @@ impl Repository {
Ok(Some(c)) => {
*chunks = c;
modified = true;
},
Err(err) => if repair {
warn!("Problem detected: inode {:?} is corrupt\n\tcaused by: {}", path.join(name), err);
info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err)
}
Err(err) => {
if repair {
tr_warn!(
"Problem detected: inode {:?} is corrupt\n\tcaused by: {}",
path.join(name),
err
);
tr_info!("Removing broken inode from backup");
removed.push(name.to_string());
modified = true;
} else {
return Err(err);
}
}
}
}
@ -273,19 +339,27 @@ impl Repository {
} else {
None
};
info!("Checking backups...");
tr_info!("Checking backups...");
let mut checked = Bitmap::new(self.index.capacity());
let backup_map = match self.get_all_backups() {
Ok(backup_map) => backup_map,
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map, _failed))) => {
warn!("Some backups could not be read, ignoring them");
Err(RepositoryError::BackupFile(BackupFileError::PartialBackupsList(backup_map,
_failed))) => {
tr_warn!("Some backups could not be read, ignoring them");
backup_map
},
Err(err) => return Err(err)
}
Err(err) => return Err(err),
};
for (name, mut backup) in ProgressIter::new("checking backups", backup_map.len(), backup_map.into_iter()) {
for (name, mut backup) in
ProgressIter::new(tr!("checking backups"), backup_map.len(), backup_map.into_iter())
{
let path = format!("{}::", name);
match self.check_subtree(Path::new(&path).to_path_buf(), &backup.root, &mut checked, repair) {
match self.check_subtree(
Path::new(&path).to_path_buf(),
&backup.root,
&mut checked,
repair
) {
Ok(None) => (),
Ok(Some(chunks)) => {
try!(self.flush());
@ -293,12 +367,18 @@ impl Repository {
backup.modified = true;
try!(self.evacuate_broken_backup(&name));
try!(self.save_backup(&backup, &name));
},
Err(err) => if repair {
warn!("The root of the backup {} has been corrupted\n\tcaused by: {}", name, err);
try!(self.evacuate_broken_backup(&name));
} else {
return Err(err)
}
Err(err) => {
if repair {
tr_warn!(
"The root of the backup {} has been corrupted\n\tcaused by: {}",
name,
err
);
try!(self.evacuate_broken_backup(&name));
} else {
return Err(err);
}
}
}
}
@ -306,32 +386,35 @@ impl Repository {
}
pub fn check_repository(&mut self, repair: bool) -> Result<(), RepositoryError> {
info!("Checking repository integrity...");
tr_info!("Checking repository integrity...");
let mut rebuild = false;
for (_id, bundle_id) in self.bundle_map.bundles() {
if self.bundles.get_bundle_info(&bundle_id).is_none() {
if repair {
warn!("Problem detected: bundle map contains unknown bundle {}", bundle_id);
tr_warn!(
"Problem detected: bundle map contains unknown bundle {}",
bundle_id
);
rebuild = true;
} else {
return Err(IntegrityError::MissingBundle(bundle_id).into())
return Err(IntegrityError::MissingBundle(bundle_id).into());
}
}
}
if self.bundle_map.len() < self.bundles.len() {
if repair {
warn!("Problem detected: bundle map does not contain all remote bundles");
tr_warn!("Problem detected: bundle map does not contain all remote bundles");
rebuild = true;
} else {
return Err(IntegrityError::RemoteBundlesNotInMap.into())
return Err(IntegrityError::RemoteBundlesNotInMap.into());
}
}
if self.bundle_map.len() > self.bundles.len() {
if repair {
warn!("Problem detected: bundle map contains bundles multiple times");
tr_warn!("Problem detected: bundle map contains bundles multiple times");
rebuild = true;
} else {
return Err(IntegrityError::MapContainsDuplicates.into())
return Err(IntegrityError::MapContainsDuplicates.into());
}
}
if rebuild {
@ -342,12 +425,12 @@ impl Repository {
}
pub fn rebuild_bundle_map(&mut self) -> Result<(), RepositoryError> {
info!("Rebuilding bundle map from bundles");
tr_info!("Rebuilding bundle map from bundles");
self.bundle_map = BundleMap::create();
for bundle in self.bundles.list_bundles() {
let bundle_id = match bundle.mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
self.bundle_map.set(bundle_id, bundle.id.clone());
if self.next_meta_bundle == bundle_id {
@ -361,14 +444,20 @@ impl Repository {
}
pub fn rebuild_index(&mut self) -> Result<(), RepositoryError> {
info!("Rebuilding index from bundles");
tr_info!("Rebuilding index from bundles");
self.index.clear();
let mut bundles = self.bundle_map.bundles();
bundles.sort_by_key(|&(_, ref v)| v.clone());
for (num, id) in bundles {
for (num, id) in ProgressIter::new(tr!("Rebuilding index from bundles"), bundles.len(), bundles.into_iter()) {
let chunks = try!(self.bundles.get_chunk_list(&id));
for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
try!(self.index.set(&hash, &Location{bundle: num as u32, chunk: i as u32}));
try!(self.index.set(
&hash,
&Location {
bundle: num as u32,
chunk: i as u32
}
));
}
}
Ok(())
@ -379,22 +468,28 @@ impl Repository {
if repair {
try!(self.write_mode());
}
info!("Checking index integrity...");
tr_info!("Checking index integrity...");
if let Err(err) = self.index.check() {
if repair {
warn!("Problem detected: index was corrupted\n\tcaused by: {}", err);
tr_warn!(
"Problem detected: index was corrupted\n\tcaused by: {}",
err
);
return self.rebuild_index();
} else {
return Err(err.into())
return Err(err.into());
}
}
info!("Checking index entries...");
tr_info!("Checking index entries...");
if let Err(err) = self.check_index_chunks() {
if repair {
warn!("Problem detected: index entries were inconsistent\n\tcaused by: {}", err);
tr_warn!(
"Problem detected: index entries were inconsistent\n\tcaused by: {}",
err
);
return self.rebuild_index();
} else {
return Err(err.into())
return Err(err);
}
}
Ok(())
@ -405,10 +500,10 @@ impl Repository {
if repair {
try!(self.write_mode());
}
info!("Checking bundle integrity...");
tr_info!("Checking bundle integrity...");
if try!(self.bundles.check(full, repair)) {
// Some bundles got repaired
warn!("Some bundles have been rewritten, please remove the broken bundles manually.");
tr_warn!("Some bundles have been rewritten, please remove the broken bundles manually.");
try!(self.rebuild_bundle_map());
try!(self.rebuild_index());
}


@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::path::{Path, PathBuf};
@ -62,7 +62,8 @@ impl RepositoryLayout {
#[inline]
pub fn remote_exists(&self) -> bool {
self.remote_bundles_path().exists() && self.backups_path().exists() && self.remote_locks_path().exists()
self.remote_bundles_path().exists() && self.backups_path().exists() &&
self.remote_locks_path().exists()
}
#[inline]
@ -85,15 +86,23 @@ impl RepositoryLayout {
self.0.join("bundles/cached")
}
fn bundle_path(&self, bundle: &BundleId, mut folder: PathBuf, mut count: usize) -> (PathBuf, PathBuf) {
let mut file = bundle.to_string().to_owned() + ".bundle";
while count >= 100 {
if file.len() < 10 {
break
fn bundle_path(
&self,
bundle: &BundleId,
mut folder: PathBuf,
mut count: usize,
) -> (PathBuf, PathBuf) {
let file = bundle.to_string().to_owned() + ".bundle";
{
let mut rest = &file as &str;
while count >= 100 {
if rest.len() < 10 {
break;
}
folder = folder.join(&rest[0..2]);
rest = &rest[2..];
count /= 250;
}
folder = folder.join(&file[0..2]);
file = file[2..].to_string();
count /= 250;
}
(folder, file.into())
}
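The refactored bundle_path shards bundle files into nested two-character subdirectories once a repository holds enough bundles, keeping any single folder small. A standalone sketch of the same scheme (assumed semantics for illustration, not the actual zvault API):

    use std::path::PathBuf;

    // Peel two characters of the bundle file name into a subfolder per
    // level while the expected per-folder count stays >= 100; each level
    // fans out into up to 250 subfolders, hence count /= 250.
    fn shard(file: &str, mut count: usize) -> (PathBuf, String) {
        let mut folder = PathBuf::new();
        let mut rest = file;
        while count >= 100 && rest.len() >= 10 {
            folder = folder.join(&rest[0..2]);
            rest = &rest[2..];
            count /= 250;
        }
        (folder, rest.to_string())
    }

    fn main() {
        // With ~30_000 bundles two levels are used: ab/cd/<rest>.bundle
        let (folder, file) = shard("abcdef1234567890.bundle", 30_000);
        assert_eq!(folder, PathBuf::from("ab/cd"));
        assert_eq!(file, "ef1234567890.bundle");
    }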
@ -115,7 +124,10 @@ impl RepositoryLayout {
#[inline]
pub fn temp_bundle_path(&self) -> PathBuf {
self.temp_bundles_path().join(BundleId::random().to_string().to_owned() + ".bundle")
self.temp_bundles_path().join(
BundleId::random().to_string().to_owned() +
".bundle"
)
}
#[inline]


@ -1,59 +1,62 @@
use ::prelude::*;
use prelude::*;
use filetime::{self, FileTime};
use xattr;
use libc;
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::fs::{self, File, Permissions};
use std::os::linux::fs::MetadataExt;
use std::os::unix::fs::{PermissionsExt, symlink};
use std::os::unix::fs::{FileTypeExt, PermissionsExt, MetadataExt as UnixMetadataExt, symlink};
use std::io::{self, Read, Write};
use std::os::unix::ffi::OsStrExt;
use std::fmt;
use std::ffi;
quick_error!{
#[derive(Debug)]
pub enum InodeError {
UnsupportedFiletype(path: PathBuf) {
description("Unsupported file type")
display("Inode error: file {:?} has an unsupported type", path)
description(tr!("Unsupported file type"))
display("{}", tr_format!("Inode error: file {:?} has an unsupported type", path))
}
ReadMetadata(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to obtain metadata for file")
display("Inode error: failed to obtain metadata for file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to obtain metadata for file"))
display("{}", tr_format!("Inode error: failed to obtain metadata for file {:?}\n\tcaused by: {}", path, err))
}
ReadXattr(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to obtain xattr for file")
display("Inode error: failed to obtain xattr for file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to obtain xattr for file"))
display("{}", tr_format!("Inode error: failed to obtain xattr for file {:?}\n\tcaused by: {}", path, err))
}
ReadLinkTarget(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to obtain link target for file")
display("Inode error: failed to obtain link target for file {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to obtain link target for file"))
display("{}", tr_format!("Inode error: failed to obtain link target for file {:?}\n\tcaused by: {}", path, err))
}
Create(err: io::Error, path: PathBuf) {
cause(err)
description("Failed to create entity")
display("Inode error: failed to create entity {:?}\n\tcaused by: {}", path, err)
description(tr!("Failed to create entity"))
display("{}", tr_format!("Inode error: failed to create entity {:?}\n\tcaused by: {}", path, err))
}
Integrity(reason: &'static str) {
description("Integrity error")
display("Inode error: inode integrity error: {}", reason)
description(tr!("Integrity error"))
display("{}", tr_format!("Inode error: inode integrity error: {}", reason))
}
Decode(err: msgpack::DecodeError) {
from()
cause(err)
description("Failed to decode metadata")
display("Inode error: failed to decode metadata\n\tcaused by: {}", err)
description(tr!("Failed to decode metadata"))
display("{}", tr_format!("Inode error: failed to decode metadata\n\tcaused by: {}", err))
}
Encode(err: msgpack::EncodeError) {
from()
cause(err)
description("Failed to encode metadata")
display("Inode error: failed to encode metadata\n\tcaused by: {}", err)
description(tr!("Failed to encode metadata"))
display("{}", tr_format!("Inode error: failed to encode metadata\n\tcaused by: {}", err))
}
}
}
@ -63,19 +66,28 @@ quick_error!{
pub enum FileType {
File,
Directory,
Symlink
Symlink,
BlockDevice,
CharDevice,
NamedPipe
}
serde_impl!(FileType(u8) {
File => 0,
Directory => 1,
Symlink => 2
Symlink => 2,
BlockDevice => 3,
CharDevice => 4,
NamedPipe => 5
});
impl fmt::Display for FileType {
fn fmt(&self, format: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
FileType::File => write!(format, "file"),
FileType::Directory => write!(format, "directory"),
FileType::Symlink => write!(format, "symlink")
FileType::File => write!(format, "{}", tr!("file")),
FileType::Directory => write!(format, "{}", tr!("directory")),
FileType::Symlink => write!(format, "{}", tr!("symlink")),
FileType::BlockDevice => write!(format, "{}", tr!("block device")),
FileType::CharDevice => write!(format, "{}", tr!("char device")),
FileType::NamedPipe => write!(format, "{}", tr!("named pipe")),
}
}
}
@ -109,7 +121,8 @@ pub struct Inode {
pub cum_size: u64,
pub cum_dirs: usize,
pub cum_files: usize,
pub xattrs: BTreeMap<String, msgpack::Bytes>
pub xattrs: BTreeMap<String, msgpack::Bytes>,
pub device: Option<(u32, u32)>
}
impl Default for Inode {
fn default() -> Self {
@ -127,7 +140,8 @@ impl Default for Inode {
cum_size: 0,
cum_dirs: 0,
cum_files: 0,
xattrs: BTreeMap::new()
xattrs: BTreeMap::new(),
device: None
}
}
}
@ -145,15 +159,20 @@ serde_impl!(Inode(u8?) {
cum_size: u64 => 12,
cum_dirs: usize => 13,
cum_files: usize => 14,
xattrs: BTreeMap<String, msgpack::Bytes> => 15
xattrs: BTreeMap<String, msgpack::Bytes> => 15,
device: Option<(u32, u32)> => 16
});
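Note on compatibility: the numbered serde_impl keys suggest a stable integer-keyed encoding, so appending device as key 16 with an Option type keeps older archives decodable — backups written before this change simply lack key 16 and fall back to device: None from the Default impl. A minimal sketch of that pattern (assumed semantics, hypothetical text encoding):

    use std::collections::BTreeMap;

    #[derive(Debug, Default)]
    struct Meta {
        name: String,               // key 0
        device: Option<(u32, u32)>, // key 16, added later
    }

    // Decode from integer-keyed fields; missing keys keep their defaults.
    fn decode(fields: &BTreeMap<u8, String>) -> Meta {
        let mut m = Meta::default();
        if let Some(v) = fields.get(&0) {
            m.name = v.clone();
        }
        if let Some(v) = fields.get(&16) {
            // hypothetical "major:minor" text encoding, just for the sketch
            let mut it = v.split(':').filter_map(|s| s.parse::<u32>().ok());
            if let (Some(maj), Some(min)) = (it.next(), it.next()) {
                m.device = Some((maj, min));
            }
        }
        m
    }

    fn main() {
        let old_archive = BTreeMap::from([(0u8, "file".to_string())]);
        assert_eq!(decode(&old_archive).device, None); // old data still decodes
    }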
impl Inode {
pub fn get_from<P: AsRef<Path>>(path: P) -> Result<Self, InodeError> {
let path = path.as_ref();
let name = path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "_".to_string());
let meta = try!(fs::symlink_metadata(path).map_err(|e| InodeError::ReadMetadata(e, path.to_owned())));
let name = path.file_name()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "_".to_string());
let meta = try!(fs::symlink_metadata(path).map_err(|e| {
InodeError::ReadMetadata(e, path.to_owned())
}));
let mut inode = Inode::default();
inode.name = name;
if meta.is_file() {
@ -165,11 +184,28 @@ impl Inode {
FileType::Directory
} else if meta.file_type().is_symlink() {
FileType::Symlink
} else if meta.file_type().is_block_device() {
FileType::BlockDevice
} else if meta.file_type().is_char_device() {
FileType::CharDevice
} else if meta.file_type().is_fifo() {
FileType::NamedPipe
} else {
return Err(InodeError::UnsupportedFiletype(path.to_owned()));
};
if meta.file_type().is_symlink() {
inode.symlink_target = Some(try!(fs::read_link(path).map_err(|e| InodeError::ReadLinkTarget(e, path.to_owned()))).to_string_lossy().to_string());
inode.symlink_target = Some(
try!(fs::read_link(path).map_err(|e| {
InodeError::ReadLinkTarget(e, path.to_owned())
})).to_string_lossy()
.to_string()
);
}
if meta.file_type().is_block_device() || meta.file_type().is_char_device() {
let rdev = meta.rdev();
let major = (rdev >> 8) as u32;
let minor = (rdev & 0xff) as u32;
inode.device = Some((major, minor));
}
inode.mode = meta.permissions().mode();
inode.user = meta.st_uid();
@ -178,8 +214,15 @@ impl Inode {
if xattr::SUPPORTED_PLATFORM {
if let Ok(attrs) = xattr::list(path) {
for name in attrs {
let data = try!(xattr::get(path, &name).map_err(|e| InodeError::ReadXattr(e, path.to_owned())));
inode.xattrs.insert(name.to_string_lossy().to_string(), data.into());
if let Some(data) = try!(xattr::get(path, &name).map_err(|e| {
InodeError::ReadXattr(e, path.to_owned())
}))
{
inode.xattrs.insert(
name.to_string_lossy().to_string(),
data.into()
);
}
}
}
}
@ -191,55 +234,108 @@ impl Inode {
let mut file = None;
match self.file_type {
FileType::File => {
file = Some(try!(File::create(&full_path).map_err(|e| InodeError::Create(e, full_path.clone()))));
},
file = Some(try!(File::create(&full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
})));
}
FileType::Directory => {
try!(fs::create_dir(&full_path).map_err(|e| InodeError::Create(e, full_path.clone())));
},
try!(fs::create_dir(&full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
}));
}
FileType::Symlink => {
if let Some(ref src) = self.symlink_target {
try!(symlink(src, &full_path).map_err(|e| InodeError::Create(e, full_path.clone())));
try!(symlink(src, &full_path).map_err(|e| {
InodeError::Create(e, full_path.clone())
}));
} else {
return Err(InodeError::Integrity("Symlink without target"))
return Err(InodeError::Integrity(tr!("Symlink without target")));
}
}
FileType::NamedPipe => {
let name = try!(
ffi::CString::new(full_path.as_os_str().as_bytes())
.map_err(|_| InodeError::Integrity(tr!("Name contains nulls")))
);
let mode = self.mode | libc::S_IFIFO;
if unsafe { libc::mkfifo(name.as_ptr(), mode) } != 0 {
return Err(InodeError::Create(
io::Error::last_os_error(),
full_path.clone()
));
}
}
FileType::BlockDevice | FileType::CharDevice => {
let name = try!(
ffi::CString::new(full_path.as_os_str().as_bytes())
.map_err(|_| InodeError::Integrity(tr!("Name contains nulls")))
);
let mode = self.mode |
match self.file_type {
FileType::BlockDevice => libc::S_IFBLK,
FileType::CharDevice => libc::S_IFCHR,
_ => unreachable!(),
};
let device = if let Some((major, minor)) = self.device {
unsafe { libc::makedev(major, minor) }
} else {
return Err(InodeError::Integrity(tr!("Device without id")));
};
if unsafe { libc::mknod(name.as_ptr(), mode, device) } != 0 {
return Err(InodeError::Create(
io::Error::last_os_error(),
full_path.clone()
));
}
}
}
let time = FileTime::from_seconds_since_1970(self.timestamp as u64, 0);
if let Err(err) = filetime::set_file_times(&full_path, time, time) {
warn!("Failed to set file time on {:?}: {}", full_path, err);
tr_warn!("Failed to set file time on {:?}: {}", full_path, err);
}
if !self.xattrs.is_empty() {
if xattr::SUPPORTED_PLATFORM {
for (name, data) in &self.xattrs {
if let Err(err) = xattr::set(&full_path, name, data) {
warn!("Failed to set xattr {} on {:?}: {}", name, full_path, err);
tr_warn!("Failed to set xattr {} on {:?}: {}", name, full_path, err);
}
}
} else {
warn!("Not setting xattr on {:?}", full_path);
tr_warn!("Not setting xattr on {:?}", full_path);
}
}
if let Err(err) = fs::set_permissions(&full_path, Permissions::from_mode(self.mode)) {
warn!("Failed to set permissions {:o} on {:?}: {}", self.mode, full_path, err);
tr_warn!(
"Failed to set permissions {:o} on {:?}: {}",
self.mode,
full_path,
err
);
}
if let Err(err) = chown(&full_path, self.user, self.group) {
warn!("Failed to set user {} and group {} on {:?}: {}", self.user, self.group, full_path, err);
tr_warn!(
"Failed to set user {} and group {} on {:?}: {}",
self.user,
self.group,
full_path,
err
);
}
Ok(file)
}
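The rdev split in Inode::get_from above uses the historical dev_t layout of an 8-bit major and 8-bit minor number; devices using Linux's extended layout (minor numbers above 255) would be truncated by this shift/mask, which is worth keeping in mind for exotic device nodes. A worked check of the split (sketch under that assumption):

    // Same split as in Inode::get_from: high byte = major, low byte = minor.
    fn simple_split(rdev: u64) -> (u32, u32) {
        ((rdev >> 8) as u32, (rdev & 0xff) as u32)
    }

    fn main() {
        let rdev: u64 = (8 << 8) | 1; // block device 8:1, e.g. a partition
        assert_eq!(simple_split(rdev), (8, 1));
    }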
#[inline]
pub fn is_same_meta(&self, other: &Inode) -> bool {
self.file_type == other.file_type && self.size == other.size && self.mode == other.mode
&& self.user == other.user && self.group == other.group && self.name == other.name
&& self.timestamp == other.timestamp && self.symlink_target == other.symlink_target
self.file_type == other.file_type && self.size == other.size &&
self.mode == other.mode && self.user == other.user &&
self.group == other.group && self.name == other.name &&
self.timestamp == other.timestamp && self.symlink_target == other.symlink_target
}
#[inline]
pub fn is_same_meta_quick(&self, other: &Inode) -> bool {
self.timestamp == other.timestamp
&& self.file_type == other.file_type
&& self.size == other.size
self.timestamp == other.timestamp && self.file_type == other.file_type &&
self.size == other.size
}
#[inline]
@ -255,13 +351,17 @@ impl Inode {
impl Repository {
pub fn create_inode<P: AsRef<Path>>(&mut self, path: P, reference: Option<&Inode>) -> Result<Inode, RepositoryError> {
pub fn create_inode<P: AsRef<Path>>(
&mut self,
path: P,
reference: Option<&Inode>,
) -> Result<Inode, RepositoryError> {
let mut inode = try!(Inode::get_from(path.as_ref()));
if inode.file_type == FileType::File && inode.size > 0 {
if let Some(reference) = reference {
if reference.is_same_meta_quick(&inode) {
inode.data = reference.data.clone();
return Ok(inode)
return Ok(inode);
}
}
let mut file = try!(File::open(path));
@ -294,16 +394,20 @@ impl Repository {
Ok(try!(Inode::decode(&try!(self.get_data(chunks)))))
}
pub fn save_inode_at<P: AsRef<Path>>(&mut self, inode: &Inode, path: P) -> Result<(), RepositoryError> {
pub fn save_inode_at<P: AsRef<Path>>(
&mut self,
inode: &Inode,
path: P,
) -> Result<(), RepositoryError> {
if let Some(mut file) = try!(inode.create_at(path.as_ref())) {
if let Some(ref contents) = inode.data {
match *contents {
FileData::Inline(ref data) => {
try!(file.write_all(data));
},
}
FileData::ChunkedDirect(ref chunks) => {
try!(self.get_stream(chunks, &mut file));
},
}
FileData::ChunkedIndirect(ref chunks) => {
let chunk_data = try!(self.get_data(chunks));
let chunks = ChunkList::read_from(&chunk_data);


@ -11,7 +11,7 @@ mod backup_file;
mod tarfile;
mod layout;
use ::prelude::*;
use prelude::*;
use std::mem;
use std::cmp::max;
@ -27,19 +27,18 @@ pub use self::metadata::{Inode, FileType, FileData, InodeError};
pub use self::backup::{BackupError, BackupOptions, DiffType};
pub use self::backup_file::{Backup, BackupFileError};
pub use self::integrity::IntegrityError;
pub use self::info::{RepositoryInfo, BundleAnalysis};
pub use self::info::{RepositoryInfo, BundleAnalysis, RepositoryStatistics};
pub use self::layout::RepositoryLayout;
use self::bundle_map::BundleMap;
const REPOSITORY_README: &'static [u8] = include_bytes!("../../docs/repository_readme.md");
const DEFAULT_EXCLUDES: &'static [u8] = include_bytes!("../../docs/excludes.default");
const REPOSITORY_README: &[u8] = include_bytes!("../../docs/repository_readme.md");
const DEFAULT_EXCLUDES: &[u8] = include_bytes!("../../docs/excludes.default");
const INDEX_MAGIC: [u8; 7] = *b"zvault\x02";
const INDEX_VERSION: u8 = 1;
#[repr(packed)]
#[derive(Clone, Copy, PartialEq, Debug, Default)]
pub struct Location {
pub bundle: u32,
@ -47,7 +46,10 @@ pub struct Location {
}
impl Location {
pub fn new(bundle: u32, chunk: u32) -> Self {
Location{ bundle: bundle, chunk: chunk }
Location {
bundle,
chunk
}
}
}
@ -88,28 +90,42 @@ pub struct Repository {
impl Repository {
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(path: P, config: Config, remote: R) -> Result<Self, RepositoryError> {
pub fn create<P: AsRef<Path>, R: AsRef<Path>>(
path: P,
config: &Config,
remote: R,
) -> Result<Self, RepositoryError> {
let layout = RepositoryLayout::new(path.as_ref().to_path_buf());
try!(fs::create_dir(layout.base_path()));
try!(File::create(layout.excludes_path()).and_then(|mut f| f.write_all(DEFAULT_EXCLUDES)));
try!(File::create(layout.excludes_path()).and_then(|mut f| {
f.write_all(DEFAULT_EXCLUDES)
}));
try!(fs::create_dir(layout.keys_path()));
try!(fs::create_dir(layout.local_locks_path()));
try!(symlink(remote, layout.remote_path()));
try!(File::create(layout.remote_readme_path()).and_then(|mut f| f.write_all(REPOSITORY_README)));
try!(File::create(layout.remote_readme_path()).and_then(
|mut f| {
f.write_all(REPOSITORY_README)
}
));
try!(fs::create_dir_all(layout.remote_locks_path()));
try!(config.save(layout.config_path()));
try!(BundleDb::create(layout.clone()));
try!(Index::<Hash, Location>::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION));
try!(BundleDb::create(&layout));
try!(Index::<Hash, Location>::create(
layout.index_path(),
&INDEX_MAGIC,
INDEX_VERSION
));
try!(BundleMap::create().save(layout.bundle_map_path()));
try!(fs::create_dir_all(layout.backups_path()));
Self::open(path)
Self::open(path, true)
}
#[allow(unknown_lints,useless_let_if_seq)]
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, RepositoryError> {
#[allow(unknown_lints, useless_let_if_seq)]
pub fn open<P: AsRef<Path>>(path: P, online: bool) -> Result<Self, RepositoryError> {
let layout = RepositoryLayout::new(path.as_ref().to_path_buf());
if !layout.remote_exists() {
return Err(RepositoryError::NoRemote)
return Err(RepositoryError::NoRemote);
}
let config = try!(Config::load(layout.config_path()));
let remote_locks = LockFolder::new(layout.remote_locks_path());
@ -117,54 +133,67 @@ impl Repository {
let local_locks = LockFolder::new(layout.local_locks_path());
let lock = try!(local_locks.lock(false));
let crypto = Arc::new(Mutex::new(try!(Crypto::open(layout.keys_path()))));
let (bundles, new, gone) = try!(BundleDb::open(layout.clone(), crypto.clone()));
let (index, mut rebuild_index) = match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } {
Ok(index) => (index, false),
Err(err) => {
error!("Failed to load local index:\n\tcaused by: {}", err);
(try!(Index::create(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION)), true)
}
};
let (bundles, new, gone) = try!(BundleDb::open(layout.clone(), crypto.clone(), online));
let (index, mut rebuild_index) =
match unsafe { Index::open(layout.index_path(), &INDEX_MAGIC, INDEX_VERSION) } {
Ok(index) => (index, false),
Err(err) => {
tr_error!("Failed to load local index:\n\tcaused by: {}", err);
(
try!(Index::create(
layout.index_path(),
&INDEX_MAGIC,
INDEX_VERSION
)),
true
)
}
};
let (bundle_map, rebuild_bundle_map) = match BundleMap::load(layout.bundle_map_path()) {
Ok(bundle_map) => (bundle_map, false),
Err(err) => {
error!("Failed to load local bundle map:\n\tcaused by: {}", err);
tr_error!("Failed to load local bundle map:\n\tcaused by: {}", err);
(BundleMap::create(), true)
}
};
let dirty = layout.dirtyfile_path().exists();
let mut repo = Repository {
layout: layout,
layout,
dirty: true,
chunker: config.chunker.create(),
config: config,
index: index,
crypto: crypto,
bundle_map: bundle_map,
config,
index,
crypto,
bundle_map,
next_data_bundle: 0,
next_meta_bundle: 0,
bundles: bundles,
bundles,
data_bundle: None,
meta_bundle: None,
lock: lock,
remote_locks: remote_locks,
local_locks: local_locks
lock,
remote_locks,
local_locks
};
if !rebuild_bundle_map {
let mut save_bundle_map = false;
if !gone.is_empty() {
info!("Removig {} old bundles from index", gone.len());
tr_info!("Removing {} old bundles from index", gone.len());
try!(repo.write_mode());
for bundle in gone {
try!(repo.remove_gone_remote_bundle(bundle))
try!(repo.remove_gone_remote_bundle(&bundle))
}
save_bundle_map = true;
}
if !new.is_empty() {
info!("Adding {} new bundles to index", new.len());
tr_info!("Adding {} new bundles to index", new.len());
try!(repo.write_mode());
for bundle in ProgressIter::new("adding bundles to index", new.len(), new.into_iter()) {
try!(repo.add_new_remote_bundle(bundle))
for bundle in ProgressIter::new(
tr!("adding bundles to index"),
new.len(),
new.into_iter()
)
{
try!(repo.add_new_remote_bundle(&bundle))
}
save_bundle_map = true;
}
@ -188,29 +217,43 @@ impl Repository {
Ok(repo)
}
pub fn import<P: AsRef<Path>, R: AsRef<Path>>(path: P, remote: R, key_files: Vec<String>) -> Result<Self, RepositoryError> {
pub fn import<P: AsRef<Path>, R: AsRef<Path>>(
path: P,
remote: R,
key_files: Vec<String>,
) -> Result<Self, RepositoryError> {
let path = path.as_ref();
let mut repo = try!(Repository::create(path, Config::default(), remote));
let mut repo = try!(Repository::create(path, &Config::default(), remote));
for file in key_files {
try!(repo.crypto.lock().unwrap().register_keyfile(file));
}
repo = try!(Repository::open(path));
repo = try!(Repository::open(path, true));
let mut backups: Vec<(String, Backup)> = try!(repo.get_all_backups()).into_iter().collect();
backups.sort_by_key(|&(_, ref b)| b.timestamp);
if let Some((name, backup)) = backups.pop() {
info!("Taking configuration from the last backup '{}'", name);
tr_info!("Taking configuration from the last backup '{}'", name);
repo.config = backup.config;
try!(repo.save_config())
} else {
warn!("No backup found in the repository to take configuration from, please set the configuration manually.");
tr_warn!(
"No backup found in the repository to take configuration from, please set the configuration manually."
);
}
Ok(repo)
}
#[inline]
pub fn register_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), RepositoryError> {
pub fn register_key(
&mut self,
public: PublicKey,
secret: SecretKey,
) -> Result<(), RepositoryError> {
try!(self.write_mode());
Ok(try!(self.crypto.lock().unwrap().register_secret_key(public, secret)))
try!(self.crypto.lock().unwrap().register_secret_key(
public,
secret
));
Ok(())
}
#[inline]
@ -224,7 +267,7 @@ impl Repository {
pub fn set_encryption(&mut self, public: Option<&PublicKey>) {
if let Some(key) = public {
if !self.crypto.lock().unwrap().contains_secret_key(key) {
warn!("The secret key for that public key is not stored in the repository.")
tr_warn!("The secret key for that public key is not stored in the repository.")
}
let mut key_bytes = Vec::new();
key_bytes.extend_from_slice(&key[..]);
@ -268,7 +311,10 @@ impl Repository {
mem::swap(&mut self.data_bundle, &mut finished);
{
let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
self.bundle_map.set(self.next_data_bundle, bundle.id.clone());
self.bundle_map.set(
self.next_data_bundle,
bundle.id.clone()
);
}
self.next_data_bundle = self.next_free_bundle_id()
}
@ -277,7 +323,10 @@ impl Repository {
mem::swap(&mut self.meta_bundle, &mut finished);
{
let bundle = try!(self.bundles.add_bundle(finished.unwrap()));
self.bundle_map.set(self.next_meta_bundle, bundle.id.clone());
self.bundle_map.set(
self.next_meta_bundle,
bundle.id.clone()
);
}
self.next_meta_bundle = self.next_free_bundle_id()
}
@ -289,14 +338,14 @@ impl Repository {
Ok(())
}
fn add_new_remote_bundle(&mut self, bundle: BundleInfo) -> Result<(), RepositoryError> {
fn add_new_remote_bundle(&mut self, bundle: &BundleInfo) -> Result<(), RepositoryError> {
if self.bundle_map.find(&bundle.id).is_some() {
return Ok(())
return Ok(());
}
debug!("Adding new bundle to index: {}", bundle.id);
tr_debug!("Adding new bundle to index: {}", bundle.id);
let bundle_id = match bundle.mode {
BundleMode::Data => self.next_data_bundle,
BundleMode::Meta => self.next_meta_bundle
BundleMode::Meta => self.next_meta_bundle,
};
let chunks = try!(self.bundles.get_chunk_list(&bundle.id));
self.bundle_map.set(bundle_id, bundle.id.clone());
@ -307,7 +356,14 @@ impl Repository {
self.next_data_bundle = self.next_free_bundle_id()
}
for (i, (hash, _len)) in chunks.into_inner().into_iter().enumerate() {
if let Some(old) = try!(self.index.set(&hash, &Location{bundle: bundle_id as u32, chunk: i as u32})) {
if let Some(old) = try!(self.index.set(
&hash,
&Location {
bundle: bundle_id as u32,
chunk: i as u32
}
))
{
// Duplicate chunk, forced ordering: higher bundle id wins
let old_bundle_id = try!(self.get_bundle_id(old.bundle));
if old_bundle_id > bundle.id {
@ -318,9 +374,9 @@ impl Repository {
Ok(())
}
fn remove_gone_remote_bundle(&mut self, bundle: BundleInfo) -> Result<(), RepositoryError> {
fn remove_gone_remote_bundle(&mut self, bundle: &BundleInfo) -> Result<(), RepositoryError> {
if let Some(id) = self.bundle_map.find(&bundle.id) {
debug!("Removing bundle from index: {}", bundle.id);
tr_debug!("Removing bundle from index: {}", bundle.id);
try!(self.bundles.delete_local_bundle(&bundle.id));
try!(self.index.filter(|_key, data| data.bundle != id));
self.bundle_map.remove(id);
@ -330,7 +386,8 @@ impl Repository {
#[inline]
fn write_mode(&mut self) -> Result<(), RepositoryError> {
Ok(try!(self.local_locks.upgrade(&mut self.lock)))
try!(self.local_locks.upgrade(&mut self.lock));
Ok(())
}
#[inline]
@ -348,7 +405,7 @@ impl Repository {
impl Drop for Repository {
fn drop(&mut self) {
if let Err(err) = self.flush() {
error!("Failed to flush repository: {}", err);
tr_error!("Failed to flush repository: {}", err);
}
}
}


@ -1,15 +1,19 @@
use ::prelude::*;
use prelude::*;
use std::collections::{HashMap, HashSet, BTreeMap};
use std::path::{Path, PathBuf};
use std::io::{self, Read, Write, Cursor};
use std::fs::File;
use std::str;
use std::os::unix::ffi::OsStrExt;
use chrono::prelude::*;
use tar;
static MAX_NAME_LEN: usize = 99;
static MAX_LINK_LEN: usize = 99;
struct PaxBuilder(Vec<u8>);
@ -38,6 +42,8 @@ impl PaxBuilder {
trait BuilderExt {
fn append_pax_extensions(&mut self, headers: &PaxBuilder) -> Result<(), io::Error>;
fn append_long_name(&mut self, path: &Path) -> Result<(), io::Error>;
fn append_long_link(&mut self, path: &Path) -> Result<(), io::Error>;
}
impl<T: Write> BuilderExt for tar::Builder<T> {
@ -48,6 +54,24 @@ impl<T: Write> BuilderExt for tar::Builder<T> {
header.set_cksum();
self.append(&header, headers.as_bytes())
}
fn append_long_name(&mut self, path: &Path) -> Result<(), io::Error> {
let mut header = tar::Header::new_gnu();
let bytes = path.as_os_str().as_bytes();
header.set_size(bytes.len() as u64);
header.set_entry_type(tar::EntryType::GNULongName);
header.set_cksum();
self.append(&header, bytes)
}
fn append_long_link(&mut self, path: &Path) -> Result<(), io::Error> {
let mut header = tar::Header::new_gnu();
let bytes = path.as_os_str().as_bytes();
header.set_size(bytes.len() as u64);
header.set_entry_type(tar::EntryType::GNULongLink);
header.set_cksum();
self.append(&header, bytes)
}
}
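Background for these helpers: classic ustar headers hold at most 100 name bytes, so GNU tar stores longer paths as separate GNULongName/GNULongLink records preceding the real header, which is what append_long_name and append_long_link emit. A hedged sketch of the length check the export code performs with them (MAX_NAME_LEN is 99 above):

    use std::os::unix::ffi::OsStrExt;
    use std::path::Path;

    // Paths at or over 99 bytes get a GNU long-name record first.
    fn needs_long_name(path: &Path) -> bool {
        path.as_os_str().as_bytes().len() >= 99
    }

    fn main() {
        assert!(!needs_long_name(Path::new("short.txt")));
        assert!(needs_long_name(Path::new(&"a".repeat(120))));
    }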
@ -58,20 +82,36 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
let path = try!(entry.path());
let header = entry.header();
let file_type = match header.entry_type() {
tar::EntryType::Regular | tar::EntryType::Link | tar::EntryType::Continuous => FileType::File,
tar::EntryType::Regular |
tar::EntryType::Link |
tar::EntryType::Continuous => FileType::File,
tar::EntryType::Symlink => FileType::Symlink,
tar::EntryType::Directory => FileType::Directory,
_ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into())
tar::EntryType::Block => FileType::BlockDevice,
tar::EntryType::Char => FileType::CharDevice,
tar::EntryType::Fifo => FileType::NamedPipe,
_ => return Err(InodeError::UnsupportedFiletype(path.to_path_buf()).into()),
};
Inode {
file_type: file_type,
name: path.file_name().map(|s| s.to_string_lossy().to_string()).unwrap_or_else(|| "/".to_string()),
file_type,
name: path.file_name()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "/".to_string()),
symlink_target: try!(entry.link_name()).map(|s| s.to_string_lossy().to_string()),
size: try!(header.size()),
mode: try!(header.mode()),
user: try!(header.uid()),
group: try!(header.gid()),
timestamp: try!(header.mtime()) as i64,
device: match file_type {
FileType::BlockDevice | FileType::CharDevice => Some((
try!(header.device_major())
.unwrap_or(0),
try!(header.device_minor())
.unwrap_or(0)
)),
_ => None,
},
..Default::default()
}
};
@ -80,7 +120,10 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
let ext = try!(ext);
let key = ext.key().unwrap_or("");
if key.starts_with(PAX_XATTR_PREFIX) {
inode.xattrs.insert(key[PAX_XATTR_PREFIX.len()..].to_string(), ext.value_bytes().to_vec().into());
inode.xattrs.insert(
key[PAX_XATTR_PREFIX.len()..].to_string(),
ext.value_bytes().to_vec().into()
);
}
}
}
@ -91,7 +134,10 @@ fn inode_from_entry<R: Read>(entry: &mut tar::Entry<R>) -> Result<Inode, Reposit
}
impl Repository {
fn import_tar_entry<R: Read>(&mut self, entry: &mut tar::Entry<R>) -> Result<Inode, RepositoryError> {
fn import_tar_entry<R: Read>(
&mut self,
entry: &mut tar::Entry<R>,
) -> Result<Inode, RepositoryError> {
let mut inode = try!(inode_from_entry(entry));
if inode.size < 100 {
let mut data = Vec::with_capacity(inode.size as usize);
@ -111,7 +157,12 @@ impl Repository {
Ok(inode)
}
fn import_tarfile_as_inode<R: Read>(&mut self, backup: &mut Backup, input: R, failed_paths: &mut Vec<PathBuf>) -> Result<(Inode, ChunkList), RepositoryError> {
fn import_tarfile_as_inode<R: Read>(
&mut self,
backup: &mut Backup,
input: R,
failed_paths: &mut Vec<PathBuf>,
) -> Result<(Inode, ChunkList), RepositoryError> {
let mut tarfile = tar::Archive::new(input);
// Step 1: create inodes for all entries
let mut inodes = HashMap::<PathBuf, (Inode, HashSet<String>)>::new();
@ -126,7 +177,7 @@ impl Repository {
} else {
if let Some(FileData::ChunkedIndirect(ref chunks)) = inode.data {
for &(_, len) in chunks.iter() {
inode.cum_size += len as u64;
inode.cum_size += u64::from(len);
}
}
inode.cum_files = 1;
@ -143,12 +194,14 @@ impl Repository {
backup.group_names.insert(inode.group, name.to_string());
}
inodes.insert(path, (inode, HashSet::new()));
},
Err(RepositoryError::Inode(_)) | Err(RepositoryError::Chunker(_)) | Err(RepositoryError::Io(_)) => {
info!("Failed to backup {:?}", path);
}
Err(RepositoryError::Inode(_)) |
Err(RepositoryError::Chunker(_)) |
Err(RepositoryError::Io(_)) => {
tr_info!("Failed to backup {:?}", path);
failed_paths.push(path);
continue
},
continue;
}
Err(err) => {
return Err(err);
}
@ -167,16 +220,21 @@ impl Repository {
let (inode, _) = inodes.remove(&path).unwrap();
let chunks = try!(self.put_inode(&inode));
if let Some(parent_path) = path.parent() {
if let Some(&mut (ref mut parent_inode, ref mut children)) = inodes.get_mut(parent_path) {
if let Some(&mut (ref mut parent_inode, ref mut children)) =
inodes.get_mut(parent_path)
{
children.remove(&inode.name);
parent_inode.cum_size += inode.cum_size;
for &(_, len) in chunks.iter() {
parent_inode.cum_size += len as u64;
parent_inode.cum_size += u64::from(len);
}
parent_inode.cum_files += inode.cum_files;
parent_inode.cum_dirs += inode.cum_dirs;
parent_inode.children.as_mut().unwrap().insert(inode.name.clone(), chunks);
continue
parent_inode.children.as_mut().unwrap().insert(
inode.name.clone(),
chunks
);
continue;
}
}
roots.push((inode, chunks));
@ -185,7 +243,7 @@ impl Repository {
if roots.len() == 1 {
Ok(roots.pop().unwrap())
} else {
warn!("Tar file contains multiple roots, adding dummy folder");
tr_warn!("Tar file contains multiple roots, adding dummy folder");
let mut root_inode = Inode {
file_type: FileType::Directory,
mode: 0o755,
@ -199,7 +257,7 @@ impl Repository {
for (inode, chunks) in roots {
root_inode.cum_size += inode.cum_size;
for &(_, len) in chunks.iter() {
root_inode.cum_size += len as u64;
root_inode.cum_size += u64::from(len);
}
root_inode.cum_files += inode.cum_files;
root_inode.cum_dirs += inode.cum_dirs;
@ -211,11 +269,14 @@ impl Repository {
}
}
pub fn import_tarfile<P: AsRef<Path>>(&mut self, tarfile: P) -> Result<Backup, RepositoryError> {
pub fn import_tarfile<P: AsRef<Path>>(
&mut self,
tarfile: P,
) -> Result<Backup, RepositoryError> {
try!(self.write_mode());
let _lock = try!(self.lock(false));
if self.dirty {
return Err(RepositoryError::Dirty)
return Err(RepositoryError::Dirty);
}
try!(self.set_dirty());
let mut backup = Backup::default();
@ -227,9 +288,17 @@ impl Repository {
let mut failed_paths = vec![];
let tarfile = tarfile.as_ref();
let (root_inode, chunks) = if tarfile == Path::new("-") {
try!(self.import_tarfile_as_inode(&mut backup, try!(File::open(tarfile)), &mut failed_paths))
try!(self.import_tarfile_as_inode(
&mut backup,
io::stdin(),
&mut failed_paths
))
} else {
try!(self.import_tarfile_as_inode(&mut backup, io::stdin(), &mut failed_paths))
try!(self.import_tarfile_as_inode(
&mut backup,
try!(File::open(tarfile)),
&mut failed_paths
))
};
backup.root = chunks;
try!(self.flush());
@ -253,71 +322,131 @@ impl Repository {
}
}
fn export_xattrs<W: Write>(&mut self, inode: &Inode, tarfile: &mut tar::Builder<W>) -> Result<(), RepositoryError> {
fn export_xattrs<W: Write>(
&mut self,
inode: &Inode,
tarfile: &mut tar::Builder<W>,
) -> Result<(), RepositoryError> {
let mut pax = PaxBuilder::new();
for (key, value) in &inode.xattrs {
pax.add(&format!("{}{}", PAX_XATTR_PREFIX,key), str::from_utf8(value).unwrap());
pax.add(
&format!("{}{}", PAX_XATTR_PREFIX, key),
str::from_utf8(value).unwrap()
);
}
Ok(try!(tarfile.append_pax_extensions(&pax)))
try!(tarfile.append_pax_extensions(&pax));
Ok(())
}
fn export_tarfile_recurse<W: Write>(&mut self, backup: &Backup, path: &Path, inode: Inode, tarfile: &mut tar::Builder<W>) -> Result<(), RepositoryError> {
if !inode.xattrs.is_empty() {
try!(self.export_xattrs(&inode, tarfile));
}
let mut header = tar::Header::new_gnu();
header.set_size(inode.size);
let path = path.join(inode.name);
try!(header.set_path(&path));
if let Some(target) = inode.symlink_target {
try!(header.set_link_name(target));
}
header.set_mode(inode.mode);
header.set_uid(inode.user);
if let Some(name) = backup.user_names.get(&inode.user) {
header.set_username(name).ok();
}
header.set_gid(inode.group);
if let Some(name) = backup.group_names.get(&inode.group) {
header.set_groupname(name).ok();
}
header.set_mtime(inode.timestamp as u64);
header.set_entry_type(match inode.file_type {
FileType::File => tar::EntryType::Regular,
FileType::Symlink => tar::EntryType::Symlink,
FileType::Directory => tar::EntryType::Directory
});
header.set_cksum();
match inode.data {
None => try!(tarfile.append(&header, Cursor::new(&[]))),
Some(FileData::Inline(data)) => try!(tarfile.append(&header, Cursor::new(data))),
Some(FileData::ChunkedDirect(chunks)) => try!(tarfile.append(&header, self.get_reader(chunks))),
Some(FileData::ChunkedIndirect(chunks)) => {
let chunks = ChunkList::read_from(&try!(self.get_data(&chunks)));
try!(tarfile.append(&header, self.get_reader(chunks)))
fn export_tarfile_recurse<W: Write>(
&mut self,
backup: &Backup,
path: &Path,
inode: Inode,
tarfile: &mut tar::Builder<W>,
skip_root: bool,
) -> Result<(), RepositoryError> {
let path = if skip_root {
path.to_path_buf()
} else {
path.join(&inode.name)
};
if inode.file_type != FileType::Directory || !skip_root {
if !inode.xattrs.is_empty() {
try!(self.export_xattrs(&inode, tarfile));
}
let mut header = tar::Header::new_gnu();
header.set_size(inode.size);
if path.as_os_str().as_bytes().len() >= MAX_NAME_LEN {
try!(tarfile.append_long_name(&path));
} else {
try!(header.set_path(&path));
}
if let Some(target) = inode.symlink_target {
if target.len() >= MAX_LINK_LEN {
try!(tarfile.append_long_link(Path::new(&target)));
} else {
try!(header.set_link_name(target));
}
}
if let Some((major, minor)) = inode.device {
try!(header.set_device_major(major));
try!(header.set_device_minor(minor));
}
header.set_mode(inode.mode);
header.set_uid(inode.user);
if let Some(name) = backup.user_names.get(&inode.user) {
header.set_username(name).ok();
}
header.set_gid(inode.group);
if let Some(name) = backup.group_names.get(&inode.group) {
header.set_groupname(name).ok();
}
header.set_mtime(inode.timestamp as u64);
header.set_entry_type(match inode.file_type {
FileType::File => tar::EntryType::Regular,
FileType::Symlink => tar::EntryType::Symlink,
FileType::Directory => tar::EntryType::Directory,
FileType::BlockDevice => tar::EntryType::Block,
FileType::CharDevice => tar::EntryType::Char,
FileType::NamedPipe => tar::EntryType::Fifo,
});
header.set_cksum();
match inode.data {
None => try!(tarfile.append(&header, Cursor::new(&[]))),
Some(FileData::Inline(data)) => try!(tarfile.append(&header, Cursor::new(data))),
Some(FileData::ChunkedDirect(chunks)) => {
try!(tarfile.append(&header, self.get_reader(chunks)))
}
Some(FileData::ChunkedIndirect(chunks)) => {
let chunks = ChunkList::read_from(&try!(self.get_data(&chunks)));
try!(tarfile.append(&header, self.get_reader(chunks)))
}
}
}
if let Some(children) = inode.children {
for chunks in children.values() {
let inode = try!(self.get_inode(chunks));
try!(self.export_tarfile_recurse(backup, &path, inode, tarfile));
try!(self.export_tarfile_recurse(
backup,
&path,
inode,
tarfile,
false
));
}
}
Ok(())
}
pub fn export_tarfile<P: AsRef<Path>>(&mut self, backup: &Backup, inode: Inode, tarfile: P) -> Result<(), RepositoryError> {
pub fn export_tarfile<P: AsRef<Path>>(
&mut self,
backup: &Backup,
inode: Inode,
tarfile: P,
) -> Result<(), RepositoryError> {
let tarfile = tarfile.as_ref();
if tarfile == Path::new("-") {
let mut tarfile = tar::Builder::new(io::stdout());
try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile));
try!(self.export_tarfile_recurse(
backup,
Path::new(""),
inode,
&mut tarfile,
true
));
try!(tarfile.finish());
} else {
let mut tarfile = tar::Builder::new(try!(File::create(tarfile)));
try!(self.export_tarfile_recurse(backup, Path::new(""), inode, &mut tarfile));
try!(self.export_tarfile_recurse(
backup,
Path::new(""),
inode,
&mut tarfile,
true
));
try!(tarfile.finish());
}
Ok(())
}
}

View File

@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use std::collections::HashSet;
@ -13,13 +13,18 @@ impl Repository {
}
}
pub fn vacuum(&mut self, ratio: f32, combine: bool, force: bool) -> Result<(), RepositoryError> {
pub fn vacuum(
&mut self,
ratio: f32,
combine: bool,
force: bool,
) -> Result<(), RepositoryError> {
try!(self.flush());
info!("Locking repository");
tr_info!("Locking repository");
try!(self.write_mode());
let _lock = try!(self.lock(true));
// analyze_usage will set the dirty flag
info!("Analyzing chunk usage");
tr_info!("Analyzing chunk usage");
let usage = try!(self.analyze_usage());
let mut data_total = 0;
let mut data_used = 0;
@ -27,13 +32,20 @@ impl Repository {
data_total += bundle.info.encoded_size;
data_used += bundle.get_used_size();
}
info!("Usage: {} of {}, {:.1}%", to_file_size(data_used as u64), to_file_size(data_total as u64), data_used as f32/data_total as f32*100.0);
tr_info!(
"Usage: {} of {}, {:.1}%",
to_file_size(data_used as u64),
to_file_size(data_total as u64),
data_used as f32 / data_total as f32 * 100.0
);
let mut rewrite_bundles = HashSet::new();
let mut reclaim_space = 0;
let mut rewrite_data = 0;
for (id, bundle) in &usage {
if bundle.get_usage_ratio() <= ratio {
rewrite_bundles.insert(*id);
reclaim_space += bundle.get_unused_size();
rewrite_data += bundle.get_used_size();
}
}
if combine {
@ -58,12 +70,22 @@ impl Repository {
}
}
}
info!("Reclaiming {} by rewriting {} bundles", to_file_size(reclaim_space as u64), rewrite_bundles.len());
tr_info!(
"Reclaiming about {} by rewriting {} bundles ({})",
to_file_size(reclaim_space as u64),
rewrite_bundles.len(),
to_file_size(rewrite_data as u64)
);
if !force {
self.dirty = false;
return Ok(())
return Ok(());
}
for id in ProgressIter::new("rewriting bundles", rewrite_bundles.len(), rewrite_bundles.iter()) {
for id in ProgressIter::new(
tr!("rewriting bundles"),
rewrite_bundles.len(),
rewrite_bundles.iter()
)
{
let bundle = &usage[id];
let bundle_id = self.bundle_map.get(*id).unwrap();
let chunks = try!(self.bundles.get_chunk_list(&bundle_id));
@ -71,20 +93,27 @@ impl Repository {
for (chunk, &(hash, _len)) in chunks.into_iter().enumerate() {
if !bundle.chunk_usage.get(chunk) {
try!(self.index.delete(&hash));
continue
continue;
}
let data = try!(self.bundles.get_chunk(&bundle_id, chunk));
try!(self.put_chunk_override(mode, hash, &data));
}
}
try!(self.flush());
info!("Checking index");
tr_info!("Checking index");
for (hash, location) in self.index.iter() {
if rewrite_bundles.contains(&location.bundle) {
panic!("Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}", hash, location.bundle, location.chunk);
let loc_bundle = location.bundle;
let loc_chunk = location.chunk;
if rewrite_bundles.contains(&loc_bundle) {
tr_panic!(
"Removed bundle is still referenced in index: hash:{}, bundle:{}, chunk:{}",
hash,
loc_bundle,
loc_chunk
);
}
}
info!("Deleting {} bundles", rewrite_bundles.len());
tr_info!("Deleting {} bundles", rewrite_bundles.len());
for id in rewrite_bundles {
try!(self.delete_bundle(id));
}

src/translation.rs (new file, 216 lines)

@ -0,0 +1,216 @@
use std::borrow::Cow;
use std::collections::HashMap;
use std::cmp::max;
use std::str;
use std::path::{Path, PathBuf};
use std::io::Read;
use std::fs::File;
use locale_config::Locale;
pub type CowStr = Cow<'static, str>;
fn read_u32(b: &[u8], reorder: bool) -> u32 {
if reorder {
(u32::from(b[0]) << 24) + (u32::from(b[1]) << 16) + (u32::from(b[2]) << 8) + u32::from(b[3])
} else {
(u32::from(b[3]) << 24) + (u32::from(b[2]) << 16) + (u32::from(b[1]) << 8) + u32::from(b[0])
}
}
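The reorder flag handles .mo files written on either byte order: the header magic is 0x950412de, and a file produced on the opposite endianness reads back as 0xde120495, in which case every following u32 must be byte-swapped. A worked check of that logic (same function body as above):

    fn read_u32(b: &[u8], reorder: bool) -> u32 {
        if reorder {
            (u32::from(b[0]) << 24) + (u32::from(b[1]) << 16)
                + (u32::from(b[2]) << 8) + u32::from(b[3])
        } else {
            (u32::from(b[3]) << 24) + (u32::from(b[2]) << 16)
                + (u32::from(b[1]) << 8) + u32::from(b[0])
        }
    }

    fn main() {
        let bytes = [0xde, 0x12, 0x04, 0x95]; // 0x950412de, little-endian
        assert_eq!(read_u32(&bytes, false), 0x9504_12de); // native order
        assert_eq!(read_u32(&bytes, true), 0xde12_0495);  // swapped magic
    }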
struct MoFile<'a> {
data: &'a [u8],
count: usize,
orig_pos: usize,
trans_pos: usize,
reorder: bool,
i : usize
}
impl<'a> MoFile<'a> {
fn new(data: &'a [u8]) -> Result<Self, ()> {
if data.len() < 20 {
return Err(());
}
// Magic header
let magic = read_u32(&data[0..4], false);
let reorder = if magic == 0x9504_12de {
false
} else if magic == 0xde12_0495 {
true
} else {
return Err(());
};
// Version
if read_u32(&data[4..8], reorder) != 0x0000_0000 {
return Err(());
}
// Translation count
let count = read_u32(&data[8..12], reorder) as usize;
// Original string offset
let orig_pos = read_u32(&data[12..16], reorder) as usize;
// Original string offset
let trans_pos = read_u32(&data[16..20], reorder) as usize;
if data.len() < max(orig_pos, trans_pos) + count * 8 {
return Err(());
}
Ok(MoFile{
data,
count,
orig_pos,
trans_pos,
reorder,
i: 0
})
}
}
impl<'a> Iterator for MoFile<'a> {
type Item = (&'a str, &'a str);
fn next(&mut self) -> Option<Self::Item> {
if self.i >= self.count {
return None;
}
let length = read_u32(&self.data[self.orig_pos+self.i*8..], self.reorder) as usize;
let offset = read_u32(&self.data[self.orig_pos+self.i*8+4..], self.reorder) as usize;
let orig = match str::from_utf8(&self.data[offset..offset+length]) {
Ok(s) => s,
Err(_) => return None
};
let length = read_u32(&self.data[self.trans_pos+self.i*8..], self.reorder) as usize;
let offset = read_u32(&self.data[self.trans_pos+self.i*8+4..], self.reorder) as usize;
let trans = match str::from_utf8(&self.data[offset..offset+length]) {
Ok(s) => s,
Err(_) => return None
};
self.i += 1;
Some((orig, trans))
}
}
pub struct Translation(HashMap<CowStr, CowStr>);
impl Translation {
pub fn new() -> Self {
Translation(Default::default())
}
pub fn from_mo_data(data: &'static[u8]) -> Self {
let mut translation = Translation::new();
match MoFile::new(data) {
Ok(mo_file) => for (orig, trans) in mo_file {
translation.set(orig, trans);
}
Err(_) => error!("Invalid translation data")
}
translation
}
pub fn from_mo_file(path: &Path) -> Self {
let mut translation = Translation::new();
if let Ok(mut file) = File::open(&path) {
let mut data = vec![];
if file.read_to_end(&mut data).is_ok() {
match MoFile::new(&data) {
Ok(mo_file) => for (orig, trans) in mo_file {
translation.set(orig.to_string(), trans.to_string());
}
Err(_) => error!("Invalid translation data")
}
}
}
translation
}
pub fn set<O: Into<CowStr>, T: Into<CowStr>>(&mut self, orig: O, trans: T) {
let trans = trans.into();
if !trans.is_empty() {
self.0.insert(orig.into(), trans);
}
}
pub fn get<'a, 'b: 'a>(&'b self, orig: &'a str) -> &'a str {
self.0.get(orig).map(|s| s as &'a str).unwrap_or(orig)
}
}
fn get_translation(locale: &str) -> Translation {
if let Some(trans) = find_translation(locale) {
return trans;
}
let country = locale.split('_').next().unwrap();
if let Some(trans) = find_translation(country) {
return trans;
}
Translation::new()
}
fn find_translation(name: &str) -> Option<Translation> {
if EMBEDDED_TRANS.contains_key(name) {
return Some(Translation::from_mo_data(EMBEDDED_TRANS[name]));
}
let path = PathBuf::from(format!("/usr/share/locale/{}/LC_MESSAGES/zvault.mo", name));
if path.exists() {
return Some(Translation::from_mo_file(&path));
}
let path = PathBuf::from(format!("lang/{}.mo", name));
if path.exists() {
return Some(Translation::from_mo_file(&path));
}
None
}
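The lookup falls back from the full locale to its language part, so "de_DE" can still match a plain "de" translation. A small sketch of that fallback (hypothetical helper, not the actual API):

    // Try the full locale first, then the language before '_'.
    fn pick<'a>(locale: &str, available: &[&'a str]) -> Option<&'a str> {
        if let Some(&l) = available.iter().find(|&&l| l == locale) {
            return Some(l);
        }
        let lang = locale.split('_').next().unwrap();
        available.iter().find(|&&l| l == lang).copied()
    }

    fn main() {
        let available = ["de", "fr"];
        assert_eq!(pick("de_DE", &available), Some("de"));
        assert_eq!(pick("en_US", &available), None); // stays untranslated
    }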
lazy_static! {
pub static ref EMBEDDED_TRANS: HashMap<&'static str, &'static[u8]> = {
HashMap::new()
//map.insert("de", include_bytes!("../lang/de.mo") as &'static [u8]);
};
pub static ref TRANS: Translation = {
let locale = Locale::current();
let locale_str = locale.tags_for("").next().unwrap().as_ref().to_string();
get_translation(&locale_str)
};
}
#[macro_export] macro_rules! tr {
($fmt:tt) => (::translation::TRANS.get($fmt));
}
#[macro_export] macro_rules! tr_format {
($fmt:tt) => (tr!($fmt));
($fmt:tt, $($arg:tt)*) => (rt_format!(tr!($fmt), $($arg)*).expect("invalid format"));
}
#[macro_export] macro_rules! tr_println {
($fmt:tt) => (println!("{}", tr!($fmt)));
($fmt:tt, $($arg:tt)*) => (rt_println!(tr!($fmt), $($arg)*).expect("invalid format"));
}
#[macro_export] macro_rules! tr_trace {
($($arg:tt)*) => (debug!("{}", tr_format!($($arg)*)));
}
#[macro_export] macro_rules! tr_debug {
($($arg:tt)*) => (debug!("{}", tr_format!($($arg)*)));
}
#[macro_export] macro_rules! tr_info {
($($arg:tt)*) => (info!("{}", tr_format!($($arg)*)));
}
#[macro_export] macro_rules! tr_warn {
($($arg:tt)*) => (warn!("{}", tr_format!($($arg)*)));
}
#[macro_export] macro_rules! tr_error {
($($arg:tt)*) => (error!("{}", tr_format!($($arg)*)));
}
#[macro_export] macro_rules! tr_panic {
($($arg:tt)*) => (panic!("{}", tr_format!($($arg)*)));
}
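All of these macros route the format string through the translation table before handing it to the underlying log macro; tr_format! defers to rt_format! (presumably from the runtime-fmt crate) because a translated string is no longer a compile-time format literal. A self-contained sketch of the lookup-with-fallback at the core of tr!:

    use std::collections::HashMap;

    // Translated string if known, otherwise the original untouched.
    fn tr<'a>(table: &HashMap<&str, &'a str>, orig: &'a str) -> &'a str {
        table.get(orig).copied().unwrap_or(orig)
    }

    fn main() {
        let mut table = HashMap::new();
        table.insert("done.", "fertig.");
        assert_eq!(tr(&table, "done."), "fertig.");
        assert_eq!(tr(&table, "missing"), "missing"); // untranslated fallback
    }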


@ -1,22 +1,26 @@
use std::ops::Deref;
#[derive(Clone)]
pub struct Bitmap {
bytes: Vec<u8>
}
impl Bitmap {
/// Creates a new bitmap
pub fn new(len: usize) -> Self {
let len = (len+7)/8;
let len = (len + 7) / 8;
let mut bytes = Vec::with_capacity(len);
bytes.resize(len, 0);
Self { bytes: bytes }
Self { bytes }
}
/// Returns the number of bits in the bitmap
#[inline]
pub fn len(&self) -> usize {
self.bytes.len() * 8
}
/// Returns whether the bitmap is empty, i.e. contains no bits
#[inline]
pub fn is_empty(&self) -> bool {
self.len() == 0
@ -24,7 +28,7 @@ impl Bitmap {
#[inline]
fn convert_index(&self, index: usize) -> (usize, u8) {
(index/8, 1u8<<(index%8))
(index / 8, 1u8 << (index % 8))
}
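convert_index maps a bit index to a byte offset plus a single-bit mask; every other Bitmap operation is built on this pair. A worked example:

    fn convert_index(index: usize) -> (usize, u8) {
        (index / 8, 1u8 << (index % 8))
    }

    fn main() {
        assert_eq!(convert_index(5), (0, 0b0010_0000));    // bit 5 of byte 0
        assert_eq!(convert_index(154), (19, 0b0000_0100)); // 154 = 19*8 + 2
    }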
#[inline]
@ -63,7 +67,7 @@ impl Bitmap {
#[inline]
pub fn from_bytes(bytes: Vec<u8>) -> Self {
Self { bytes: bytes }
Self { bytes }
}
}
@ -75,3 +79,89 @@ impl Deref for Bitmap {
&self.bytes
}
}
mod tests {
#[allow(unused_imports)]
use super::Bitmap;
#[test]
fn test_new() {
Bitmap::new(1024);
}
#[test]
fn test_len() {
assert_eq!(Bitmap::new(1024).len(), 1024);
}
#[test]
fn test_is_empty() {
assert!(!Bitmap::new(1024).is_empty());
assert!(Bitmap::new(0).is_empty());
}
#[test]
fn test_set() {
let mut bitmap = Bitmap::new(1024);
assert!(!bitmap.get(5));
assert!(!bitmap.get(154));
bitmap.set(5);
assert!(bitmap.get(5));
assert!(!bitmap.get(154));
bitmap.set(154);
assert!(bitmap.get(5));
assert!(bitmap.get(154));
}
#[test]
fn test_unset() {
let mut bitmap = Bitmap::new(1024);
assert!(!bitmap.get(5));
bitmap.set(5);
assert!(bitmap.get(5));
bitmap.unset(5);
assert!(!bitmap.get(5));
assert!(!bitmap.get(154));
bitmap.unset(154);
assert!(!bitmap.get(154));
}
#[test]
fn test_flip() {
let mut bitmap = Bitmap::new(1024);
assert!(!bitmap.get(5));
bitmap.flip(5);
assert!(bitmap.get(5));
bitmap.set(154);
assert!(bitmap.get(154));
bitmap.flip(154);
assert!(!bitmap.get(154));
}
#[test]
fn test_as_bytes() {
let mut bitmap = Bitmap::new(16);
assert_eq!(bitmap.as_bytes(), &[0, 0]);
bitmap.set(0);
assert_eq!(bitmap.as_bytes(), &[1, 0]);
bitmap.set(8);
bitmap.set(9);
assert_eq!(bitmap.as_bytes(), &[1, 3]);
}
#[test]
fn test_into_bytes() {
let mut bitmap = Bitmap::new(16);
bitmap.set(0);
bitmap.set(8);
bitmap.set(9);
assert_eq!(bitmap.as_bytes(), &bitmap.clone().into_bytes() as &[u8]);
}
#[test]
fn test_from_bytes() {
assert_eq!(&[1, 3], Bitmap::from_bytes(vec![1, 3]).as_bytes());
}
}


@ -1,64 +0,0 @@
use serde::bytes::ByteBuf;
use blake2::blake2b::Blake2b;
#[derive(Clone, Debug, Copy)]
#[allow(non_camel_case_types)]
pub enum ChecksumType {
Blake2_256
}
serde_impl!(ChecksumType(u64) {
Blake2_256 => 1
});
impl ChecksumType {
#[inline]
pub fn from(name: &str) -> Result<Self, &'static str> {
match name {
"blake2_256" => Ok(ChecksumType::Blake2_256),
_ => Err("Unsupported checksum type")
}
}
#[inline]
pub fn name(&self) -> &'static str {
match *self {
ChecksumType::Blake2_256 => "blake2_256",
}
}
}
pub type Checksum = (ChecksumType, ByteBuf);
#[allow(non_camel_case_types, unknown_lints, large_enum_variant)]
pub enum ChecksumCreator {
Blake2_256(Blake2b)
}
impl ChecksumCreator {
#[inline]
pub fn new(type_: ChecksumType) -> Self {
match type_ {
ChecksumType::Blake2_256 => ChecksumCreator::Blake2_256(Blake2b::new(32))
}
}
#[inline]
pub fn update(&mut self, data: &[u8]) {
match *self {
ChecksumCreator::Blake2_256(ref mut state) => state.update(data)
}
}
#[inline]
pub fn finish(self) -> Checksum {
match self {
ChecksumCreator::Blake2_256(state) => {
let mut buf = Vec::with_capacity(32);
buf.extend_from_slice(state.finalize().as_bytes());
(ChecksumType::Blake2_256, buf.into())
}
}
}
}


@ -61,9 +61,9 @@ impl ChunkList {
#[inline]
pub fn read_from(src: &[u8]) -> Self {
if src.len() % 20 != 0 {
warn!("Reading truncated chunk list");
tr_warn!("Reading truncated chunk list");
}
ChunkList::read_n_from(src.len()/20, &mut Cursor::new(src)).unwrap()
ChunkList::read_n_from(src.len() / 20, &mut Cursor::new(src)).unwrap()
}
#[inline]
@ -111,7 +111,10 @@ impl DerefMut for ChunkList {
impl Serialize for ChunkList {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut buf = Vec::with_capacity(self.encoded_size());
self.write_to(&mut buf).unwrap();
Bytes::from(&buf as &[u8]).serialize(serializer)
@ -120,11 +123,159 @@ impl Serialize for ChunkList {
impl<'a> Deserialize<'a> for ChunkList {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'a> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
let data: Vec<u8> = try!(ByteBuf::deserialize(deserializer)).into();
if data.len() % 20 != 0 {
return Err(D::Error::custom("Invalid chunk list length"));
return Err(D::Error::custom(tr!("Invalid chunk list length")));
}
Ok(ChunkList::read_n_from(data.len()/20, &mut Cursor::new(data)).unwrap())
Ok(
ChunkList::read_n_from(data.len() / 20, &mut Cursor::new(data)).unwrap()
)
}
}
mod tests {
#[allow(unused_imports)]
use super::ChunkList;
#[allow(unused_imports)]
use super::super::Hash;
#[allow(unused_imports)]
use super::super::msgpack;
#[test]
fn test_new() {
ChunkList::new();
}
#[test]
fn test_with_capacity() {
ChunkList::with_capacity(0);
ChunkList::with_capacity(1024);
}
#[test]
fn test_push() {
let mut list = ChunkList::new();
assert!(list.is_empty());
assert_eq!(list.len(), 0);
list.push((Hash::default(), 0));
assert!(!list.is_empty());
assert_eq!(list.len(), 1);
list.push((Hash::default(), 1));
assert!(!list.is_empty());
assert_eq!(list.len(), 2);
}
#[test]
fn test_into_inner() {
let mut list = ChunkList::new();
list.push((Hash::default(), 0));
list.push((Hash::default(), 1));
assert_eq!(
list.into_inner(),
vec![(Hash::default(), 0), (Hash::default(), 1)]
);
}
#[test]
fn test_write_to() {
let mut list = ChunkList::new();
list.push((Hash::default(), 0));
list.push((Hash::default(), 1));
let mut buf = Vec::new();
assert!(list.write_to(&mut buf).is_ok());
assert_eq!(buf.len(), 40);
assert_eq!(&buf[16..20], &[0, 0, 0, 0]);
assert_eq!(&buf[36..40], &[1, 0, 0, 0]);
}
#[test]
fn test_encoded_size() {
let mut list = ChunkList::new();
list.push((Hash::default(), 0));
list.push((Hash::default(), 1));
assert_eq!(list.encoded_size(), 40);
}
#[test]
fn test_read_from() {
let data = vec![
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,
];
let list = ChunkList::read_from(&data);
assert_eq!(list.len(), 2);
assert_eq!(list[0], (Hash::default(), 0));
assert_eq!(list[1], (Hash::default(), 1));
}
#[test]
fn test_serialize() {
let mut list = ChunkList::new();
list.push((Hash::default(), 0));
list.push((Hash::default(), 1));
let mut buf = Vec::new();
assert!(list.write_to(&mut buf).is_ok());
let encoded = msgpack::encode(&list).unwrap();
assert_eq!(buf, &encoded[2..]);
assert_eq!(&[196, 40], &encoded[..2]);
}
#[test]
fn test_deserialize() {
let mut list = ChunkList::new();
list.push((Hash::default(), 0));
list.push((Hash::default(), 1));
let mut buf = vec![196, 40];
assert!(list.write_to(&mut buf).is_ok());
assert!(msgpack::decode::<ChunkList>(&buf).is_ok());
assert_eq!(msgpack::decode::<ChunkList>(&buf).unwrap(), list);
}
}
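The tests pin down the on-disk chunk record implied by the % 20 checks: each entry appears to be a 16-byte hash followed by a little-endian u32 length, giving 40 bytes for two chunks with the length bytes at offsets 16..20 and 36..40. A sketch of that record layout (inferred from the tests, not the actual writer):

    // 16 hash bytes + 4-byte little-endian length = one 20-byte record.
    fn encode_record(hash: [u8; 16], len: u32) -> [u8; 20] {
        let mut rec = [0u8; 20];
        rec[..16].copy_from_slice(&hash);
        rec[16..].copy_from_slice(&len.to_le_bytes());
        rec
    }

    fn main() {
        let rec = encode_record([0; 16], 1);
        assert_eq!(&rec[16..], &[1, 0, 0, 0]); // matches test_write_to
    }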


@ -7,7 +7,7 @@ pub fn to_file_size(size: u64) -> String {
if size >= 512.0 {
size /= 1024.0;
} else {
return format!("{:.0} Bytes", size);
return format!("{:.0} Byte", size);
}
if size >= 512.0 {
size /= 1024.0;
@ -55,7 +55,11 @@ impl<T> ProgressIter<T> {
let msg = format!("{}: ", msg);
bar.message(&msg);
bar.set_max_refresh_rate(Some(Duration::from_millis(100)));
ProgressIter { inner: inner, bar: bar, msg: msg }
ProgressIter {
inner,
bar,
msg
}
}
}
@ -69,10 +73,10 @@ impl<T: Iterator> Iterator for ProgressIter<T> {
fn next(&mut self) -> Option<Self::Item> {
match self.inner.next() {
None => {
let msg = self.msg.clone() + "done.";
let msg = self.msg.clone() + tr!("done.");
self.bar.finish_print(&msg);
None
},
}
Some(item) => {
self.bar.inc();
Some(item)
@ -80,3 +84,52 @@ impl<T: Iterator> Iterator for ProgressIter<T> {
}
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_to_file_size() {
assert_eq!("0 Byte", to_file_size(0));
assert_eq!("1 Byte", to_file_size(1));
assert_eq!("15 Byte", to_file_size(15));
assert_eq!("456 Byte", to_file_size(456));
assert_eq!("0.7 KiB", to_file_size(670));
assert_eq!("237.0 KiB", to_file_size(242670));
assert_eq!("442.5 KiB", to_file_size(453170));
assert_eq!("0.7 MiB", to_file_size(753170));
assert_eq!("12.2 MiB", to_file_size(12753170));
assert_eq!("222.0 MiB", to_file_size(232753170));
assert_eq!("5.1 GiB", to_file_size(5435353170));
assert_eq!("291.1 GiB", to_file_size(312534553170));
assert_eq!("3.9 TiB", to_file_size(4312534553170));
}
#[test]
fn test_to_speed() {
assert_eq!("0 Byte/s", to_speed(0, 1.0));
assert_eq!("100 Byte/s", to_speed(100, 1.0));
assert_eq!("1.0 KiB/s", to_speed(100, 0.1));
assert_eq!("10 Byte/s", to_speed(100, 10.0));
assert_eq!("237.0 KiB/s", to_speed(242670, 1.0));
assert_eq!("0.7 MiB/s", to_speed(753170, 1.0));
assert_eq!("222.0 MiB/s", to_speed(232753170, 1.0));
assert_eq!("291.1 GiB/s", to_speed(312534553170, 1.0));
assert_eq!("3.9 TiB/s", to_speed(4312534553170, 1.0));
}
#[test]
fn test_to_duration() {
assert_eq!("0:00:00.0", to_duration(0.0));
assert_eq!("0:00:00.1", to_duration(0.1));
assert_eq!("0:00:01.0", to_duration(1.0));
assert_eq!("0:01:00.0", to_duration(60.0));
assert_eq!("1:00:00.0", to_duration(3600.0));
assert_eq!("2:02:02.2", to_duration(7322.2));
}
}


@ -3,7 +3,6 @@ use std::ffi::{CStr, CString};
use std::io::{self, Write};
use std::str::FromStr;
use libc;
use squash::*;
@ -11,31 +10,31 @@ quick_error!{
#[derive(Debug)]
pub enum CompressionError {
UnsupportedCodec(name: String) {
description("Unsupported codec")
display("Unsupported codec: {}", name)
description(tr!("Unsupported codec"))
display("{}", tr_format!("Unsupported codec: {}", name))
}
InitializeCodec {
description("Failed to initialize codec")
description(tr!("Failed to initialize codec"))
}
InitializeOptions {
description("Failed to set codec options")
description(tr!("Failed to set codec options"))
}
InitializeStream {
description("Failed to create stream")
description(tr!("Failed to create stream"))
}
Operation(reason: &'static str) {
description("Operation failed")
display("Operation failed: {}", reason)
description(tr!("Operation failed"))
display("{}", tr_format!("Operation failed: {}", reason))
}
Output(err: io::Error) {
from()
cause(err)
description("Failed to write to output")
description(tr!("Failed to write to output"))
}
}
}
#[derive(Clone, Debug, Copy, Eq, PartialEq)]
#[derive(Clone, Debug, Copy, Eq, PartialEq, Hash)]
pub enum CompressionMethod {
Deflate, // Standardized
Brotli, // Good speed and ratio
@ -50,14 +49,17 @@ serde_impl!(CompressionMethod(u8) {
});
#[derive(Clone, Debug, Eq, PartialEq)]
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Compression {
method: CompressionMethod,
level: u8
}
impl Default for Compression {
fn default() -> Self {
Compression { method: CompressionMethod::Brotli, level: 3 }
Compression {
method: CompressionMethod::Brotli,
level: 3
}
}
}
serde_impl!(Compression(u64) {
@@ -74,7 +76,9 @@ impl Compression {
pub fn from_string(name: &str) -> Result<Self, CompressionError> {
let (name, level) = if let Some(pos) = name.find('/') {
let level = try!(u8::from_str(&name[pos+1..]).map_err(|_| CompressionError::UnsupportedCodec(name.to_string())));
let level = try!(u8::from_str(&name[pos + 1..]).map_err(|_| {
CompressionError::UnsupportedCodec(name.to_string())
}));
let name = &name[..pos];
(name, level)
} else {
@@ -85,9 +89,12 @@ impl Compression {
"brotli" => CompressionMethod::Brotli,
"lzma" | "lzma2" | "xz" => CompressionMethod::Lzma,
"lz4" => CompressionMethod::Lz4,
_ => return Err(CompressionError::UnsupportedCodec(name.to_string()))
_ => return Err(CompressionError::UnsupportedCodec(name.to_string())),
};
Ok(Compression { method: method, level: level })
Ok(Compression {
method,
level
})
}
pub fn name(&self) -> &'static str {
@@ -103,7 +110,7 @@ impl Compression {
let name = CString::new(self.name().as_bytes()).unwrap();
let codec = unsafe { squash_get_codec(name.as_ptr()) };
if codec.is_null() {
return Err(CompressionError::InitializeCodec)
return Err(CompressionError::InitializeCodec);
}
Ok(codec)
}
@@ -117,25 +124,27 @@ impl Compression {
let codec = try!(self.codec());
let options = unsafe { squash_options_new(codec, ptr::null::<()>()) };
if options.is_null() {
return Err(CompressionError::InitializeOptions)
return Err(CompressionError::InitializeOptions);
}
let option = CString::new("level");
let value = CString::new(format!("{}", self.level));
let res = unsafe { squash_options_parse_option(
options,
option.unwrap().as_ptr(),
value.unwrap().as_ptr()
)};
let res = unsafe {
squash_options_parse_option(options, option.unwrap().as_ptr(), value.unwrap().as_ptr())
};
if res != SQUASH_OK {
//panic!(unsafe { CStr::from_ptr(squash_status_to_string(res)).to_str().unwrap() });
return Err(CompressionError::InitializeOptions)
return Err(CompressionError::InitializeOptions);
}
Ok(options)
}
#[inline]
fn error(code: SquashStatus) -> CompressionError {
CompressionError::Operation(unsafe { CStr::from_ptr(squash_status_to_string(code)).to_str().unwrap() })
CompressionError::Operation(unsafe {
CStr::from_ptr(squash_status_to_string(code))
.to_str()
.unwrap()
})
}
pub fn compress(&self, data: &[u8]) -> Result<Vec<u8>, CompressionError> {
@@ -148,18 +157,20 @@ impl Compression {
data.len() as usize
)};*/
let mut buf = Vec::with_capacity(size as usize);
let res = unsafe { squash_codec_compress_with_options(
codec,
&mut size,
buf.as_mut_ptr(),
data.len(),
data.as_ptr(),
options)
let res = unsafe {
squash_codec_compress_with_options(
codec,
&mut size,
buf.as_mut_ptr(),
data.len(),
data.as_ptr(),
options
)
};
if res != SQUASH_OK {
println!("{:?}", data);
println!("{:?}", data);
println!("{}, {}", data.len(), size);
return Err(Self::error(res))
return Err(Self::error(res));
}
unsafe { buf.set_len(size) };
Ok(buf)
@@ -167,25 +178,24 @@ impl Compression {
pub fn decompress(&self, data: &[u8]) -> Result<Vec<u8>, CompressionError> {
let codec = try!(self.codec());
let mut size = unsafe { squash_codec_get_uncompressed_size(
codec,
data.len(),
data.as_ptr()
)};
let mut size =
unsafe { squash_codec_get_uncompressed_size(codec, data.len(), data.as_ptr()) };
if size == 0 {
size = 100 * data.len();
}
let mut buf = Vec::with_capacity(size);
let res = unsafe { squash_codec_decompress(
codec,
&mut size,
buf.as_mut_ptr(),
data.len(),
data.as_ptr(),
ptr::null_mut::<()>())
let res = unsafe {
squash_codec_decompress(
codec,
&mut size,
buf.as_mut_ptr(),
data.len(),
data.as_ptr(),
ptr::null_mut::<()>()
)
};
if res != SQUASH_OK {
return Err(Self::error(res))
return Err(Self::error(res));
}
unsafe { buf.set_len(size) };
Ok(buf)
@@ -194,9 +204,8 @@ impl Compression {
pub fn compress_stream(&self) -> Result<CompressionStream, CompressionError> {
let codec = try!(self.codec());
let options = try!(self.options());
let stream = unsafe { squash_stream_new_with_options(
codec, SQUASH_STREAM_COMPRESS, options
) };
let stream =
unsafe { squash_stream_new_with_options(codec, SQUASH_STREAM_COMPRESS, options) };
if stream.is_null() {
return Err(CompressionError::InitializeStream);
}
@@ -205,9 +214,8 @@ impl Compression {
pub fn decompress_stream(&self) -> Result<CompressionStream, CompressionError> {
let codec = try!(self.codec());
let stream = unsafe { squash_stream_new(
codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>()
) };
let stream =
unsafe { squash_stream_new(codec, SQUASH_STREAM_DECOMPRESS, ptr::null::<()>()) };
if stream.is_null() {
return Err(CompressionError::InitializeStream);
}
@@ -218,19 +226,23 @@ impl Compression {
pub struct CompressionStream {
stream: *mut SquashStream,
buffer: [u8; 16*1024]
buffer: [u8; 16 * 1024]
}
impl CompressionStream {
#[inline]
fn new(stream: *mut SquashStream) -> Self {
CompressionStream {
stream: stream,
buffer: [0; 16*1024]
stream,
buffer: [0; 16 * 1024]
}
}
pub fn process<W: Write>(&mut self, input: &[u8], output: &mut W) -> Result<(), CompressionError> {
pub fn process<W: Write>(
&mut self,
input: &[u8],
output: &mut W,
) -> Result<(), CompressionError> {
let stream = unsafe { &mut (*self.stream) };
stream.next_in = input.as_ptr();
stream.avail_in = input.len();
@@ -239,12 +251,12 @@ impl CompressionStream {
stream.avail_out = self.buffer.len();
let res = unsafe { squash_stream_process(stream) };
if res < 0 {
return Err(Compression::error(res))
return Err(Compression::error(res));
}
let output_size = self.buffer.len() - stream.avail_out;
try!(output.write_all(&self.buffer[..output_size]));
if res != SQUASH_PROCESSING {
break
break;
}
}
Ok(())
@@ -257,12 +269,12 @@ impl CompressionStream {
stream.avail_out = self.buffer.len();
let res = unsafe { squash_stream_finish(stream) };
if res < 0 {
return Err(Compression::error(res))
return Err(Compression::error(res));
}
let output_size = self.buffer.len() - stream.avail_out;
try!(output.write_all(&self.buffer[..output_size]));
if res != SQUASH_PROCESSING {
break
break;
}
}
Ok(())
@@ -271,6 +283,591 @@ impl CompressionStream {
impl Drop for CompressionStream {
fn drop(&mut self) {
unsafe { squash_object_unref(self.stream as *mut libc::c_void); }
unsafe {
//squash_object_unref(self.stream as *mut ::std::os::raw::c_void);
use libc;
squash_object_unref(self.stream as *mut libc::c_void);
}
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_parse() {
let method = Compression::from_string("deflate/1").unwrap();
assert_eq!(("deflate", 1), (method.name(), method.level()));
let method = Compression::from_string("zlib/2").unwrap();
assert_eq!(("deflate", 2), (method.name(), method.level()));
let method = Compression::from_string("gzip/3").unwrap();
assert_eq!(("deflate", 3), (method.name(), method.level()));
let method = Compression::from_string("brotli/1").unwrap();
assert_eq!(("brotli", 1), (method.name(), method.level()));
let method = Compression::from_string("lzma/1").unwrap();
assert_eq!(("lzma", 1), (method.name(), method.level()));
let method = Compression::from_string("lzma2/2").unwrap();
assert_eq!(("lzma", 2), (method.name(), method.level()));
let method = Compression::from_string("xz/3").unwrap();
assert_eq!(("lzma", 3), (method.name(), method.level()));
let method = Compression::from_string("lz4/1").unwrap();
assert_eq!(("lz4", 1), (method.name(), method.level()));
}
#[test]
fn test_to_string() {
assert_eq!(
"brotli/1",
Compression::from_string("brotli/1").unwrap().to_string()
);
assert_eq!(
"deflate/1",
Compression::from_string("gzip/1").unwrap().to_string()
);
}
#[allow(dead_code, needless_range_loop)]
fn test_data(n: usize) -> Vec<u8> {
let mut input = vec![0; n];
for i in 0..input.len() {
input[i] = (i * i * i) as u8;
}
input
}
#[allow(dead_code)]
fn test_compression(method: &str, min_lvl: u8, max_lvl: u8) {
let input = test_data(16 * 1024);
for i in min_lvl..max_lvl + 1 {
let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap();
println!("{}", method.to_string());
let compressed = method.compress(&input).unwrap();
let decompressed = method.decompress(&compressed).unwrap();
assert_eq!(input.len(), decompressed.len());
for i in 0..input.len() {
assert_eq!(input[i], decompressed[i]);
}
}
}
#[test]
fn test_compression_deflate() {
test_compression("deflate", 1, 9)
}
#[test]
fn test_compression_brotli() {
test_compression("brotli", 1, 11)
}
#[test]
fn test_compression_lzma() {
test_compression("lzma", 1, 9)
}
#[test]
fn test_compression_lz4() {
test_compression("lz4", 1, 11)
}
#[allow(dead_code)]
fn test_stream_compression(method: &str, min_lvl: u8, max_lvl: u8) {
let input = test_data(512 * 1024);
for i in min_lvl..max_lvl + 1 {
let method = Compression::from_string(&format!("{}/{}", method, i)).unwrap();
println!("{}", method.to_string());
let mut compressor = method.compress_stream().unwrap();
let mut compressed = Vec::with_capacity(input.len());
compressor.process(&input, &mut compressed).unwrap();
compressor.finish(&mut compressed).unwrap();
let mut decompressor = method.decompress_stream().unwrap();
let mut decompressed = Vec::with_capacity(input.len());
decompressor
.process(&compressed, &mut decompressed)
.unwrap();
decompressor.finish(&mut decompressed).unwrap();
assert_eq!(input.len(), decompressed.len());
for i in 0..input.len() {
assert_eq!(input[i], decompressed[i]);
}
}
}
#[test]
fn test_stream_compression_deflate() {
test_stream_compression("deflate", 1, 9)
}
#[test]
fn test_stream_compression_brotli() {
test_stream_compression("brotli", 1, 11)
}
#[test]
fn test_stream_compression_lzma() {
test_stream_compression("lzma", 1, 9)
}
#[test]
fn test_stream_compression_lz4() {
test_stream_compression("lz4", 1, 11)
}
}
#[cfg(feature = "bench")]
mod benches {
#[allow(unused_imports)]
use super::*;
use test::Bencher;
#[allow(dead_code, needless_range_loop)]
fn test_data(n: usize) -> Vec<u8> {
let mut input = vec![0; n];
for i in 0..input.len() {
input[i] = (i * i * i) as u8;
}
input
}
#[allow(dead_code)]
fn bench_stream_compression(b: &mut Bencher, method: Compression) {
let input = test_data(512 * 1024);
b.iter(|| {
let mut compressor = method.compress_stream().unwrap();
let mut compressed = Vec::with_capacity(input.len());
compressor.process(&input, &mut compressed).unwrap();
compressor.finish(&mut compressed).unwrap();
});
b.bytes = input.len() as u64;
}
#[allow(dead_code)]
fn bench_stream_decompression(b: &mut Bencher, method: Compression) {
let input = test_data(512 * 1024);
let mut compressor = method.compress_stream().unwrap();
let mut compressed = Vec::with_capacity(input.len());
compressor.process(&input, &mut compressed).unwrap();
compressor.finish(&mut compressed).unwrap();
b.iter(|| {
let mut decompressor = method.decompress_stream().unwrap();
let mut decompressed = Vec::with_capacity(compressed.len());
decompressor
.process(&compressed, &mut decompressed)
.unwrap();
decompressor.finish(&mut decompressed).unwrap();
});
b.bytes = input.len() as u64;
}
#[bench]
fn bench_deflate_1_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/1").unwrap())
}
#[bench]
fn bench_deflate_2_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/2").unwrap())
}
#[bench]
fn bench_deflate_3_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/3").unwrap())
}
#[bench]
fn bench_deflate_4_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/4").unwrap())
}
#[bench]
fn bench_deflate_5_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/5").unwrap())
}
#[bench]
fn bench_deflate_6_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/6").unwrap())
}
#[bench]
fn bench_deflate_7_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/7").unwrap())
}
#[bench]
fn bench_deflate_8_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/8").unwrap())
}
#[bench]
fn bench_deflate_9_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("deflate/9").unwrap())
}
#[bench]
fn bench_deflate_1_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/1").unwrap())
}
#[bench]
fn bench_deflate_2_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/2").unwrap())
}
#[bench]
fn bench_deflate_3_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/3").unwrap())
}
#[bench]
fn bench_deflate_4_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/4").unwrap())
}
#[bench]
fn bench_deflate_5_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/5").unwrap())
}
#[bench]
fn bench_deflate_6_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/6").unwrap())
}
#[bench]
fn bench_deflate_7_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/7").unwrap())
}
#[bench]
fn bench_deflate_8_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/8").unwrap())
}
#[bench]
fn bench_deflate_9_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("deflate/9").unwrap())
}
#[bench]
fn bench_brotli_1_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/1").unwrap())
}
#[bench]
fn bench_brotli_2_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/2").unwrap())
}
#[bench]
fn bench_brotli_3_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/3").unwrap())
}
#[bench]
fn bench_brotli_4_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/4").unwrap())
}
#[bench]
fn bench_brotli_5_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/5").unwrap())
}
#[bench]
fn bench_brotli_6_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/6").unwrap())
}
#[bench]
fn bench_brotli_7_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/7").unwrap())
}
#[bench]
fn bench_brotli_8_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/8").unwrap())
}
#[bench]
fn bench_brotli_9_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/9").unwrap())
}
#[bench]
fn bench_brotli_10_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/10").unwrap())
}
#[bench]
fn bench_brotli_11_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("brotli/11").unwrap())
}
#[bench]
fn bench_brotli_1_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/1").unwrap())
}
#[bench]
fn bench_brotli_2_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/2").unwrap())
}
#[bench]
fn bench_brotli_3_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/3").unwrap())
}
#[bench]
fn bench_brotli_4_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/4").unwrap())
}
#[bench]
fn bench_brotli_5_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/5").unwrap())
}
#[bench]
fn bench_brotli_6_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/6").unwrap())
}
#[bench]
fn bench_brotli_7_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/7").unwrap())
}
#[bench]
fn bench_brotli_8_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/8").unwrap())
}
#[bench]
fn bench_brotli_9_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/9").unwrap())
}
#[bench]
fn bench_brotli_10_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/10").unwrap())
}
#[bench]
fn bench_brotli_11_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("brotli/11").unwrap())
}
#[bench]
fn bench_lzma_1_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/1").unwrap())
}
#[bench]
fn bench_lzma_2_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/2").unwrap())
}
#[bench]
fn bench_lzma_3_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/3").unwrap())
}
#[bench]
fn bench_lzma_4_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/4").unwrap())
}
#[bench]
fn bench_lzma_5_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/5").unwrap())
}
#[bench]
fn bench_lzma_6_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/6").unwrap())
}
#[bench]
fn bench_lzma_7_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/7").unwrap())
}
#[bench]
fn bench_lzma_8_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/8").unwrap())
}
#[bench]
fn bench_lzma_9_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lzma/9").unwrap())
}
#[bench]
fn bench_lzma_1_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/1").unwrap())
}
#[bench]
fn bench_lzma_2_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/2").unwrap())
}
#[bench]
fn bench_lzma_3_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/3").unwrap())
}
#[bench]
fn bench_lzma_4_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/4").unwrap())
}
#[bench]
fn bench_lzma_5_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/5").unwrap())
}
#[bench]
fn bench_lzma_6_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/6").unwrap())
}
#[bench]
fn bench_lzma_7_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/7").unwrap())
}
#[bench]
fn bench_lzma_8_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/8").unwrap())
}
#[bench]
fn bench_lzma_9_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lzma/9").unwrap())
}
#[bench]
fn bench_lz4_1_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/1").unwrap())
}
#[bench]
fn bench_lz4_2_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/2").unwrap())
}
#[bench]
fn bench_lz4_3_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/3").unwrap())
}
#[bench]
fn bench_lz4_4_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/4").unwrap())
}
#[bench]
fn bench_lz4_5_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/5").unwrap())
}
#[bench]
fn bench_lz4_6_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/6").unwrap())
}
#[bench]
fn bench_lz4_7_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/7").unwrap())
}
#[bench]
fn bench_lz4_8_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/8").unwrap())
}
#[bench]
fn bench_lz4_9_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/9").unwrap())
}
#[bench]
fn bench_lz4_10_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/10").unwrap())
}
#[bench]
fn bench_lz4_11_compress(b: &mut Bencher) {
bench_stream_compression(b, Compression::from_string("lz4/11").unwrap())
}
#[bench]
fn bench_lz4_1_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/1").unwrap())
}
#[bench]
fn bench_lz4_2_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/2").unwrap())
}
#[bench]
fn bench_lz4_3_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/3").unwrap())
}
#[bench]
fn bench_lz4_4_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/4").unwrap())
}
#[bench]
fn bench_lz4_5_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/5").unwrap())
}
#[bench]
fn bench_lz4_6_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/6").unwrap())
}
#[bench]
fn bench_lz4_7_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/7").unwrap())
}
#[bench]
fn bench_lz4_8_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/8").unwrap())
}
#[bench]
fn bench_lz4_9_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/9").unwrap())
}
#[bench]
fn bench_lz4_10_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/10").unwrap())
}
#[bench]
fn bench_lz4_11_decompress(b: &mut Bencher) {
bench_stream_decompression(b, Compression::from_string("lz4/11").unwrap())
}
}
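Taken together, the tests and benches above cover the whole public surface of this module. A hedged usage sketch of that surface (assuming libsquash with the named codecs is installed, as the build already requires, and that finish() consumes the stream as the tests imply):

fn compression_roundtrip() -> Result<(), CompressionError> {
    let method = try!(Compression::from_string("brotli/3"));
    let input = b"example data, example data, example data".to_vec();
    // One-shot API.
    let packed = try!(method.compress(&input));
    assert_eq!(try!(method.decompress(&packed)), input);
    // Streaming API: process() may be called repeatedly, finish() flushes the tail.
    let mut out = Vec::new();
    let mut stream = try!(method.compress_stream());
    try!(stream.process(&input, &mut out));
    try!(stream.finish(&mut out));
    Ok(())
}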

View File

@@ -14,16 +14,14 @@ use sodiumoxide::crypto::box_;
use sodiumoxide::crypto::pwhash;
pub use sodiumoxide::crypto::box_::{SecretKey, PublicKey};
use ::util::*;
use util::*;
static INIT: Once = ONCE_INIT;
fn sodium_init() {
INIT.call_once(|| {
if !sodiumoxide::init() {
panic!("Failed to initialize sodiumoxide");
}
INIT.call_once(|| if !sodiumoxide::init() {
tr_panic!("Failed to initialize sodiumoxide");
});
}
@@ -31,36 +29,36 @@ quick_error!{
#[derive(Debug)]
pub enum EncryptionError {
InvalidKey {
description("Invalid key")
description(tr!("Invalid key"))
}
MissingKey(key: PublicKey) {
description("Missing key")
display("Missing key: {}", to_hex(&key[..]))
description(tr!("Missing key"))
display("{}", tr_format!("Missing key: {}", to_hex(&key[..])))
}
Operation(reason: &'static str) {
description("Operation failed")
display("Operation failed: {}", reason)
description(tr!("Operation failed"))
display("{}", tr_format!("Operation failed: {}", reason))
}
Io(err: io::Error) {
from()
cause(err)
description("IO error")
display("IO error: {}", err)
description(tr!("IO error"))
display("{}", tr_format!("IO error: {}", err))
}
Yaml(err: serde_yaml::Error) {
from()
cause(err)
description("Yaml format error")
display("Yaml format error: {}", err)
description(tr!("Yaml format error"))
display("{}", tr_format!("Yaml format error: {}", err))
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[allow(unknown_lints,non_camel_case_types)]
#[allow(unknown_lints, non_camel_case_types)]
pub enum EncryptionMethod {
Sodium,
Sodium
}
serde_impl!(EncryptionMethod(u64) {
Sodium => 0
@@ -70,13 +68,13 @@ impl EncryptionMethod {
pub fn from_string(val: &str) -> Result<Self, &'static str> {
match val {
"sodium" => Ok(EncryptionMethod::Sodium),
_ => Err("Unsupported encryption method")
_ => Err(tr!("Unsupported encryption method")),
}
}
pub fn to_string(&self) -> String {
match *self {
EncryptionMethod::Sodium => "sodium".to_string()
EncryptionMethod::Sodium => "sodium".to_string(),
}
}
}
@@ -85,7 +83,7 @@ impl EncryptionMethod {
pub type Encryption = (EncryptionMethod, ByteBuf);
struct KeyfileYaml {
pub struct KeyfileYaml {
public: String,
secret: String
}
@@ -110,13 +108,14 @@ impl KeyfileYaml {
pub fn save<P: AsRef<Path>>(&self, path: P) -> Result<(), EncryptionError> {
let mut f = try!(File::create(path));
Ok(try!(serde_yaml::to_writer(&mut f, &self)))
try!(serde_yaml::to_writer(&mut f, &self));
Ok(())
}
}
pub struct Crypto {
path: PathBuf,
path: Option<PathBuf>,
keys: HashMap<PublicKey, SecretKey>
}
@@ -124,7 +123,10 @@ impl Crypto {
#[inline]
pub fn dummy() -> Self {
sodium_init();
Crypto { path: PathBuf::new(), keys: HashMap::new() }
Crypto {
path: None,
keys: HashMap::new()
}
}
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, EncryptionError> {
@@ -134,13 +136,24 @@ impl Crypto {
for entry in try!(fs::read_dir(&path)) {
let entry = try!(entry);
let keyfile = try!(KeyfileYaml::load(entry.path()));
let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey));
let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey));
let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey));
let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey));
let public = try!(parse_hex(&keyfile.public).map_err(
|_| EncryptionError::InvalidKey
));
let public = try!(PublicKey::from_slice(&public).ok_or(
EncryptionError::InvalidKey
));
let secret = try!(parse_hex(&keyfile.secret).map_err(
|_| EncryptionError::InvalidKey
));
let secret = try!(SecretKey::from_slice(&secret).ok_or(
EncryptionError::InvalidKey
));
keys.insert(public, secret);
}
Ok(Crypto { path: path, keys: keys })
Ok(Crypto {
path: Some(path),
keys
})
}
#[inline]
@@ -154,24 +167,58 @@ impl Crypto {
self.register_secret_key(public, secret)
}
pub fn load_keypair_from_file<P: AsRef<Path>>(path: P) -> Result<(PublicKey, SecretKey), EncryptionError> {
let keyfile = try!(KeyfileYaml::load(path));
let public = try!(parse_hex(&keyfile.public).map_err(|_| EncryptionError::InvalidKey));
let public = try!(PublicKey::from_slice(&public).ok_or(EncryptionError::InvalidKey));
let secret = try!(parse_hex(&keyfile.secret).map_err(|_| EncryptionError::InvalidKey));
let secret = try!(SecretKey::from_slice(&secret).ok_or(EncryptionError::InvalidKey));
#[inline]
pub fn load_keypair_from_file<P: AsRef<Path>>(
path: P,
) -> Result<(PublicKey, SecretKey), EncryptionError> {
Self::load_keypair_from_file_data(&try!(KeyfileYaml::load(path)))
}
pub fn load_keypair_from_file_data(
keyfile: &KeyfileYaml,
) -> Result<(PublicKey, SecretKey), EncryptionError> {
let public = try!(parse_hex(&keyfile.public).map_err(
|_| EncryptionError::InvalidKey
));
let public = try!(PublicKey::from_slice(&public).ok_or(
EncryptionError::InvalidKey
));
let secret = try!(parse_hex(&keyfile.secret).map_err(
|_| EncryptionError::InvalidKey
));
let secret = try!(SecretKey::from_slice(&secret).ok_or(
EncryptionError::InvalidKey
));
Ok((public, secret))
}
#[inline]
pub fn save_keypair_to_file<P: AsRef<Path>>(public: &PublicKey, secret: &SecretKey, path: P) -> Result<(), EncryptionError> {
KeyfileYaml { public: to_hex(&public[..]), secret: to_hex(&secret[..]) }.save(path)
pub fn save_keypair_to_file_data(public: &PublicKey, secret: &SecretKey) -> KeyfileYaml {
KeyfileYaml {
public: to_hex(&public[..]),
secret: to_hex(&secret[..])
}
}
#[inline]
pub fn register_secret_key(&mut self, public: PublicKey, secret: SecretKey) -> Result<(), EncryptionError> {
let path = self.path.join(to_hex(&public[..]) + ".yaml");
try!(Self::save_keypair_to_file(&public, &secret, path));
pub fn save_keypair_to_file<P: AsRef<Path>>(
public: &PublicKey,
secret: &SecretKey,
path: P,
) -> Result<(), EncryptionError> {
Self::save_keypair_to_file_data(public, secret).save(path)
}
#[inline]
pub fn register_secret_key(
&mut self,
public: PublicKey,
secret: SecretKey,
) -> Result<(), EncryptionError> {
if let Some(ref path) = self.path {
let path = path.join(to_hex(&public[..]) + ".yaml");
try!(Self::save_keypair_to_file(&public, &secret, path));
}
self.keys.insert(public, secret);
Ok(())
}
@@ -182,28 +229,34 @@ impl Crypto {
}
fn get_secret_key(&self, public: &PublicKey) -> Result<&SecretKey, EncryptionError> {
self.keys.get(public).ok_or_else(|| EncryptionError::MissingKey(*public))
self.keys.get(public).ok_or_else(
|| EncryptionError::MissingKey(*public)
)
}
#[inline]
pub fn encrypt(&self, enc: &Encryption, data: &[u8]) -> Result<Vec<u8>, EncryptionError> {
let &(ref method, ref public) = enc;
let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey));
let public = try!(PublicKey::from_slice(public).ok_or(
EncryptionError::InvalidKey
));
match *method {
EncryptionMethod::Sodium => {
Ok(sealedbox::seal(data, &public))
}
EncryptionMethod::Sodium => Ok(sealedbox::seal(data, &public)),
}
}
#[inline]
pub fn decrypt(&self, enc: &Encryption, data: &[u8]) -> Result<Vec<u8>, EncryptionError> {
let &(ref method, ref public) = enc;
let public = try!(PublicKey::from_slice(public).ok_or(EncryptionError::InvalidKey));
let public = try!(PublicKey::from_slice(public).ok_or(
EncryptionError::InvalidKey
));
let secret = try!(self.get_secret_key(&public));
match *method {
EncryptionMethod::Sodium => {
sealedbox::open(data, &public, secret).map_err(|_| EncryptionError::Operation("Decryption failed"))
sealedbox::open(data, &public, secret).map_err(|_| {
EncryptionError::Operation(tr!("Decryption failed"))
})
}
}
}
@@ -217,17 +270,176 @@ impl Crypto {
pub fn keypair_from_password(password: &str) -> (PublicKey, SecretKey) {
let salt = pwhash::Salt::from_slice(b"the_great_zvault_password_salt_1").unwrap();
let mut key = [0u8; pwhash::HASHEDPASSWORDBYTES];
let key = pwhash::derive_key(&mut key, password.as_bytes(), &salt, pwhash::OPSLIMIT_INTERACTIVE, pwhash::MEMLIMIT_INTERACTIVE).unwrap();
let key = pwhash::derive_key(
&mut key,
password.as_bytes(),
&salt,
pwhash::OPSLIMIT_INTERACTIVE,
pwhash::MEMLIMIT_INTERACTIVE
).unwrap();
let mut seed = [0u8; 32];
let offset = key.len()-seed.len();
let offset = key.len() - seed.len();
for (i, b) in seed.iter_mut().enumerate() {
*b = key[i+offset];
*b = key[i + offset];
}
let mut pk = [0u8; 32];
let mut sk = [0u8; 32];
if unsafe { libsodium_sys::crypto_box_seed_keypair(&mut pk, &mut sk, &seed) } != 0 {
panic!("Libsodium failed");
tr_panic!("Libsodium failed");
}
(PublicKey::from_slice(&pk).unwrap(), SecretKey::from_slice(&sk).unwrap())
(
PublicKey::from_slice(&pk).unwrap(),
SecretKey::from_slice(&sk).unwrap()
)
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_gen_keypair() {
let key1 = Crypto::gen_keypair();
let key2 = Crypto::gen_keypair();
assert!(key1.0 != key2.0);
}
#[test]
fn test_keypair_from_password() {
let key1 = Crypto::keypair_from_password("foo");
let key2 = Crypto::keypair_from_password("foo");
assert_eq!(key1.0, key2.0);
let key3 = Crypto::keypair_from_password("bar");
assert!(key1.0 != key3.0);
}
#[test]
fn test_add_keypair() {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
assert!(!crypto.contains_secret_key(&pk));
crypto.add_secret_key(pk, sk);
assert!(crypto.contains_secret_key(&pk));
}
#[test]
fn test_save_load_keyfile() {
let (pk, sk) = Crypto::gen_keypair();
let data = Crypto::save_keypair_to_file_data(&pk, &sk);
let res = Crypto::load_keypair_from_file_data(&data);
assert!(res.is_ok());
let (pk2, sk2) = res.unwrap();
assert_eq!(pk, pk2);
assert_eq!(sk, sk2);
}
#[test]
fn test_encrypt_decrpyt() {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
crypto.add_secret_key(pk, sk);
let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
let cleartext = b"test123";
let result = crypto.encrypt(&encryption, cleartext);
assert!(result.is_ok());
let ciphertext = result.unwrap();
assert!(&ciphertext != cleartext);
let result = crypto.decrypt(&encryption, &ciphertext);
assert!(result.is_ok());
let unciphered = result.unwrap();
assert_eq!(&cleartext[..] as &[u8], &unciphered as &[u8]);
}
#[test]
fn test_wrong_key() {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
crypto.add_secret_key(pk, sk.clone());
let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
let cleartext = b"test123";
let result = crypto.encrypt(&encryption, cleartext);
assert!(result.is_ok());
let ciphertext = result.unwrap();
assert!(&ciphertext != cleartext);
let mut crypto2 = Crypto::dummy();
let mut sk2 = sk[..].to_vec();
sk2[4] ^= 53;
assert!(&sk[..] as &[u8] != &sk2[..] as &[u8]);
crypto2.add_secret_key(pk, SecretKey::from_slice(&sk2).unwrap());
let result = crypto2.decrypt(&encryption, &ciphertext);
assert!(result.is_err());
}
#[test]
fn test_modified_ciphertext() {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
crypto.add_secret_key(pk, sk.clone());
let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
let cleartext = b"test123";
let result = crypto.encrypt(&encryption, cleartext);
assert!(result.is_ok());
let mut ciphertext = result.unwrap();
assert!(&ciphertext != cleartext);
ciphertext[4] ^= 53;
let result = crypto.decrypt(&encryption, &ciphertext);
assert!(result.is_err());
}
}
#[cfg(feature = "bench")]
mod benches {
#[allow(unused_imports)]
use super::*;
use test::Bencher;
#[allow(dead_code, needless_range_loop)]
fn test_data(n: usize) -> Vec<u8> {
let mut input = vec![0; n];
for i in 0..input.len() {
input[i] = (i * i * i) as u8;
}
input
}
#[bench]
fn bench_key_generate(b: &mut Bencher) {
b.iter(|| Crypto::gen_keypair());
}
#[bench]
fn bench_encrypt(b: &mut Bencher) {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
crypto.add_secret_key(pk, sk.clone());
let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
let input = test_data(512 * 1024);
b.iter(|| crypto.encrypt(&encryption, &input));
b.bytes = input.len() as u64;
}
#[bench]
fn bench_decrypt(b: &mut Bencher) {
let mut crypto = Crypto::dummy();
let (pk, sk) = Crypto::gen_keypair();
crypto.add_secret_key(pk, sk.clone());
let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&pk[..]));
let input = test_data(512 * 1024);
let output = crypto.encrypt(&encryption, &input).unwrap();
b.iter(|| crypto.decrypt(&encryption, &output));
b.bytes = input.len() as u64;
}
}
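The switch of path to Option<PathBuf> is what makes Crypto::dummy() safe here: with path: None, register_secret_key keeps keys purely in memory instead of persisting a keyfile. A minimal sketch of the flow the tests rely on:

fn encryption_roundtrip() {
    let mut crypto = Crypto::dummy(); // in-memory only, no keyfile directory
    let (public, secret) = Crypto::gen_keypair();
    crypto.add_secret_key(public, secret);
    let encryption = (EncryptionMethod::Sodium, ByteBuf::from(&public[..]));
    let ciphertext = crypto.encrypt(&encryption, b"payload").unwrap();
    let cleartext = crypto.decrypt(&encryption, &ciphertext).unwrap();
    assert_eq!(&cleartext[..], &b"payload"[..]);
}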

View File

@@ -7,15 +7,21 @@ mod linux {
use std::os::unix::ffi::OsStringExt;
#[inline]
pub fn chown<P: AsRef<Path>>(path: P, uid: libc::uid_t, gid: libc::gid_t) -> Result<(), io::Error> {
pub fn chown<P: AsRef<Path>>(
path: P,
uid: libc::uid_t,
gid: libc::gid_t,
) -> Result<(), io::Error> {
let path = CString::new(path.as_ref().to_path_buf().into_os_string().into_vec()).unwrap();
let result = unsafe { libc::lchown((&path).as_ptr(), uid, gid) };
match result {
0 => Ok(()),
-1 => Err(io::Error::last_os_error()),
_ => unreachable!()
_ => unreachable!(),
}
}
}
pub use self::linux::*;
// Not testing since this requires root
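A hypothetical call for illustration (the path and uid/gid values are invented); it needs root or CAP_CHOWN, which is why the module ships without tests:

fn chown_example() -> Result<(), std::io::Error> {
    // lchown semantics: changes ownership of the link itself, not its target
    chown("/tmp/example", 1000, 1000)
}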

View File

@@ -12,7 +12,6 @@ use std::u64;
use std::io::{self, Read, Write};
#[repr(packed)]
#[derive(Clone, Copy, PartialEq, Hash, Eq, Default, Ord, PartialOrd)]
pub struct Hash {
pub high: u64,
@@ -27,7 +26,7 @@ impl Hash {
#[inline]
pub fn empty() -> Self {
Hash{high: 0, low: 0}
Hash { high: 0, low: 0 }
}
#[inline]
@@ -45,14 +44,20 @@ impl Hash {
pub fn read_from(src: &mut Read) -> Result<Self, io::Error> {
let high = try!(src.read_u64::<LittleEndian>());
let low = try!(src.read_u64::<LittleEndian>());
Ok(Hash { high: high, low: low })
Ok(Hash {
high,
low
})
}
#[inline]
pub fn from_string(val: &str) -> Result<Self, ()> {
let high = try!(u64::from_str_radix(&val[..16], 16).map_err(|_| ()));
let low = try!(u64::from_str_radix(&val[16..], 16).map_err(|_| ()));
Ok(Self { high: high, low: low })
Ok(Self {
high,
low
})
}
}
@@ -72,7 +77,10 @@ impl fmt::Debug for Hash {
impl Serialize for Hash {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut dat = [0u8; 16];
LittleEndian::write_u64(&mut dat[..8], self.high);
LittleEndian::write_u64(&mut dat[8..], self.low);
@@ -81,12 +89,15 @@ impl Serialize for Hash {
}
impl<'a> Deserialize<'a> for Hash {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'a> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'a>,
{
let dat: Vec<u8> = try!(ByteBuf::deserialize(deserializer)).into();
if dat.len() != 16 {
return Err(D::Error::custom("Invalid key length"));
return Err(D::Error::custom(tr!("Invalid key length")));
}
Ok(Hash{
Ok(Hash {
high: LittleEndian::read_u64(&dat[..8]),
low: LittleEndian::read_u64(&dat[8..])
})
@@ -94,7 +105,7 @@ impl<'a> Deserialize<'a> for Hash {
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum HashMethod {
Blake2,
Murmur3
@@ -111,9 +122,13 @@ impl HashMethod {
match *self {
HashMethod::Blake2 => {
let hash = blake2b(16, &[], data);
let hash = unsafe { &*mem::transmute::<_, *mut (u64, u64)>(hash.as_bytes().as_ptr()) };
Hash { high: u64::from_be(hash.0), low: u64::from_be(hash.1) }
},
let hash =
unsafe { &*mem::transmute::<_, *const (u64, u64)>(hash.as_bytes().as_ptr()) };
Hash {
high: u64::from_be(hash.0),
low: u64::from_be(hash.1)
}
}
HashMethod::Murmur3 => {
let (a, b) = murmurhash3_x64_128(data, 0);
Hash { high: a, low: b }
@@ -126,7 +141,7 @@ impl HashMethod {
match name {
"blake2" => Ok(HashMethod::Blake2),
"murmur3" => Ok(HashMethod::Murmur3),
_ => Err("Unsupported hash method")
_ => Err(tr!("Unsupported hash method")),
}
}
@@ -134,8 +149,88 @@ impl HashMethod {
pub fn name(&self) -> &'static str {
match *self {
HashMethod::Blake2 => "blake2",
HashMethod::Murmur3 => "murmur3"
HashMethod::Murmur3 => "murmur3",
}
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_parse() {
assert_eq!(HashMethod::from("blake2"), Ok(HashMethod::Blake2));
assert_eq!(HashMethod::from("murmur3"), Ok(HashMethod::Murmur3));
assert!(HashMethod::from("foo").is_err());
}
#[test]
fn test_to_str() {
assert_eq!(HashMethod::Blake2.name(), "blake2");
assert_eq!(HashMethod::Murmur3.name(), "murmur3");
}
#[test]
fn test_blake2() {
assert_eq!(
HashMethod::Blake2.hash(b"abc"),
Hash {
high: 0xcf4ab791c62b8d2b,
low: 0x2109c90275287816
}
);
}
#[test]
fn test_murmur3() {
assert_eq!(
HashMethod::Murmur3.hash(b"123"),
Hash {
high: 10978418110857903978,
low: 4791445053355511657
}
);
}
}
#[cfg(feature = "bench")]
mod benches {
#[allow(unused_imports)]
use super::*;
use test::Bencher;
#[allow(dead_code, needless_range_loop)]
fn test_data(n: usize) -> Vec<u8> {
let mut input = vec![0; n];
for i in 0..input.len() {
input[i] = (i * i * i) as u8;
}
input
}
#[bench]
fn bench_blake2(b: &mut Bencher) {
let data = test_data(16 * 1024);
b.bytes = data.len() as u64;
b.iter(|| HashMethod::Blake2.hash(&data));
}
#[bench]
fn bench_murmur3(b: &mut Bencher) {
let data = test_data(16 * 1024);
b.bytes = data.len() as u64;
b.iter(|| HashMethod::Murmur3.hash(&data));
}
}
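A short sketch tying the pieces together; the fields are copied to locals first because taking references into a #[repr(packed)] struct would be unaligned:

fn hash_example() {
    let hash = HashMethod::Blake2.hash(b"abc");
    let (high, low) = (hash.high, hash.low);
    // from_string above parses 16 hex digits per half, high half first.
    let hex = format!("{:016x}{:016x}", high, low);
    assert_eq!(Hash::from_string(&hex), Ok(hash));
}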

View File

@@ -1,5 +1,8 @@
pub fn to_hex(data: &[u8]) -> String {
data.iter().map(|b| format!("{:02x}", b)).collect::<Vec<String>>().join("")
data.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<String>>()
.join("")
}
pub fn parse_hex(hex: &str) -> Result<Vec<u8>, ()> {
@@ -12,9 +15,9 @@ pub fn parse_hex(hex: &str) -> Result<Vec<u8>, ()> {
b'A'...b'F' => buf |= byte - b'A' + 10,
b'a'...b'f' => buf |= byte - b'a' + 10,
b'0'...b'9' => buf |= byte - b'0',
b' '|b'\r'|b'\n'|b'\t' => {
b' ' | b'\r' | b'\n' | b'\t' => {
buf >>= 4;
continue
continue;
}
_ => return Err(()),
}
@@ -29,3 +32,33 @@
_ => Err(()),
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_to_hex() {
assert_eq!(to_hex(&[0]), "00");
assert_eq!(to_hex(&[1]), "01");
assert_eq!(to_hex(&[15]), "0f");
assert_eq!(to_hex(&[16]), "10");
assert_eq!(to_hex(&[255]), "ff");
assert_eq!(to_hex(&[5, 255]), "05ff");
}
#[test]
fn test_parse_hex() {
assert_eq!(parse_hex("00"), Ok(vec![0]));
assert_eq!(parse_hex("01"), Ok(vec![1]));
assert_eq!(parse_hex("0f"), Ok(vec![15]));
assert_eq!(parse_hex("0fff"), Ok(vec![15, 255]));
assert_eq!(parse_hex("0F"), Ok(vec![15]));
assert_eq!(parse_hex("01 02\n03\t04"), Ok(vec![1, 2, 3, 4]));
}
}
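Since the parser skips interior whitespace, a round trip through parse_hex and to_hex also normalizes formatting (a small sketch):

fn hex_roundtrip() {
    let bytes = parse_hex("0a ff\t10").unwrap();
    assert_eq!(bytes, vec![10, 255, 16]);
    assert_eq!(to_hex(&bytes), "0aff10");
}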

View File

@@ -1,14 +1,20 @@
use libc;
use std::ffi;
extern {
extern "C" {
fn gethostname(name: *mut libc::c_char, size: libc::size_t) -> libc::c_int;
}
pub fn get_hostname() -> Result<String, ()> {
let mut buf = Vec::with_capacity(255);
buf.resize(255, 0u8);
if unsafe { gethostname(buf.as_mut_ptr() as *mut libc::c_char, buf.len() as libc::size_t) } == 0 {
if unsafe {
gethostname(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t
)
} == 0
{
buf[254] = 0; //enforce null-termination
let name = unsafe { ffi::CStr::from_ptr(buf.as_ptr() as *const libc::c_char) };
name.to_str().map(|s| s.to_string()).map_err(|_| ())
@@ -16,3 +22,21 @@ pub fn get_hostname() -> Result<String, ()> {
Err(())
}
}
mod tests {
#[allow(unused_imports)]
use super::*;
#[test]
fn test_gethostname() {
let res = get_hostname();
assert!(res.is_ok());
let name = res.unwrap();
assert!(name.len() >= 1);
}
}

View File

@@ -1,4 +1,4 @@
use ::prelude::*;
use prelude::*;
use serde_yaml;
use chrono::prelude::*;
@@ -15,22 +15,22 @@
Io(err: io::Error) {
from()
cause(err)
description("IO error")
display("Lock error: IO error\n\tcaused by: {}", err)
description(tr!("IO error"))
display("{}", tr_format!("Lock error: IO error\n\tcaused by: {}", err))
}
Yaml(err: serde_yaml::Error) {
from()
cause(err)
description("Yaml format error")
display("Lock error: yaml format error\n\tcaused by: {}", err)
description(tr!("Yaml format error"))
display("{}", tr_format!("Lock error: yaml format error\n\tcaused by: {}", err))
}
InvalidLockState(reason: &'static str) {
description("Invalid lock state")
display("Lock error: invalid lock state: {}", reason)
description(tr!("Invalid lock state"))
display("{}", tr_format!("Lock error: invalid lock state: {}", reason))
}
Locked {
description("Locked")
display("Lock error: locked")
description(tr!("Locked"))
display("{}", tr_format!("Lock error: locked"))
}
}
}
@@ -58,7 +58,8 @@ impl LockFile {
pub fn save<P: AsRef<Path>>(&self, path: P) -> Result<(), LockError> {
let mut f = try!(File::create(path));
Ok(try!(serde_yaml::to_writer(&mut f, &self)))
try!(serde_yaml::to_writer(&mut f, &self));
Ok(())
}
}
@@ -85,7 +86,7 @@ impl LockHandle {
pub fn refresh(&self) -> Result<(), LockError> {
let mut file = try!(LockFile::load(&self.path));
file.date = UTC::now().timestamp();
file.date = Utc::now().timestamp();
file.save(&self.path)
}
}
@@ -121,12 +122,14 @@ impl LockFolder {
for lock in try!(self.get_locks()) {
if lock.exclusive {
if level == LockLevel::Exclusive {
return Err(LockError::InvalidLockState("multiple exclusive locks"))
return Err(LockError::InvalidLockState(tr!("multiple exclusive locks")));
} else {
level = LockLevel::Exclusive
}
} else if level == LockLevel::Exclusive {
return Err(LockError::InvalidLockState("exclusive lock and shared locks"))
return Err(LockError::InvalidLockState(
tr!("exclusive lock and shared locks")
));
} else {
level = LockLevel::Shared
}
@@ -137,20 +140,27 @@ impl LockFolder {
pub fn lock(&self, exclusive: bool) -> Result<LockHandle, LockError> {
let level = try!(self.get_lock_level());
if level == LockLevel::Exclusive || level == LockLevel::Shared && exclusive {
return Err(LockError::Locked)
return Err(LockError::Locked);
}
let lockfile = LockFile {
hostname: get_hostname().unwrap(),
processid: unsafe { libc::getpid() } as usize,
date: UTC::now().timestamp(),
exclusive: exclusive
date: Utc::now().timestamp(),
exclusive
};
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
let path = self.path.join(format!(
"{}-{}.lock",
&lockfile.hostname,
lockfile.processid
));
try!(lockfile.save(&path));
let handle = LockHandle{lock: lockfile, path: path};
let handle = LockHandle {
lock: lockfile,
path
};
if self.get_lock_level().is_err() {
try!(handle.release());
return Err(LockError::Locked)
return Err(LockError::Locked);
}
Ok(handle)
}
@@ -158,19 +168,23 @@ impl LockFolder {
pub fn upgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
let lockfile = &mut lock.lock;
if lockfile.exclusive {
return Ok(())
return Ok(());
}
let level = try!(self.get_lock_level());
if level == LockLevel::Exclusive {
return Err(LockError::Locked)
return Err(LockError::Locked);
}
lockfile.exclusive = true;
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
let path = self.path.join(format!(
"{}-{}.lock",
&lockfile.hostname,
lockfile.processid
));
try!(lockfile.save(&path));
if self.get_lock_level().is_err() {
lockfile.exclusive = false;
try!(lockfile.save(&path));
return Err(LockError::Locked)
return Err(LockError::Locked);
}
Ok(())
}
@@ -178,10 +192,14 @@ impl LockFolder {
pub fn downgrade(&self, lock: &mut LockHandle) -> Result<(), LockError> {
let lockfile = &mut lock.lock;
if !lockfile.exclusive {
return Ok(())
return Ok(());
}
lockfile.exclusive = false;
let path = self.path.join(format!("{}-{}.lock", &lockfile.hostname, lockfile.processid));
let path = self.path.join(format!(
"{}-{}.lock",
&lockfile.hostname,
lockfile.processid
));
lockfile.save(&path)
}
}
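Putting the pieces together, the intended protocol looks like this (a sketch: LockFolder's constructor is outside this diff, and release() is assumed to remove the lock file, as the error paths above suggest):

fn lock_example(folder: &LockFolder) -> Result<(), LockError> {
    let mut handle = try!(folder.lock(false)); // shared lock
    try!(folder.upgrade(&mut handle)); // exclusive; Err(Locked) if anyone else holds one
    try!(folder.downgrade(&mut handle)); // back to shared
    handle.release() // assumed to return Result<(), LockError>
}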

View File

@@ -10,13 +10,13 @@ pub struct LruCache<K, V> {
}
impl<K: Eq+Hash, V> LruCache<K, V> {
impl<K: Eq + Hash, V> LruCache<K, V> {
#[inline]
pub fn new(min_size: usize, max_size: usize) -> Self {
LruCache {
items: HashMap::default(),
min_size: min_size,
max_size: max_size,
min_size,
max_size,
next: 0
}
}
@@ -55,9 +55,9 @@ impl<K: Eq+Hash, V> LruCache<K, V> {
fn shrink(&mut self) {
let mut tags: Vec<u64> = self.items.values().map(|&(_, n)| n).collect();
tags.sort();
let min = tags[tags.len()-self.min_size];
let min = tags[tags.len() - self.min_size];
let mut new = HashMap::with_capacity(self.min_size);
new.extend(self.items.drain().filter(|&(_,(_, n))| n>=min));
new.extend(self.items.drain().filter(|&(_, (_, n))| n >= min));
self.items = new;
}
}
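The shrink step above is the entire eviction policy: the per-entry counter is a logical timestamp, and once the map grows past max_size only the min_size most recently used entries survive. A sketch of the expected behaviour (put and get are assumed from the unchanged part of the file):

fn lru_example() {
    let mut cache: LruCache<u32, &str> = LruCache::new(2, 4);
    for key in 0..5u32 {
        cache.put(key, "value"); // assumed to bump the counter and shrink past max_size
    }
    // After the fifth insert the cache kept only the two newest keys.
    assert!(cache.get(&4).is_some());
    assert!(cache.get(&0).is_none());
}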

View File

@@ -1,4 +1,3 @@
//mod checksum; not used
mod compression;
mod encryption;
mod hash;
@@ -10,6 +9,7 @@ mod cli;
mod hostname;
mod fs;
mod lock;
mod statistics;
pub mod msgpack;
pub use self::fs::*;
@@ -23,3 +23,4 @@ pub use self::hex::*;
pub use self::cli::*;
pub use self::hostname::*;
pub use self::lock::*;
pub use self::statistics::*;

src/util/statistics.rs Normal file
View File

@@ -0,0 +1,57 @@
#[derive(Debug, Default)]
pub struct ValueStats {
pub min: f32,
pub max: f32,
pub avg: f32,
pub stddev: f32,
pub count: usize,
pub count_xs: usize,
pub count_s: usize,
pub count_m: usize,
pub count_l: usize,
pub count_xl: usize,
}
impl ValueStats {
pub fn from_iter<T: Iterator<Item=f32>, F: Fn() -> T>(iter: F) -> ValueStats {
let mut stats = ValueStats::default();
stats.min = ::std::f32::INFINITY;
let mut sum = 0.0f64;
for val in iter() {
if stats.min > val {
stats.min = val;
}
if stats.max < val {
stats.max = val;
}
sum += f64::from(val);
stats.count += 1;
}
stats.avg = (sum as f32) / (stats.count as f32);
if stats.count < 2 {
stats.count_m = stats.count;
return stats;
}
sum = 0.0;
for val in iter() {
sum += f64::from(val - stats.avg) * f64::from(val - stats.avg);
}
stats.stddev = ((sum as f32)/(stats.count as f32-1.0)).sqrt();
for val in iter() {
if val < stats.avg - 2.0 * stats.stddev {
stats.count_xs += 1;
} else if val < stats.avg - stats.stddev {
stats.count_s += 1;
} else if val < stats.avg + stats.stddev {
stats.count_m += 1;
} else if val < stats.avg + 2.0 * stats.stddev {
stats.count_l += 1;
} else {
stats.count_xl += 1;
}
}
stats
}
}
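from_iter takes a closure that can recreate the iterator, rather than an iterator itself, because the computation needs up to three passes: min/max/avg, then stddev, then the bucket counts. A usage sketch:

fn stats_example() {
    let values = vec![1.0f32, 2.0, 3.0, 4.0, 100.0];
    let stats = ValueStats::from_iter(|| values.iter().cloned());
    println!("min {} max {} avg {} stddev {}", stats.min, stats.max, stats.avg, stats.stddev);
    // count_xl counts values at least two standard deviations above the average.
    println!("outliers: {}", stats.count_xl);
}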

test.sh
View File

@@ -2,58 +2,60 @@ set -ex
rm -rf repos
mkdir repos
target/release/zvault init --compression brotli/3 repos/zvault_brotli3
target/release/zvault init --compression brotli/6 repos/zvault_brotli6
target/release/zvault init --compression lzma2/2 repos/zvault_lzma2
mkdir -p repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2
target/release/zvault init --compression brotli/3 --remote $(pwd)/repos/remotes/zvault_brotli3 $(pwd)/repos/zvault_brotli3
target/release/zvault init --compression brotli/6 --remote $(pwd)/repos/remotes/zvault_brotli6 $(pwd)/repos/zvault_brotli6
target/release/zvault init --compression lzma2/2 --remote $(pwd)/repos/remotes/zvault_lzma2 $(pwd)/repos/zvault_lzma2
attic init repos/attic
borg init -e none repos/borg
borg init -e none repos/borg-zlib
zbackup init --non-encrypted repos/zbackup
cat < test_data/silesia.tar > /dev/null
time target/release/zvault backup repos/zvault_brotli3::silesia1 test_data/silesia.tar
time target/release/zvault backup repos/zvault_brotli3::silesia2 test_data/silesia.tar
time target/release/zvault backup repos/zvault_brotli6::silesia1 test_data/silesia.tar
time target/release/zvault backup repos/zvault_brotli6::silesia2 test_data/silesia.tar
time target/release/zvault backup repos/zvault_lzma2::silesia1 test_data/silesia.tar
time target/release/zvault backup repos/zvault_lzma2::silesia2 test_data/silesia.tar
time attic create repos/attic::silesia1 test_data/silesia.tar
time attic create repos/attic::silesia2 test_data/silesia.tar
time borg create -C none repos/borg::silesia1 test_data/silesia.tar
time borg create -C none repos/borg::silesia2 test_data/silesia.tar
time borg create -C zlib repos/borg-zlib::silesia1 test_data/silesia.tar
time borg create -C zlib repos/borg-zlib::silesia2 test_data/silesia.tar
time zbackup backup --non-encrypted repos/zbackup/backups/silesia1 < test_data/silesia.tar
time zbackup backup --non-encrypted repos/zbackup/backups/silesia2 < test_data/silesia.tar
find test_data/silesia -type f | xargs cat > /dev/null
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_brotli3::silesia1
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_brotli3::silesia2
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_brotli6::silesia1
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_brotli6::silesia2
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_lzma2::silesia1
time target/release/zvault backup test_data/silesia $(pwd)/repos/zvault_lzma2::silesia2
time attic create repos/attic::silesia1 test_data/silesia
time attic create repos/attic::silesia2 test_data/silesia
time borg create -C none repos/borg::silesia1 test_data/silesia
time borg create -C none repos/borg::silesia2 test_data/silesia
time borg create -C zlib repos/borg-zlib::silesia1 test_data/silesia
time borg create -C zlib repos/borg-zlib::silesia2 test_data/silesia
time tar -c test_data/silesia | zbackup backup --non-encrypted repos/zbackup/backups/silesia1
time tar -c test_data/silesia | zbackup backup --non-encrypted repos/zbackup/backups/silesia2
du -h test_data/silesia.tar
du -sh repos/zvault*/bundles repos/attic repos/borg repos/borg-zlib repos/zbackup
du -sh repos/remotes/zvault* repos/attic repos/borg repos/borg-zlib repos/zbackup
rm -rf repos
mkdir repos
target/release/zvault init --compression brotli/3 repos/zvault_brotli3
target/release/zvault init --compression brotli/6 repos/zvault_brotli6
target/release/zvault init --compression lzma2/2 repos/zvault_lzma2
mkdir -p repos/remotes/zvault_brotli3 repos/remotes/zvault_brotli6 repos/remotes/zvault_lzma2
target/release/zvault init --compression brotli/3 --remote $(pwd)/repos/remotes/zvault_brotli3 $(pwd)/repos/zvault_brotli3
target/release/zvault init --compression brotli/6 --remote $(pwd)/repos/remotes/zvault_brotli6 $(pwd)/repos/zvault_brotli6
target/release/zvault init --compression lzma2/2 --remote $(pwd)/repos/remotes/zvault_lzma2 $(pwd)/repos/zvault_lzma2
attic init repos/attic
borg init -e none repos/borg
borg init -e none repos/borg-zlib
zbackup init --non-encrypted repos/zbackup
cat < test_data/ubuntu.tar > /dev/null
time target/release/zvault backup repos/zvault_brotli3::ubuntu1 test_data/ubuntu.tar
time target/release/zvault backup repos/zvault_brotli3::ubuntu2 test_data/ubuntu.tar
time target/release/zvault backup repos/zvault_brotli6::ubuntu1 test_data/ubuntu.tar
time target/release/zvault backup repos/zvault_brotli6::ubuntu2 test_data/ubuntu.tar
time target/release/zvault backup repos/zvault_lzma2::ubuntu1 test_data/ubuntu.tar
time target/release/zvault backup repos/zvault_lzma2::ubuntu2 test_data/ubuntu.tar
time attic create repos/attic::ubuntu1 test_data/ubuntu.tar
time attic create repos/attic::ubuntu2 test_data/ubuntu.tar
time borg create -C none repos/borg::ubuntu1 test_data/ubuntu.tar
time borg create -C none repos/borg::ubuntu2 test_data/ubuntu.tar
time borg create -C zlib repos/borg-zlib::ubuntu1 test_data/ubuntu.tar
time borg create -C zlib repos/borg-zlib::ubuntu2 test_data/ubuntu.tar
time zbackup backup --non-encrypted repos/zbackup/backups/ubuntu1 < test_data/ubuntu.tar
time zbackup backup --non-encrypted repos/zbackup/backups/ubuntu2 < test_data/ubuntu.tar
find test_data/ubuntu -type f | xargs cat > /dev/null
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_brotli3::ubuntu1
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_brotli3::ubuntu2
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_brotli6::ubuntu1
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_brotli6::ubuntu2
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_lzma2::ubuntu1
time target/release/zvault backup test_data/ubuntu $(pwd)/repos/zvault_lzma2::ubuntu2
time attic create repos/attic::ubuntu1 test_data/ubuntu
time attic create repos/attic::ubuntu2 test_data/ubuntu
time borg create -C none repos/borg::ubuntu1 test_data/ubuntu
time borg create -C none repos/borg::ubuntu2 test_data/ubuntu
time borg create -C zlib repos/borg-zlib::ubuntu1 test_data/ubuntu
time borg create -C zlib repos/borg-zlib::ubuntu2 test_data/ubuntu
time tar -c test_data/ubuntu | zbackup backup --non-encrypted repos/zbackup/backups/ubuntu1
time tar -c test_data/ubuntu | zbackup backup --non-encrypted repos/zbackup/backups/ubuntu2
du -h test_data/ubuntu.tar
du -sh repos/zvault*/bundles repos/attic repos/borg repos/borg-zlib repos/zbackup
du -sh repos/remotes/zvault* repos/attic repos/borg repos/borg-zlib repos/zbackup