From 50248e72f9f7df8ad70ba2f0f1ac1291dfc0a9fd Mon Sep 17 00:00:00 2001 From: simonsan <14062932+simonsan@users.noreply.github.com> Date: Thu, 17 Aug 2023 00:39:32 +0200 Subject: [PATCH 1/3] Refactor rustic_core API & add documentation Co-authored-by: Alexander Weiss --- Cargo.lock | 557 +++++++------- Cargo.toml | 12 +- crates/rustic_core/Cargo.toml | 1 + crates/rustic_core/examples/backup.rs | 13 +- crates/rustic_core/examples/check.rs | 6 +- crates/rustic_core/examples/config.rs | 7 +- crates/rustic_core/examples/forget.rs | 4 +- crates/rustic_core/examples/init.rs | 6 +- crates/rustic_core/examples/key.rs | 4 +- crates/rustic_core/examples/ls.rs | 7 +- crates/rustic_core/examples/merge.rs | 4 +- crates/rustic_core/examples/prune.rs | 4 +- crates/rustic_core/examples/restore.rs | 11 +- crates/rustic_core/src/archiver.rs | 55 ++ .../rustic_core/src/archiver/file_archiver.rs | 56 +- crates/rustic_core/src/archiver/parent.rs | 105 ++- crates/rustic_core/src/archiver/tree.rs | 21 +- .../rustic_core/src/archiver/tree_archiver.rs | 87 ++- crates/rustic_core/src/backend.rs | 224 +++++- crates/rustic_core/src/backend/cache.rs | 184 ++++- crates/rustic_core/src/backend/choose.rs | 83 +- crates/rustic_core/src/backend/decrypt.rs | 202 +++++ crates/rustic_core/src/backend/dry_run.rs | 34 +- crates/rustic_core/src/backend/hotcold.rs | 17 + crates/rustic_core/src/backend/ignore.rs | 122 ++- crates/rustic_core/src/backend/local.rs | 429 ++++++++++- crates/rustic_core/src/backend/node.rs | 144 +++- crates/rustic_core/src/backend/rclone.rs | 125 ++- crates/rustic_core/src/backend/rest.rs | 145 +++- crates/rustic_core/src/backend/stdin.rs | 13 +- crates/rustic_core/src/blob.rs | 46 +- crates/rustic_core/src/blob/packer.rs | 288 ++++++- crates/rustic_core/src/blob/tree.rs | 217 +++++- crates/rustic_core/src/cdc/polynom.rs | 9 +- crates/rustic_core/src/cdc/rolling_hash.rs | 57 ++ crates/rustic_core/src/chunker.rs | 53 +- crates/rustic_core/src/commands.rs | 5 + 
crates/rustic_core/src/commands/backup.rs | 106 ++- crates/rustic_core/src/commands/cat.rs | 67 +- crates/rustic_core/src/commands/check.rs | 128 +++- crates/rustic_core/src/commands/config.rs | 92 ++- crates/rustic_core/src/commands/copy.rs | 47 +- crates/rustic_core/src/commands/dump.rs | 22 +- crates/rustic_core/src/commands/forget.rs | 159 +++- crates/rustic_core/src/commands/init.rs | 55 +- crates/rustic_core/src/commands/key.rs | 69 +- crates/rustic_core/src/commands/merge.rs | 51 +- crates/rustic_core/src/commands/prune.rs | 314 +++++++- .../rustic_core/src/commands/repair/index.rs | 33 +- .../src/commands/repair/snapshots.rs | 69 +- crates/rustic_core/src/commands/repoinfo.rs | 88 ++- crates/rustic_core/src/commands/restore.rs | 181 ++++- crates/rustic_core/src/commands/snapshots.rs | 24 +- crates/rustic_core/src/crypto.rs | 20 + crates/rustic_core/src/crypto/aespoly1305.rs | 44 +- crates/rustic_core/src/crypto/hasher.rs | 9 + crates/rustic_core/src/error.rs | 18 +- crates/rustic_core/src/id.rs | 64 +- crates/rustic_core/src/index.rs | 196 ++++- crates/rustic_core/src/index/binarysorted.rs | 28 +- crates/rustic_core/src/index/indexer.rs | 83 +- crates/rustic_core/src/lib.rs | 108 ++- crates/rustic_core/src/progress.rs | 39 + crates/rustic_core/src/repofile.rs | 21 +- crates/rustic_core/src/repofile/configfile.rs | 101 ++- crates/rustic_core/src/repofile/indexfile.rs | 66 +- crates/rustic_core/src/repofile/keyfile.rs | 150 +++- crates/rustic_core/src/repofile/packfile.rs | 153 +++- .../rustic_core/src/repofile/snapshotfile.rs | 573 ++++++++++++-- crates/rustic_core/src/repository.rs | 711 ++++++++++++++++-- crates/rustic_core/src/repository/warm_up.rs | 51 +- src/commands.rs | 13 + src/commands/backup.rs | 51 +- src/commands/cat.rs | 6 +- src/commands/check.rs | 4 +- src/commands/config.rs | 4 +- src/commands/copy.rs | 4 +- src/commands/diff.rs | 18 +- src/commands/init.rs | 10 +- src/commands/key.rs | 4 +- src/commands/list.rs | 2 +- src/commands/ls.rs | 16 
+- src/commands/merge.rs | 15 +- src/commands/prune.rs | 9 +- src/commands/restore.rs | 12 +- src/commands/snapshots.rs | 5 +- src/commands/tag.rs | 2 +- src/filtering.rs | 2 +- 88 files changed, 6553 insertions(+), 921 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 636966f2b..ab7a03b2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" dependencies = [ "gimli", ] @@ -105,15 +105,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "aho-corasick" -version = "0.7.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" -dependencies = [ - "memchr", -] - [[package]] name = "aho-corasick" version = "1.0.4" @@ -155,15 +146,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anstyle-parse" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" dependencies = [ "utf8parse", ] @@ -179,9 +170,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = 
"c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -224,15 +215,15 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", "cfg-if", "libc", - "miniz_oxide 0.6.2", + "miniz_oxide", "object", "rustc-demangle", ] @@ -275,9 +266,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "block-buffer" @@ -290,9 +281,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", "serde", @@ -367,9 +358,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.4" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] @@ -392,9 +383,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +checksum = 
"2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -415,11 +406,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.79" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01" dependencies = [ "jobserver", + "libc", ] [[package]] @@ -532,9 +524,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "comfy-table" -version = "6.2.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e959d788268e3bf9d35ace83e81b124190378e4c91c9067524675e33394b8ba" +checksum = "9ab77dbd8adecaf3f0db40581631b995f312a8a5ae3aa9993188bb8f23d83a5b" dependencies = [ "crossterm", "strum", @@ -601,9 +593,9 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] @@ -654,14 +646,14 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.14" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset 0.8.0", + "memoffset 0.9.0", "scopeguard", ] @@ -677,9 +669,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] @@ -702,9 +694,9 @@ dependencies = [ [[package]] name = "crossterm_winapi" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae1b35a484aa10e07fe0638d02301c5ad24de82d310ccbd2f3693da5f09bf1c" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] @@ -746,12 +738,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" dependencies = [ - "darling_core 0.20.1", - "darling_macro 0.20.1", + "darling_core 0.20.3", + "darling_macro 0.20.3", ] [[package]] @@ -770,9 +762,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" dependencies = [ "fnv", "ident_case", @@ -795,15 +787,24 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ - "darling_core 0.20.1", + "darling_core 0.20.3", "quote", "syn 2.0.28", ] +[[package]] +name = "deranged" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7684a49fb1af197853ef7b2ee694bc1f5b4179556f1e5710e1760c5db6f5e929" +dependencies = [ + "serde", +] + [[package]] name = "derivative" version = "2.2.0" @@ -834,7 +835,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e8ef033054e131169b8f0f9a7af8f5533a9436fadf3c500ed547f730f07090d" dependencies = [ - "darling 0.20.1", + "darling 0.20.3", "proc-macro2", "quote", "syn 2.0.28", @@ -924,9 +925,9 @@ dependencies = [ [[package]] name = "dissimilar" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "210ec60ae7d710bed8683e333e9d2855a8a56a3e9892b38bad3bb0d4d29b0d5e" +checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "duct" @@ -948,9 +949,9 @@ checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encode_unicode" @@ -999,15 +1000,15 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -1064,12 +1065,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", - "miniz_oxide 0.7.1", + "miniz_oxide", ] [[package]] @@ -1211,14 +1212,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818" dependencies = [ "libc", - "windows-targets 0.48.0", + "windows-targets 0.48.2", ] [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -1227,9 +1228,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" [[package]] name = "glob" @@ -1239,11 +1240,11 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.10" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d74589adefde59de1a0c4f4732695c32805624aec7b68d91503d4dba79afc" +checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" dependencies = [ - "aho-corasick 0.7.20", + "aho-corasick", "bstr", "fnv", "log", @@ -1252,9 +1253,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = 
"97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes", "fnv", @@ -1301,18 +1302,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" [[package]] name = "hex" @@ -1362,9 +1354,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -1374,9 +1366,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -1389,7 +1381,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1398,10 +1390,11 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" dependencies = [ + "futures-util", "http", "hyper", "rustls", @@ -1411,9 +1404,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1539,43 +1532,42 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi", "libc", "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.7.2" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" [[package]] name = "is-terminal" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ - "hermit-abi 0.3.1", - "io-lifetimes", - "rustix 0.37.19", + "hermit-abi", + "rustix 0.38.8", "windows-sys 0.48.0", ] [[package]] name = "itertools" -version = "0.10.5" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.6" +version = "1.0.9" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" @@ -1588,9 +1580,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1615,9 +1607,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "lock_api" @@ -1641,7 +1633,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] [[package]] @@ -1661,9 +1653,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -1696,15 +1688,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "miniz_oxide" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.7.1" @@ -1752,20 +1735,20 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.2.6", + "hermit-abi", "libc", ] @@ -1786,9 +1769,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.30.4" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" +checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" dependencies = [ "memchr", ] @@ -1870,7 +1853,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets 0.48.0", + "windows-targets 0.48.2", ] [[package]] @@ -1884,9 +1867,9 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ca0b5a68607598bf3bad68f32227a8164f6254833f84eafaac409cd6746c31" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest", "hmac", @@ -1900,9 +1883,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pin-project-lite" -version = "0.2.9" +version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -1929,9 +1912,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "767eb9f07d4a5ebcb39bbf2d452058a93c011373abf6832e24194a1c3f004794" +checksum = "f32154ba0af3a075eefa1eda8bb414ee928f62303a54ea85b8d6638ff1a6ee9e" [[package]] name = "ppv-lite86" @@ -1981,9 +1964,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] @@ -2034,9 +2017,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.29" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965" dependencies = [ "proc-macro2", ] @@ -2124,13 +2107,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.4" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick", "memchr", - "regex-syntax 0.7.2", + "regex-automata 0.3.6", + "regex-syntax 0.7.4", ] [[package]] @@ -2142,6 +2126,17 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" 
+version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.7.4", +] + [[package]] name = "regex-syntax" version = "0.6.29" @@ -2150,9 +2145,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.2" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" + +[[package]] +name = "relative-path" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" +checksum = "4bf2521270932c3c7bed1a59151222bd7643c79310f2916f01925e1e16255698" [[package]] name = "reqwest" @@ -2240,9 +2241,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.17.0" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de1bb486a691878cd320c2f0d319ba91eeaa2e894066d8b5f8f117c000e9d962" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" dependencies = [ "futures", "futures-timer", @@ -2252,15 +2253,18 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.17.0" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290ca1a1c8ca7edb7c3283bd44dc35dd54fdec6253a3912e201ba1072018fca8" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", + "glob", "proc-macro2", "quote", + "regex", + "relative-path", "rustc_version", - "syn 1.0.109", + "syn 2.0.28", "unicode-ident", ] @@ -2307,7 +2311,7 @@ version = "0.5.4-dev" dependencies = [ "abscissa_core", "aes256ctr_poly1305aes", - "aho-corasick 1.0.4", + "aho-corasick", "anyhow", "binrw", "bytes", @@ -2366,7 +2370,7 @@ 
dependencies = [ "tracing-error", "tracing-subscriber", "walkdir", - "xattr 1.0.1", + "xattr", "zstd", ] @@ -2375,7 +2379,7 @@ name = "rustic_core" version = "0.6.0" dependencies = [ "aes256ctr_poly1305aes", - "aho-corasick 1.0.4", + "aho-corasick", "backoff", "binrw", "bytes", @@ -2427,10 +2431,11 @@ dependencies = [ "sha2", "shell-words", "simplelog", + "tempfile", "thiserror", "url", "walkdir", - "xattr 1.0.1", + "xattr", "zstd", ] @@ -2438,16 +2443,16 @@ dependencies = [ name = "rustic_testing" version = "0.1.0" dependencies = [ - "aho-corasick 1.0.4", + "aho-corasick", "once_cell", "tempfile", ] [[package]] name = "rustix" -version = "0.37.19" +version = "0.37.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" +checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" dependencies = [ "bitflags 1.3.2", "errno", @@ -2459,22 +2464,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.5", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.21.1" +version = "0.21.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", @@ -2484,9 +2489,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -2496,18 +2501,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ "base64", ] [[package]] name = "rustls-webpki" -version = "0.100.2" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -2524,15 +2529,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.13" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "salsa20" @@ -2554,18 +2559,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] name = "scopeguard" -version 
= "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "scrypt" @@ -2600,9 +2605,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2613,9 +2618,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -2623,9 +2628,9 @@ dependencies = [ [[package]] name = "self_update" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca4e4e6f29fddb78b3e7a6e5a395e8274d4aca2d36b2278a297fa49673a5b7c7" +checksum = "a667e18055120bcc9a658d55d36f2f6bfc82e07968cc479ee7774e3bfb501e14" dependencies = [ "either", "flate2", @@ -2737,7 +2742,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9197f1ad0e3c173a0222d3c4404fb04c3afe87e962bcb327af73e8301fa203c7" dependencies = [ - "darling 0.20.1", + "darling 0.20.3", "proc-macro2", "quote", "syn 2.0.28", @@ -2757,9 +2762,9 @@ dependencies = [ [[package]] name = "sha2-asm" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf27176fb5d15398e3a479c652c20459d9dac830dedd1fa55b42a77dbcdbfcea" +checksum = 
"f27ba7066011e3fb30d808b51affff34f0a66d3a03a58edd787c6e420e40e44e" dependencies = [ "cc", ] @@ -2791,9 +2796,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "signal-hook" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", @@ -2841,9 +2846,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" dependencies = [ "serde", ] @@ -2870,6 +2875,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -2949,13 +2964,13 @@ dependencies = [ [[package]] name = "tar" -version = "0.4.38" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6" +checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" dependencies = [ "filetime", "libc", - "xattr 0.2.3", + "xattr", ] [[package]] @@ -2967,7 +2982,7 @@ dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.38.4", + "rustix 0.38.8", "windows-sys 0.48.0", ] @@ -2986,7 +3001,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237" dependencies = [ 
- "rustix 0.37.19", + "rustix 0.37.23", "windows-sys 0.48.0", ] @@ -3022,10 +3037,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "b0fdd63d58b18d663fbdf70e049f00a22c8e42be082203be7f26589213cd75ea" dependencies = [ + "deranged", "itoa", "libc", "num_threads", @@ -3042,9 +3058,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "eb71511c991639bb078fd5bf97757e03914361c48100d52878b8e52b46fb92cd" dependencies = [ "time-core", ] @@ -3075,18 +3091,18 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.2" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", - "socket2", + "socket2 0.5.3", "tokio-macros", "windows-sys 0.48.0", ] @@ -3104,9 +3120,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", @@ -3159,9 +3175,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.12" +version = "0.19.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "serde", @@ -3202,9 +3218,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", @@ -3280,9 +3296,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -3334,9 +3350,9 @@ dependencies = [ [[package]] name = "urlencoding" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf8parse" @@ -3377,11 +3393,10 @@ dependencies = [ [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -3393,9 +3408,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3403,9 +3418,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", @@ -3418,9 +3433,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -3430,9 +3445,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3440,9 +3455,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", @@ -3453,9 +3468,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" @@ -3472,9 +3487,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3536,22 +3551,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.2", ] [[package]] @@ -3569,7 +3569,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.2", ] [[package]] @@ -3589,17 +3589,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "d1eeca1c172a285ee6c2c84c341ccea837e7c01b12fbb2d0fe3c9e550ce49ec8" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - 
"windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.2", + "windows_aarch64_msvc 0.48.2", + "windows_i686_gnu 0.48.2", + "windows_i686_msvc 0.48.2", + "windows_x86_64_gnu 0.48.2", + "windows_x86_64_gnullvm 0.48.2", + "windows_x86_64_msvc 0.48.2", ] [[package]] @@ -3610,9 +3610,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "b10d0c968ba7f6166195e13d593af609ec2e3d24f916f081690695cf5eaffb2f" [[package]] name = "windows_aarch64_msvc" @@ -3622,9 +3622,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "571d8d4e62f26d4932099a9efe89660e8bd5087775a2ab5cdd8b747b811f1058" [[package]] name = "windows_i686_gnu" @@ -3634,9 +3634,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "2229ad223e178db5fbbc8bd8d3835e51e566b8474bfca58d2e6150c48bb723cd" [[package]] name = "windows_i686_msvc" @@ -3646,9 +3646,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "600956e2d840c194eedfc5d18f8242bc2e17c7775b6684488af3a9fff6fe3287" [[package]] name = "windows_x86_64_gnu" @@ -3658,9 +3658,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "ea99ff3f8b49fb7a8e0d305e5aec485bd068c2ba691b6e277d29eaeac945868a" [[package]] name = "windows_x86_64_gnullvm" @@ -3670,9 +3670,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "8f1a05a1ece9a7a0d5a7ccf30ba2c33e3a61a30e042ffd247567d1de1d94120d" [[package]] name = "windows_x86_64_msvc" @@ -3682,15 +3682,15 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "d419259aba16b663966e29e6d7c6ecfa0bb8425818bb96f6f1f3c3eb71a6e7b9" [[package]] name = "winnow" -version = "0.4.6" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" +checksum = "83817bbecf72c73bad717ee86820ebf286203d2e04c3951f3cd538869c897364" dependencies = [ "memchr", ] @@ -3704,15 +3704,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "xattr" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc" -dependencies = [ - "libc", -] - [[package]] name = "xattr" version = "1.0.1" @@ -3757,9 +3748,9 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "6.0.5+zstd.1.5.4" +version = "6.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" dependencies = [ "libc", "zstd-sys", diff --git a/Cargo.toml b/Cargo.toml index c65c4e977..1d2555bf3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -195,7 +195,7 @@ aho-corasick = "1" # rest backend reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls-native-roots", "stream", "blocking"] } backoff = "0.4" -url = "2.3.1" +url = "2.4.0" # rclone backend semver = "1" @@ -208,7 +208,7 @@ enum-map = "2" enum-map-derive = "0.13" rhai = { version = "1.15", features = ["sync", "serde", "no_optimize", "no_module", "no_custom_syntax", "only_i64"] } simplelog = "0.12" -comfy-table = "6.1.4" +comfy-table = "7.0.1" # cache dirs = "5" @@ -223,15 +223,15 @@ path-dedot = "3" dunce = "1" gethostname = "0.4" bytesize = "1" -itertools = "0.10" +itertools = "0.11" humantime = "2" clap_complete = "4" clap = { version = "4", features = ["derive", "env", "wrap_help"] } -once_cell = "1.17" -self_update = { version = "0.36", default-features = false, features = ["rustls", "archive-tar", "compression-flate2"] } +once_cell = "1.18" +self_update = { version = "0.37", default-features = false, features = ["rustls", "archive-tar", "compression-flate2"] } # dev dependencies -rstest = "0.17" +rstest = "0.18" quickcheck = "1" quickcheck_macros = "1" tempfile = "3.8" diff --git a/crates/rustic_core/Cargo.toml b/crates/rustic_core/Cargo.toml index 9dd71e127..58e836d38 100644 --- a/crates/rustic_core/Cargo.toml +++ b/crates/rustic_core/Cargo.toml @@ -132,6 +132,7 @@ rstest = { workspace = 
true } rustdoc-json = "0.8.7" rustup-toolchain = "0.1.4" simplelog = { workspace = true } +tempfile = { workspace = true } [profile.dev] opt-level = 0 diff --git a/crates/rustic_core/examples/backup.rs b/crates/rustic_core/examples/backup.rs index 0755faa29..7689cfa3e 100644 --- a/crates/rustic_core/examples/backup.rs +++ b/crates/rustic_core/examples/backup.rs @@ -1,5 +1,5 @@ //! `backup` example -use rustic_core::{BackupOpts, PathList, Repository, RepositoryOptions, SnapshotFile}; +use rustic_core::{BackupOptions, PathList, Repository, RepositoryOptions, SnapshotOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -13,11 +13,14 @@ fn main() -> Result<(), Box> { .password("test"); let repo = Repository::new(&repo_opts)?.open()?.to_indexed_ids()?; - let backup_opts = BackupOpts::default(); - let source = PathList::from_string(".", true)?; // true: sanitize the given string - let dry_run = false; + let backup_opts = BackupOptions::default(); + let source = PathList::from_string(".")?.sanitize()?; + let snap = SnapshotOptions::default() + .add_tags("tag1,tag2")? + .to_snapshot()?; - let snap = repo.backup(&backup_opts, source, SnapshotFile::default(), dry_run)?; + // Create snapshot + let snap = repo.backup(&backup_opts, source, snap)?; println!("successfully created snapshot:\n{snap:#?}"); Ok(()) diff --git a/crates/rustic_core/examples/check.rs b/crates/rustic_core/examples/check.rs index 6ef7faa78..2921f7a54 100644 --- a/crates/rustic_core/examples/check.rs +++ b/crates/rustic_core/examples/check.rs @@ -1,5 +1,5 @@ //! 
`check` example -use rustic_core::{CheckOpts, Repository, RepositoryOptions}; +use rustic_core::{CheckOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -13,8 +13,8 @@ fn main() -> Result<(), Box> { .password("test"); let repo = Repository::new(&repo_opts)?.open()?; - // Check respository with standard options - let opts = CheckOpts::default(); + // Check respository with standard options but omitting cache checks + let opts = CheckOptions::default().trust_cache(true); repo.check(opts)?; Ok(()) } diff --git a/crates/rustic_core/examples/config.rs b/crates/rustic_core/examples/config.rs index 6bd9b424c..f40125bbb 100644 --- a/crates/rustic_core/examples/config.rs +++ b/crates/rustic_core/examples/config.rs @@ -1,5 +1,5 @@ //! `config` example -use rustic_core::{ConfigOpts, Repository, RepositoryOptions}; +use rustic_core::{max_compression_level, ConfigOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -14,10 +14,7 @@ fn main() -> Result<(), Box> { let repo = Repository::new(&repo_opts)?.open()?; // Set Config, e.g. 
Compression level - let config_opts = ConfigOpts { - set_compression: Some(22), - ..Default::default() - }; + let config_opts = ConfigOptions::default().set_compression(max_compression_level()); repo.apply_config(&config_opts)?; Ok(()) } diff --git a/crates/rustic_core/examples/forget.rs b/crates/rustic_core/examples/forget.rs index 51f9967fd..aa8fec406 100644 --- a/crates/rustic_core/examples/forget.rs +++ b/crates/rustic_core/examples/forget.rs @@ -15,9 +15,7 @@ fn main() -> Result<(), Box> { // Check respository with standard options let group_by = SnapshotGroupCriterion::default(); - let mut keep = KeepOptions::default(); - keep.keep_daily = 5; - keep.keep_weekly = 10; + let keep = KeepOptions::default().keep_daily(5).keep_weekly(10); let snaps = repo.get_forget_snapshots(&keep, group_by, |_| true)?; println!("{snaps:?}"); // to remove the snapshots-to-forget, uncomment this line: diff --git a/crates/rustic_core/examples/init.rs b/crates/rustic_core/examples/init.rs index 1ead46176..0b04632e6 100644 --- a/crates/rustic_core/examples/init.rs +++ b/crates/rustic_core/examples/init.rs @@ -1,5 +1,5 @@ //! 
`init` example -use rustic_core::{ConfigOpts, KeyOpts, Repository, RepositoryOptions}; +use rustic_core::{ConfigOptions, KeyOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -11,8 +11,8 @@ fn main() -> Result<(), Box> { let repo_opts = RepositoryOptions::default() .repository("/tmp/repo") .password("test"); - let key_opts = KeyOpts::default(); - let config_opts = ConfigOpts::default(); + let key_opts = KeyOptions::default(); + let config_opts = ConfigOptions::default(); let _repo = Repository::new(&repo_opts)?.init(&key_opts, &config_opts)?; // -> use _repo for any operation on an open repository diff --git a/crates/rustic_core/examples/key.rs b/crates/rustic_core/examples/key.rs index 345f59c50..4ef11f74b 100644 --- a/crates/rustic_core/examples/key.rs +++ b/crates/rustic_core/examples/key.rs @@ -1,5 +1,5 @@ //! `key` example -use rustic_core::{KeyOpts, Repository, RepositoryOptions}; +use rustic_core::{KeyOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -14,7 +14,7 @@ fn main() -> Result<(), Box> { let repo = Repository::new(&repo_opts)?.open()?; // Add a new key with the given password - let key_opts = KeyOpts::default(); + let key_opts = KeyOptions::default(); repo.add_key("new_password", &key_opts)?; Ok(()) } diff --git a/crates/rustic_core/examples/ls.rs b/crates/rustic_core/examples/ls.rs index 414dd9262..ab156181c 100644 --- a/crates/rustic_core/examples/ls.rs +++ b/crates/rustic_core/examples/ls.rs @@ -1,5 +1,5 @@ //! 
`ls` example -use rustic_core::{Repository, RepositoryOptions, TreeStreamerOptions}; +use rustic_core::{LsOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -17,9 +17,8 @@ fn main() -> Result<(), Box> { let node = repo.node_from_snapshot_path("latest", |_| true)?; // recursively list the snapshot contents using no additional filtering - let recursive = true; - let streamer_opts = TreeStreamerOptions::default(); - for item in repo.ls(&node, &streamer_opts, recursive)? { + let ls_opts = LsOptions::default(); + for item in repo.ls(&node, &ls_opts)? { let (path, _) = item?; println!("{path:?} "); } diff --git a/crates/rustic_core/examples/merge.rs b/crates/rustic_core/examples/merge.rs index 40321fe90..13dd44437 100644 --- a/crates/rustic_core/examples/merge.rs +++ b/crates/rustic_core/examples/merge.rs @@ -1,5 +1,5 @@ //! `merge` example -use rustic_core::{latest_node, Repository, RepositoryOptions, SnapshotFile}; +use rustic_core::{last_modified_node, repofile::SnapshotFile, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -16,7 +16,7 @@ fn main() -> Result<(), Box> { // Merge all snapshots using the latest entry for duplicate entries let snaps = repo.get_all_snapshots()?; // This creates a new snapshot without removing the used ones - let snap = repo.merge_snapshots(&snaps, &latest_node, SnapshotFile::default())?; + let snap = repo.merge_snapshots(&snaps, &last_modified_node, SnapshotFile::default())?; println!("successfully created snapshot:\n{snap:#?}"); Ok(()) diff --git a/crates/rustic_core/examples/prune.rs b/crates/rustic_core/examples/prune.rs index 6b62403e5..e6d0c4970 100644 --- a/crates/rustic_core/examples/prune.rs +++ b/crates/rustic_core/examples/prune.rs @@ -1,5 +1,5 @@ //! 
`prune` example -use rustic_core::{PruneOpts, Repository, RepositoryOptions}; +use rustic_core::{PruneOptions, Repository, RepositoryOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -13,7 +13,7 @@ fn main() -> Result<(), Box> { .password("test"); let repo = Repository::new(&repo_opts)?.open()?; - let prune_opts = PruneOpts::default(); + let prune_opts = PruneOptions::default(); let prune_plan = repo.prune_plan(&prune_opts)?; println!("{:?}", prune_plan.stats); println!("to repack: {:?}", prune_plan.repack_packs()); diff --git a/crates/rustic_core/examples/restore.rs b/crates/rustic_core/examples/restore.rs index 38a5be92f..085e5d3e4 100644 --- a/crates/rustic_core/examples/restore.rs +++ b/crates/rustic_core/examples/restore.rs @@ -1,7 +1,5 @@ //! `restore` example -use rustic_core::{ - LocalDestination, Repository, RepositoryOptions, RestoreOpts, TreeStreamerOptions, -}; +use rustic_core::{LocalDestination, LsOptions, Repository, RepositoryOptions, RestoreOptions}; use simplelog::{Config, LevelFilter, SimpleLogger}; use std::error::Error; @@ -19,15 +17,14 @@ fn main() -> Result<(), Box> { let node = repo.node_from_snapshot_path("latest", |_| true)?; // use list of the snapshot contents using no additional filtering - let recursive = true; - let streamer_opts = TreeStreamerOptions::default(); - let ls = repo.ls(&node, &streamer_opts, recursive)?; + let streamer_opts = LsOptions::default(); + let ls = repo.ls(&node, &streamer_opts)?; let destination = "./restore/"; // restore to this destination dir let create = true; // create destination dir, if it doesn't exist let dest = LocalDestination::new(destination, create, !node.is_dir())?; - let opts = RestoreOpts::default(); + let opts = RestoreOptions::default(); let dry_run = false; // create restore infos. 
Note: this also already creates needed dirs in the destination let restore_infos = repo.prepare_restore(&opts, ls.clone(), &dest, dry_run)?; diff --git a/crates/rustic_core/src/archiver.rs b/crates/rustic_core/src/archiver.rs index a56332788..e85a57156 100644 --- a/crates/rustic_core/src/archiver.rs +++ b/crates/rustic_core/src/archiver.rs @@ -20,17 +20,51 @@ use crate::{ repofile::{configfile::ConfigFile, snapshotfile::SnapshotFile}, Progress, RusticResult, }; + +/// The `Archiver` is responsible for archiving files and trees. +/// It will read the file, chunk it, and write the chunks to the backend. +/// +/// # Type Parameters +/// +/// * `BE` - The backend type. +/// * `I` - The index to read from. #[allow(missing_debug_implementations)] pub struct Archiver { + /// The `FileArchiver` is responsible for archiving files. file_archiver: FileArchiver, + + /// The `TreeArchiver` is responsible for archiving trees. tree_archiver: TreeArchiver, + + /// The parent snapshot to use. parent: Parent, + + /// The SharedIndexer is used to index the data. indexer: SharedIndexer, + + /// The backend to write to. be: BE, + + /// The SnapshotFile to write to. snap: SnapshotFile, } impl Archiver { + /// Creates a new `Archiver`. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. + /// * `index` - The index to read from. + /// * `config` - The config file. + /// * `parent` - The parent snapshot to use. + /// * `snap` - The `SnapshotFile` to write to. + /// + /// # Errors + /// + /// * [`PackerErrorKind::ZstdError`] - If the zstd compression level is invalid. + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails pub fn new( be: BE, index: I, @@ -54,6 +88,27 @@ impl Archiver { }) } + /// Archives the given source. + /// + /// This will archive all files and trees in the given source. 
+ /// + /// # Type Parameters + /// + /// * `R` - The type of the source. + /// + /// # Arguments + /// + /// * `index` - The index to read from. + /// * `src` - The source to archive. + /// * `backup_path` - The path to the backup. + /// * `as_path` - The path to archive the backup as. + /// * `p` - The progress bar. + /// + /// # Errors + /// + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. + /// * [`SnapshotFileErrorKind::OutOfRange`] - If the time is not in the range of `Local::now()` pub fn archive( mut self, index: &I, diff --git a/crates/rustic_core/src/archiver/file_archiver.rs b/crates/rustic_core/src/archiver/file_archiver.rs index b30622184..71e3e075d 100644 --- a/crates/rustic_core/src/archiver/file_archiver.rs +++ b/crates/rustic_core/src/archiver/file_archiver.rs @@ -19,11 +19,19 @@ use crate::{ chunker::ChunkIter, crypto::hasher::hash, error::ArchiverErrorKind, + error::RusticResult, index::{indexer::SharedIndexer, IndexedBackend}, + progress::Progress, repofile::configfile::ConfigFile, - Progress, RusticResult, }; +/// The `FileArchiver` is responsible for archiving files. +/// It will read the file, chunk it, and write the chunks to the backend. +/// +/// # Type Parameters +/// +/// * `BE` - The backend type. +/// * `I` - The index to read from. #[derive(Clone)] pub(crate) struct FileArchiver { index: I, @@ -32,6 +40,25 @@ pub(crate) struct FileArchiver { } impl FileArchiver { + /// Creates a new `FileArchiver`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// * `I` - The index to read from. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. + /// * `index` - The index to read from. + /// * `indexer` - The indexer to write to. + /// * `config` - The config file. 
+ /// + /// # Errors + /// + /// * [`PackerErrorKind::ZstdError`] - If the zstd compression level is invalid. + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails pub(crate) fn new( be: BE, index: I, @@ -55,6 +82,24 @@ impl FileArchiver { }) } + /// Processes the given item. + /// + /// # Type Parameters + /// + /// * `O` - The type of the tree item. + /// + /// # Arguments + /// + /// * `item` - The item to process. + /// * `p` - The progress tracker. + /// + /// # Errors + /// + /// If the item could not be processed. + /// + /// # Returns + /// + /// The processed item. pub(crate) fn process( &self, item: ItemWithParent>, @@ -114,6 +159,15 @@ impl FileArchiver { Ok((node, filesize)) } + /// Finalizes the archiver. + /// + /// # Returns + /// + /// The statistics of the archiver. + /// + /// # Panics + /// + /// If the channel could not be dropped pub(crate) fn finalize(self) -> RusticResult { self.data_packer.finalize() } diff --git a/crates/rustic_core/src/archiver/parent.rs b/crates/rustic_core/src/archiver/parent.rs index bcf6e0f71..482df32eb 100644 --- a/crates/rustic_core/src/archiver/parent.rs +++ b/crates/rustic_core/src/archiver/parent.rs @@ -7,27 +7,62 @@ use log::warn; use crate::{ archiver::tree::TreeType, backend::node::Node, blob::tree::Tree, error::ArchiverErrorKind, - id::Id, index::IndexedBackend, RusticResult, + error::RusticResult, id::Id, index::IndexedBackend, }; +/// The `ItemWithParent` is a `TreeType` wrapping the result of a parent search and a type `O`. +/// +/// # Type Parameters +/// +/// * `O` - The type of the `TreeType`. +pub(crate) type ItemWithParent = TreeType<(O, ParentResult<()>), ParentResult>; + +/// The `Parent` is responsible for finding the parent tree of a given tree. #[derive(Debug)] pub struct Parent { + /// The tree id of the parent tree. 
tree_id: Option, + /// The parent tree. tree: Option, + /// The current node index. node_idx: usize, + /// The stack of parent trees. stack: Vec<(Option, usize)>, + /// Ignore ctime when comparing nodes. ignore_ctime: bool, + /// Ignore inode number when comparing nodes. ignore_inode: bool, } +/// The result of a parent search. +/// +/// # Type Parameters +/// +/// * `T` - The type of the matched parent. #[derive(Clone, Debug)] pub(crate) enum ParentResult { + /// The parent was found and matches. Matched(T), + /// The parent was not found. NotFound, + /// The parent was found but doesn't match. NotMatched, } impl ParentResult { + /// Maps a `ParentResult` to a `ParentResult` by applying a function to a contained value. + /// + /// # Type Parameters + /// + /// * `R` - The type of the returned `ParentResult`. + /// + /// # Arguments + /// + /// * `f` - The function to apply. + /// + /// # Returns + /// + /// A `ParentResult` with the result of the function for each `ParentResult`. fn map(self, f: impl FnOnce(T) -> R) -> ParentResult { match self { Self::Matched(t) => ParentResult::Matched(f(t)), @@ -37,9 +72,19 @@ impl ParentResult { } } -pub(crate) type ItemWithParent = TreeType<(O, ParentResult<()>), ParentResult>; - impl Parent { + /// Creates a new `Parent`. + /// + /// # Type Parameters + /// + /// * `BE` - The type of the backend. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `tree_id` - The tree id of the parent tree. + /// * `ignore_ctime` - Ignore ctime when comparing nodes. + /// * `ignore_inode` - Ignore inode number when comparing nodes. pub(crate) fn new( be: &BE, tree_id: Option, @@ -64,6 +109,15 @@ impl Parent { } } + /// Returns the parent node with the given name. + /// + /// # Arguments + /// + /// * `name` - The name of the parent node. + /// + /// # Returns + /// + /// The parent node with the given name, or `None` if the parent node is not found. 
fn p_node(&mut self, name: &OsStr) -> Option<&Node> { match &self.tree { None => None, @@ -87,6 +141,20 @@ impl Parent { } } + /// Returns whether the given node is the parent of the given tree. + /// + /// # Arguments + /// + /// * `node` - The node to check. + /// * `name` - The name of the tree. + /// + /// # Returns + /// + /// Whether the given node is the parent of the given tree. + /// + /// # Note + /// + /// TODO: This function does not check whether the given node is a directory. fn is_parent(&mut self, node: &Node, name: &OsStr) -> ParentResult<&Node> { // use new variables as the mutable borrow is used later let ignore_ctime = self.ignore_ctime; @@ -106,6 +174,16 @@ impl Parent { }) } + // TODO: add documentation! + /// + /// # Type Parameters + /// + /// * `BE` - The type of the backend. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `name` - The name of the parent node. fn set_dir(&mut self, be: &BE, name: &OsStr) { let tree = self.p_node(name).and_then(|p_node| { p_node.subtree.map_or_else( @@ -127,6 +205,11 @@ impl Parent { self.node_idx = 0; } + // TODO: add documentation! + /// + /// # Errors + /// + /// * [`ArchiverErrorKind::TreeStackEmpty`] - If the tree stack is empty. fn finish_dir(&mut self) -> RusticResult<()> { let (tree, node_idx) = self .stack @@ -139,10 +222,26 @@ impl Parent { Ok(()) } + // TODO: add documentation! pub(crate) fn tree_id(&self) -> Option { self.tree_id } + // TODO: add documentation! + /// + /// # Type Parameters + /// + /// * `BE` - The type of the backend. + /// * `O` - The type of the tree item. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `item` - The item to process. + /// + /// # Errors + /// + /// * [`ArchiverErrorKind::TreeStackEmpty`] - If the tree stack is empty. 
pub(crate) fn process( &mut self, be: &BE, diff --git a/crates/rustic_core/src/archiver/tree.rs b/crates/rustic_core/src/archiver/tree.rs index 10060723f..84f87219b 100644 --- a/crates/rustic_core/src/archiver/tree.rs +++ b/crates/rustic_core/src/archiver/tree.rs @@ -9,10 +9,17 @@ use crate::{ /// Iterator which ensures that all subdirectories are visited and closed. /// The resulting Iterator yields a `TreeType` which either contains the original /// item, a new tree to be inserted or a pseudo item which indicates that a tree is finished. - +/// +/// # Type Parameters +/// +/// * `T` - The type of the current item. +/// * `I` - The type of the original Iterator. pub(crate) struct TreeIterator { + /// The original Iterator. iter: I, + /// The current path. path: PathBuf, + /// The current item. item: Option, } @@ -30,10 +37,22 @@ where } } +/// `TreeType` is the type returned by the `TreeIterator`. +/// +/// It either contains the original item, a new tree to be inserted +/// or a pseudo item which indicates that a tree is finished. +/// +/// # Type Parameters +/// +/// * `T` - The type of the original item. +/// * `U` - The type of the new tree. #[derive(Debug)] pub(crate) enum TreeType { + /// New tree to be inserted. NewTree((PathBuf, Node, U)), + /// A pseudo item which indicates that a tree is finished. EndTree, + /// Original item.
Other((PathBuf, Node, T)), } diff --git a/crates/rustic_core/src/archiver/tree_archiver.rs b/crates/rustic_core/src/archiver/tree_archiver.rs index 08ec88835..e302d06c4 100644 --- a/crates/rustic_core/src/archiver/tree_archiver.rs +++ b/crates/rustic_core/src/archiver/tree_archiver.rs @@ -8,23 +8,56 @@ use crate::{ backend::{decrypt::DecryptWriteBackend, node::Node}, blob::{packer::Packer, tree::Tree, BlobType}, error::ArchiverErrorKind, + error::RusticResult, id::Id, index::{indexer::SharedIndexer, IndexedBackend}, repofile::{configfile::ConfigFile, snapshotfile::SnapshotSummary}, - RusticResult, }; +pub(crate) type TreeItem = TreeType<(ParentResult<()>, u64), ParentResult>; + +/// The `TreeArchiver` is responsible for archiving trees. +/// +/// # Type Parameters +/// +/// * `BE` - The backend type. +/// * `I` - The index to read from. +/// +// TODO: Add documentation pub(crate) struct TreeArchiver { + /// The current tree. tree: Tree, + /// The stack of trees. stack: Vec<(PathBuf, Node, ParentResult, Tree)>, + /// The index to read from. index: I, + /// The packer to write to. tree_packer: Packer, + /// The summary of the snapshot. summary: SnapshotSummary, } -pub(crate) type TreeItem = TreeType<(ParentResult<()>, u64), ParentResult>; - impl TreeArchiver { + /// Creates a new `TreeArchiver`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// * `I` - The index to read from. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. + /// * `index` - The index to read from. + /// * `indexer` - The indexer to write to. + /// * `config` - The config file. + /// * `summary` - The summary of the snapshot. + /// + /// # Errors + /// + /// * [`PackerErrorKind::ZstdError`] - If the zstd compression level is invalid. + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. 
+ /// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails pub(crate) fn new( be: BE, index: I, @@ -48,6 +81,16 @@ impl TreeArchiver { }) } + /// Adds the given item to the tree. + /// + /// # Arguments + /// + /// * `item` - The item to add. + /// + /// # Errors + /// + /// * [`ArchiverErrorKind::TreeStackEmpty`] - If the tree stack is empty. + // TODO: Add more errors! pub(crate) fn add(&mut self, item: TreeItem) -> RusticResult<()> { match item { TreeType::NewTree((path, node, parent)) => { @@ -78,6 +121,13 @@ impl TreeArchiver { Ok(()) } + /// Adds the given file to the tree. + /// + /// # Arguments + /// + /// * `path` - The path of the file. + /// * `node` - The node of the file. + /// * `parent` - The parent result of the file. fn add_file(&mut self, path: &Path, node: Node, parent: &ParentResult<()>, size: u64) { let filename = path.join(node.name()); match parent { @@ -99,6 +149,20 @@ impl TreeArchiver { self.tree.add(node); } + /// Backs up the current tree. + /// + /// # Arguments + /// + /// * `path` - The path of the tree. + /// * `parent` - The parent result of the tree. + /// + /// # Errors + /// + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// + /// # Returns + /// + /// The id of the tree. fn backup_tree(&mut self, path: &Path, parent: &ParentResult) -> RusticResult { let (chunk, id) = self.tree.serialize()?; let dirsize = chunk.len() as u64; @@ -129,6 +193,23 @@ impl TreeArchiver { Ok(id) } + /// Finalizes the tree archiver. + /// + /// # Arguments + /// + /// * `parent_tree` - The parent tree. + /// + /// # Errors + /// + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// + /// # Returns + /// + /// A tuple containing the id of the tree and the summary of the snapshot. + /// + /// # Panics + /// + /// If the channel of the tree packer is not dropped.
pub(crate) fn finalize( mut self, parent_tree: Option, diff --git a/crates/rustic_core/src/backend.rs b/crates/rustic_core/src/backend.rs index 3a3a5d9da..cb76f56d5 100644 --- a/crates/rustic_core/src/backend.rs +++ b/crates/rustic_core/src/backend.rs @@ -13,11 +13,10 @@ pub(crate) mod stdin; use std::{io::Read, path::PathBuf}; use bytes::Bytes; -use displaydoc::Display; use log::trace; use serde::{Deserialize, Serialize}; -use crate::{backend::node::Node, error::BackendErrorKind, id::Id, RusticResult}; +use crate::{backend::node::Node, error::BackendErrorKind, error::RusticResult, id::Id}; /// All [`FileType`]s which are located in separated directories pub const ALL_FILE_TYPES: [FileType; 4] = [ @@ -28,38 +27,37 @@ pub const ALL_FILE_TYPES: [FileType; 4] = [ ]; /// Type for describing the kind of a file that can occur. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Display, Serialize, Deserialize)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum FileType { - /// config + /// Config file #[serde(rename = "config")] Config, - /// index + /// Index #[serde(rename = "index")] Index, - /// keys + /// Keys #[serde(rename = "key")] Key, - /// snapshots + /// Snapshots #[serde(rename = "snapshot")] Snapshot, - /// data + /// Data #[serde(rename = "pack")] Pack, } -impl From for &'static str { - fn from(value: FileType) -> &'static str { - match value { - FileType::Config => "config", - FileType::Snapshot => "snapshots", - FileType::Index => "index", - FileType::Key => "keys", - FileType::Pack => "data", +impl FileType { + const fn dirname(self) -> &'static str { + match self { + Self::Config => "config", + Self::Snapshot => "snapshots", + Self::Index => "index", + Self::Key => "keys", + Self::Pack => "data", } } -} -impl FileType { + /// Returns if the file type is cacheable. 
const fn is_cacheable(self) -> bool { match self { Self::Config | Self::Key | Self::Pack => false, @@ -68,13 +66,45 @@ impl FileType { } } +/// Trait for backends that can read. +/// +/// This trait is implemented by all backends that can read data. pub trait ReadBackend: Clone + Send + Sync + 'static { + /// Returns the location of the backend. fn location(&self) -> String; + /// Sets an option of the backend. + /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. + /// + /// # Errors + /// + /// If the option is not supported. fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()>; + /// Lists all files with their size of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// If the files could not be listed. fn list_with_size(&self, tpe: FileType) -> RusticResult>; + /// Lists all files of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// If the files could not be listed. fn list(&self, tpe: FileType) -> RusticResult> { Ok(self .list_with_size(tpe)? @@ -83,8 +113,31 @@ pub trait ReadBackend: Clone + Send + Sync + 'static { .collect()) } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// If the file could not be read. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult; + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file should be cached. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Errors + /// + /// If the file could not be read. 
fn read_partial( &self, tpe: FileType, @@ -94,7 +147,27 @@ pub trait ReadBackend: Clone + Send + Sync + 'static { length: u32, ) -> RusticResult; - fn find_starts_with(&self, tpe: FileType, vec: &[String]) -> RusticResult> { + /// Finds the id of the file starting with the given string. + /// + /// # Type Parameters + /// + /// * `T` - The type of the strings. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `vec` - The strings to search for. + /// + /// # Errors + /// + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. + /// + /// # Note + /// + /// This function is used to find the id of a snapshot or index file. + /// The id of a snapshot or index file is the id of the first pack file. + fn find_starts_with>(&self, tpe: FileType, vec: &[T]) -> RusticResult> { #[derive(Clone, Copy, PartialEq, Eq)] enum MapResult { None, @@ -105,7 +178,7 @@ pub trait ReadBackend: Clone + Send + Sync + 'static { for id in self.list(tpe)? { let id_hex = id.to_hex(); for (i, v) in vec.iter().enumerate() { - if id_hex.starts_with(v) { + if id_hex.starts_with(v.as_ref()) { if results[i] == MapResult::None { results[i] = MapResult::Some(id); } else { @@ -121,20 +194,50 @@ pub trait ReadBackend: Clone + Send + Sync + 'static { .map(|(i, id)| match id { MapResult::Some(id) => Ok(id), MapResult::None => { - Err(BackendErrorKind::NoSuitableIdFound((vec[i]).clone()).into()) + Err(BackendErrorKind::NoSuitableIdFound((vec[i]).as_ref().to_string()).into()) + } + MapResult::NonUnique => { + Err(BackendErrorKind::IdNotUnique((vec[i]).as_ref().to_string()).into()) } - MapResult::NonUnique => Err(BackendErrorKind::IdNotUnique((vec[i]).clone()).into()), }) .collect() } + /// Finds the id of the file starting with the given string. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The string to search for. 
+ /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. fn find_id(&self, tpe: FileType, id: &str) -> RusticResult { Ok(self.find_ids(tpe, &[id.to_string()])?.remove(0)) } - fn find_ids(&self, tpe: FileType, ids: &[String]) -> RusticResult> { + /// Finds the ids of the files starting with the given strings. + /// + /// # Type Parameters + /// + /// * `T` - The type of the strings. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `ids` - The strings to search for. + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. + fn find_ids>(&self, tpe: FileType, ids: &[T]) -> RusticResult> { ids.iter() - .map(|id| Id::from_hex(id)) + .map(|id| Id::from_hex(id.as_ref())) .collect::>>() .or_else(|err|{ trace!("no valid IDs given: {err}, searching for ID starting with given strings instead"); @@ -142,37 +245,110 @@ pub trait ReadBackend: Clone + Send + Sync + 'static { } } +/// Trait for backends that can write. +/// This trait is implemented by all backends that can write data. pub trait WriteBackend: ReadBackend { + /// Creates a new backend. fn create(&self) -> RusticResult<()>; + /// Writes bytes to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the data should be cached. + /// * `buf` - The data to write. fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()>; + /// Removes the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. 
+ /// * `cacheable` - Whether the file is cacheable. fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()>; } +/// Information about an entry to be able to open it. +/// +/// # Type Parameters +/// +/// * `O` - The type of the open information. #[derive(Debug, Clone)] pub struct ReadSourceEntry { + /// The path of the entry. pub path: PathBuf, + + /// The node information of the entry. pub node: Node, + + /// Information about how to open the entry. pub open: Option, } +/// Trait for backends that can read and open sources. +/// This trait is implemented by all backends that can read data and open from a source. pub trait ReadSourceOpen { type Reader: Read + Send + 'static; + /// Opens the source. fn open(self) -> RusticResult; } +/// Trait for backends that can read from a source. +/// +/// This trait is implemented by all backends that can read data from a source. pub trait ReadSource { type Open: ReadSourceOpen; type Iter: Iterator>>; + /// Returns the size of the source. fn size(&self) -> RusticResult>; + + /// Returns an iterator over the entries of the source. fn entries(self) -> Self::Iter; } +/// Trait for backends that can write to a source. +/// +/// This trait is implemented by all backends that can write data to a source. pub trait WriteSource: Clone { + /// Create a new source. + /// + /// # Type Parameters + /// + /// * `P` - The type of the path. + /// + /// # Arguments + /// + /// * `path` - The path of the source. + /// * `node` - The node information of the source. fn create>(&self, path: P, node: Node); + + /// Set the metadata of a source. + /// + /// # Type Parameters + /// + /// * `P` - The type of the path. + /// + /// # Arguments + /// + /// * `path` - The path of the source. + /// * `node` - The node information of the source. fn set_metadata>(&self, path: P, node: Node); + + /// Write data to a source at the given offset. + /// + /// # Type Parameters + /// + /// * `P` - The type of the path. 
+ /// + /// # Arguments + /// + /// * `path` - The path of the source. + /// * `offset` - The offset to write at. + /// * `data` - The data to write. fn write_at>(&self, path: P, offset: u64, data: Bytes); } diff --git a/crates/rustic_core/src/backend/cache.rs b/crates/rustic_core/src/backend/cache.rs index 384a72b30..aa1151e53 100644 --- a/crates/rustic_core/src/backend/cache.rs +++ b/crates/rustic_core/src/backend/cache.rs @@ -13,31 +13,66 @@ use walkdir::WalkDir; use crate::{ backend::{FileType, ReadBackend, WriteBackend}, error::CacheBackendErrorKind, + error::RusticResult, id::Id, - RusticResult, }; +/// Backend that caches data. +/// +/// This backend caches data in a directory. +/// It can be used to cache data from a remote backend. +/// +/// # Type Parameters +/// +/// * `BE` - The backend to cache. #[derive(Clone, Debug)] pub struct CachedBackend { + /// The backend to cache. be: BE, + /// The cache. cache: Option, } impl CachedBackend { + /// Create a new [`CachedBackend`] from a given backend. + /// + /// # Type Parameters + /// + /// * `BE` - The backend to cache. pub fn new(be: BE, cache: Option) -> Self { Self { be, cache } } } impl ReadBackend for CachedBackend { + /// Returns the location of the backend as a String. fn location(&self) -> String { self.be.location() } + /// Sets an option of the backend. + /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()> { self.be.set_option(option, value) } + /// Lists all files with their size of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// If the backend does not support listing files. + /// + /// # Returns + /// + /// A vector of tuples containing the id and size of the files. 
fn list_with_size(&self, tpe: FileType) -> RusticResult> { let list = self.be.list_with_size(tpe)?; @@ -50,6 +85,20 @@ impl ReadBackend for CachedBackend { Ok(list) } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be read. + /// + /// # Returns + /// + /// The data read. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { match (&self.cache, tpe.is_cacheable()) { (None, _) | (Some(_), false) => self.be.read_full(tpe, id), @@ -67,6 +116,23 @@ impl ReadBackend for CachedBackend { } } + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be read. + /// + /// # Returns + /// + /// The data read. fn read_partial( &self, tpe: FileType, @@ -105,10 +171,21 @@ impl ReadBackend for CachedBackend { } impl WriteBackend for CachedBackend { + /// Creates the backend. fn create(&self) -> RusticResult<()> { self.be.create() } + /// Writes the given data to the given file. + /// + /// If the file is cacheable, it will also be written to the cache. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `buf` - The data to write. fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { if let Some(cache) = &self.cache { if cacheable || tpe.is_cacheable() { @@ -118,6 +195,14 @@ impl WriteBackend for CachedBackend { self.be.write_bytes(tpe, id, cacheable, buf) } + /// Removes the given file. 
+ /// + /// If the file is cacheable, it will also be removed from the cache. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { if let Some(cache) = &self.cache { if cacheable || tpe.is_cacheable() { @@ -128,12 +213,27 @@ impl WriteBackend for CachedBackend { } } +/// Backend that caches data in a directory. #[derive(Clone, Debug)] pub struct Cache { + /// The path to the cache. path: PathBuf, } impl Cache { + /// Creates a new [`Cache`] with the given id. + /// + /// If no path is given, the cache will be created in the default cache directory. + /// + /// # Arguments + /// + /// * `id` - The id of the cache. + /// * `path` - The path to the cache. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::NoCacheDirectory`] - If no path is given and the default cache directory could not be determined. + /// * [`CacheBackendErrorKind::FromIoError`] - If the cache directory could not be created. pub fn new(id: Id, path: Option) -> RusticResult { let mut path = path.unwrap_or({ let mut dir = cache_dir().ok_or_else(|| CacheBackendErrorKind::NoCacheDirectory)?; @@ -152,28 +252,51 @@ impl Cache { /// # Panics /// /// Panics if the path is not valid unicode. + // TODO: Does this need to panic? Result? #[must_use] pub fn location(&self) -> &str { self.path.to_str().unwrap() } + /// Returns the path to the directory of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the directory. + /// * `id` - The id of the directory. #[must_use] pub fn dir(&self, tpe: FileType, id: &Id) -> PathBuf { let hex_id = id.to_hex(); - self.path.join(tpe.to_string()).join(&hex_id[0..2]) + self.path.join(tpe.dirname()).join(&hex_id[0..2]) } + /// Returns the path to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. 
#[must_use] pub fn path(&self, tpe: FileType, id: &Id) -> PathBuf { let hex_id = id.to_hex(); self.path - .join(tpe.to_string()) + .join(tpe.dirname()) .join(&hex_id[0..2]) .join(hex_id) } + /// Lists all files with their size of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the cache directory could not be read. + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string pub fn list_with_size(&self, tpe: FileType) -> RusticResult> { - let path = self.path.join(tpe.to_string()); + let path = self.path.join(tpe.dirname()); let walker = WalkDir::new(path) .into_iter() @@ -200,6 +323,16 @@ impl Cache { Ok(walker.collect()) } + /// Removes all files from the cache that are not in the given list. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files. + /// * `list` - The list of files. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the cache directory could not be read. pub fn remove_not_in_list(&self, tpe: FileType, list: &Vec<(Id, u32)>) -> RusticResult<()> { let mut list_cache = self.list_with_size(tpe)?; // remove present files from the cache list @@ -218,6 +351,16 @@ impl Cache { Ok(()) } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be read. pub fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("cache reading tpe: {:?}, id: {}", &tpe, &id); let data = fs::read(self.path(tpe, id)).map_err(CacheBackendErrorKind::FromIoError)?; @@ -225,6 +368,18 @@ impl Cache { Ok(data.into()) } + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. 
+ /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be read. pub fn read_partial( &self, tpe: FileType, @@ -250,6 +405,17 @@ impl Cache { Ok(vec.into()) } + /// Writes the given data to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `buf` - The data to write. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be written. pub fn write_bytes(&self, tpe: FileType, id: &Id, buf: Bytes) -> RusticResult<()> { trace!("cache writing tpe: {:?}, id: {}", &tpe, &id); fs::create_dir_all(self.dir(tpe, id)).map_err(CacheBackendErrorKind::FromIoError)?; @@ -264,6 +430,16 @@ impl Cache { Ok(()) } + /// Removes the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`CacheBackendErrorKind::FromIoError`] - If the file could not be removed. pub fn remove(&self, tpe: FileType, id: &Id) -> RusticResult<()> { trace!("cache writing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); diff --git a/crates/rustic_core/src/backend/choose.rs b/crates/rustic_core/src/backend/choose.rs index 74e40af9f..b635876e3 100644 --- a/crates/rustic_core/src/backend/choose.rs +++ b/crates/rustic_core/src/backend/choose.rs @@ -6,18 +6,34 @@ use crate::{ WriteBackend, }, error::BackendErrorKind, + error::RusticResult, id::Id, - RusticResult, }; +/// Backend helper that chooses the correct backend based on the url. #[derive(Clone, Debug)] pub enum ChooseBackend { + /// Local backend. Local(LocalBackend), + /// REST backend. Rest(RestBackend), + /// Rclone backend. Rclone(RcloneBackend), } impl ChooseBackend { + /// Create a new [`ChooseBackend`] from a given url. 
+ /// + /// # Arguments + /// + /// * `url` - The url to create the [`ChooseBackend`] from. + /// + /// # Errors + /// + /// * [`BackendErrorKind::BackendNotSupported`] - If the backend is not supported. + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// * [`RestErrorKind::UrlParsingFailed`] - If the url could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. pub fn from_url(url: &str) -> RusticResult { Ok(match url.split_once(':') { #[cfg(windows)] @@ -34,6 +50,7 @@ impl ChooseBackend { } impl ReadBackend for ChooseBackend { + /// Returns the location of the backend. fn location(&self) -> String { match self { Self::Local(local) => local.location(), @@ -42,6 +59,12 @@ impl ReadBackend for ChooseBackend { } } + /// Sets an option of the backend. + /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()> { match self { Self::Local(local) => local.set_option(option, value), @@ -50,6 +73,19 @@ impl ReadBackend for ChooseBackend { } } + /// Lists all files with their size of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// If the backend does not support listing files. + /// + /// # Returns + /// + /// A vector of tuples containing the id and size of the files. fn list_with_size(&self, tpe: FileType) -> RusticResult> { match self { Self::Local(local) => local.list_with_size(tpe), @@ -58,6 +94,22 @@ impl ReadBackend for ChooseBackend { } } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`LocalErrorKind::ReadingContentsOfFileFailed`] - If the file could not be read. + /// * [`reqwest::Error`] - If the request failed. 
+ /// * [`RestErrorKind::BackoffError`] - If the backoff failed. + /// + /// # Returns + /// + /// The data read. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { match self { Self::Local(local) => local.read_full(tpe, id), @@ -66,6 +118,19 @@ impl ReadBackend for ChooseBackend { } } + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Returns + /// + /// The data read. fn read_partial( &self, tpe: FileType, @@ -83,6 +148,7 @@ impl ReadBackend for ChooseBackend { } impl WriteBackend for ChooseBackend { + /// Creates the backend. fn create(&self) -> RusticResult<()> { match self { Self::Local(local) => local.create(), @@ -91,6 +157,14 @@ impl WriteBackend for ChooseBackend { } } + /// Writes the given data to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `buf` - The data to write. fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { match self { Self::Local(local) => local.write_bytes(tpe, id, cacheable, buf), @@ -99,6 +173,13 @@ impl WriteBackend for ChooseBackend { } } + /// Removes the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. 
fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { match self { Self::Local(local) => local.remove(tpe, id, cacheable), diff --git a/crates/rustic_core/src/backend/decrypt.rs b/crates/rustic_core/src/backend/decrypt.rs index 5623f389a..756d14b7b 100644 --- a/crates/rustic_core/src/backend/decrypt.rs +++ b/crates/rustic_core/src/backend/decrypt.rs @@ -5,6 +5,13 @@ use crossbeam_channel::{unbounded, Receiver}; use rayon::prelude::*; use zstd::stream::{copy_encode, decode_all}; +pub use zstd::compression_level_range; + +/// The maximum compression level allowed by zstd +pub fn max_compression_level() -> i32 { + *compression_level_range().end() +} + use crate::{ backend::FileType, backend::ReadBackend, @@ -16,14 +23,49 @@ use crate::{ Progress, RusticResult, }; +/// A backend that can decrypt data. +/// This is a trait that is implemented by all backends that can decrypt data. +/// It is implemented for all backends that implement `DecryptWriteBackend` and `DecryptReadBackend`. +/// This trait is used by the `Repository` to decrypt data. pub trait DecryptFullBackend: DecryptWriteBackend + DecryptReadBackend {} + impl DecryptFullBackend for T {} pub trait DecryptReadBackend: ReadBackend { + /// Decrypts the given data. + /// + /// # Arguments + /// + /// * `data` - The data to decrypt. + /// + /// # Errors + /// + /// If the data could not be decrypted. fn decrypt(&self, data: &[u8]) -> RusticResult>; + /// Reads the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// If the file could not be read. fn read_encrypted_full(&self, tpe: FileType, id: &Id) -> RusticResult; + /// Reads the given file from partial data. + /// + /// # Arguments + /// + /// * `data` - The partial data to decrypt. + /// * `uncompressed_length` - The length of the uncompressed data. 
+ /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::DecodingZstdCompressedDataFailed`] - If the data could not be decoded. + /// * [`CryptBackendErrorKind::LengthOfUncompressedDataDoesNotMatch`] - If the length of the uncompressed data does not match the given length. fn read_encrypted_from_partial( &self, data: &[u8], @@ -40,6 +82,20 @@ pub trait DecryptReadBackend: ReadBackend { Ok(data.into()) } + /// Reads the given file with the given offset and length. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file should be cached. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// * `uncompressed_length` - The length of the uncompressed data. + /// + /// # Errors + /// + /// If the file could not be read. fn read_encrypted_partial( &self, tpe: FileType, @@ -55,12 +111,30 @@ pub trait DecryptReadBackend: ReadBackend { ) } + /// Gets the given file. + /// + /// # Arguments + /// + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// If the file could not be read. fn get_file(&self, id: &Id) -> RusticResult { let data = self.read_encrypted_full(F::TYPE, id)?; Ok(serde_json::from_slice(&data) .map_err(CryptBackendErrorKind::DeserializingFromBytesOfJsonTextFailed)?) } + /// Streams all files. + /// + /// # Arguments + /// + /// * `p` - The progress bar. + /// + /// # Errors + /// + /// If the files could not be read. fn stream_all( &self, p: &impl Progress, @@ -69,6 +143,16 @@ pub trait DecryptReadBackend: ReadBackend { self.stream_list(list, p) } + /// Streams a list of files. + /// + /// # Arguments + /// + /// * `list` - The list of files to stream. + /// * `p` - The progress bar. + /// + /// # Errors + /// + /// If the files could not be read. fn stream_list( &self, list: Vec, @@ -88,17 +172,57 @@ pub trait DecryptReadBackend: ReadBackend { } pub trait DecryptWriteBackend: WriteBackend { + /// The type of the key. 
type Key: CryptoKey; + /// Gets the key. fn key(&self) -> &Self::Key; + + /// Writes the given data to the backend and returns the id of the data. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `data` - The data to write. + /// + /// # Errors + /// + /// If the data could not be written. + /// + /// # Returns + /// + /// The id of the data. (TODO: Check if this is correct) fn hash_write_full(&self, tpe: FileType, data: &[u8]) -> RusticResult; + /// Saves the given file. + /// + /// # Arguments + /// + /// * `file` - The file to save. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. + /// + /// # Returns + /// + /// The id of the file. fn save_file(&self, file: &F) -> RusticResult { let data = serde_json::to_vec(file) .map_err(CryptBackendErrorKind::SerializingToJsonByteVectorFailed)?; self.hash_write_full(F::TYPE, &data) } + /// Saves the given list of files. + /// + /// # Arguments + /// + /// * `list` - The list of files to save. + /// * `p` - The progress bar. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. fn save_list<'a, F: RepoFile, I: ExactSizeIterator + Send>( &self, list: I, @@ -114,6 +238,18 @@ pub trait DecryptWriteBackend: WriteBackend { Ok(()) } + /// Deletes the given list of files. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files. + /// * `cacheable` - Whether the files should be cached. + /// * `list` - The list of files to delete. + /// * `p` - The progress bar. + /// + /// # Panics + /// + /// If the files could not be deleted. 
fn delete_list<'a, I: ExactSizeIterator + Send>( &self, tpe: FileType, @@ -123,6 +259,7 @@ pub trait DecryptWriteBackend: WriteBackend { ) -> RusticResult<()> { p.set_length(list.len() as u64); list.par_bridge().try_for_each(|id| -> RusticResult<_> { + // TODO: Don't panic on file not being able to be deleted. self.remove(tpe, id, cacheable).unwrap(); p.inc(1); Ok(()) @@ -135,14 +272,38 @@ pub trait DecryptWriteBackend: WriteBackend { fn set_zstd(&mut self, zstd: Option); } +/// A backend that can decrypt data. +/// +/// # Type Parameters +/// +/// * `R` - The type of the backend to decrypt. +/// * `C` - The type of the key to decrypt the backend with. #[derive(Clone, Debug)] pub struct DecryptBackend { + /// The backend to decrypt. backend: R, + /// The key to decrypt the backend with. key: C, + /// The compression level to use for zstd. zstd: Option, } impl DecryptBackend { + /// Creates a new decrypt backend. + /// + /// # Type Parameters + /// + /// * `R` - The type of the backend to decrypt. + /// * `C` - The type of the key to decrypt the backend with. + /// + /// # Arguments + /// + /// * `be` - The backend to decrypt. + /// * `key` - The key to decrypt the backend with. + /// + /// # Returns + /// + /// The new decrypt backend. pub fn new(be: &R, key: C) -> Self { Self { backend: be.clone(), @@ -153,12 +314,28 @@ impl DecryptBackend { } impl DecryptWriteBackend for DecryptBackend { + /// The type of the key. type Key = C; + /// Gets the key. fn key(&self) -> &Self::Key { &self.key } + /// Writes the given data to the backend and returns the id of the data. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `data` - The data to write. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::CopyEncodingDataFailed`] - If the data could not be encoded. + /// + /// # Returns + /// + /// The id of the data. 
fn hash_write_full(&self, tpe: FileType, data: &[u8]) -> RusticResult { let data = match self.zstd { Some(level) => { @@ -174,16 +351,41 @@ impl DecryptWriteBackend for DecryptBackend Ok(id) } + /// Sets the compression level to use for zstd. + /// + /// # Arguments + /// + /// * `zstd` - The compression level to use for zstd. fn set_zstd(&mut self, zstd: Option) { self.zstd = zstd; } } impl DecryptReadBackend for DecryptBackend { + /// Decrypts the given data. + /// + /// # Arguments + /// + /// * `data` - The data to decrypt. + /// + /// # Returns + /// + /// A vector containing the decrypted data. fn decrypt(&self, data: &[u8]) -> RusticResult> { self.key.decrypt_data(data) } + /// Reads encrypted data from the backend. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::DecryptionNotSupportedForBackend`] - If the backend does not support decryption. + /// * [`CryptBackendErrorKind::DecodingZstdCompressedDataFailed`] - If the data could not be decoded. fn read_encrypted_full(&self, tpe: FileType, id: &Id) -> RusticResult { let decrypted = self.decrypt(&self.read_full(tpe, id)?)?; Ok(match decrypted.first() { diff --git a/crates/rustic_core/src/backend/dry_run.rs b/crates/rustic_core/src/backend/dry_run.rs index 6d1ae1247..9b67c6ade 100644 --- a/crates/rustic_core/src/backend/dry_run.rs +++ b/crates/rustic_core/src/backend/dry_run.rs @@ -4,19 +4,32 @@ use zstd::decode_all; use crate::{ backend::{ decrypt::DecryptFullBackend, decrypt::DecryptReadBackend, decrypt::DecryptWriteBackend, - FileType, Id, ReadBackend, WriteBackend, + FileType, ReadBackend, WriteBackend, }, - error::CryptBackendErrorKind, - RusticResult, + error::{CryptBackendErrorKind, RusticResult}, + id::Id, }; +/// A backend implementation that does not actually write to the backend. #[derive(Clone, Debug)] pub struct DryRunBackend { + /// The backend to use. 
be: BE, + /// Whether to run in dry-run mode, i.e. to skip actually writing to the backend. dry_run: bool, } impl DryRunBackend { + /// Create a new [`DryRunBackend`]. + /// + /// # Type Parameters + /// + /// * `BE` - The backend to use. + /// + /// # Arguments + /// + /// * `be` - The backend to use. + /// * `dry_run` - If `true`, don't actually write to the backend. pub const fn new(be: BE, dry_run: bool) -> Self { Self { be, dry_run } } @@ -27,6 +40,21 @@ impl DecryptReadBackend for DryRunBackend { self.be.decrypt(data) } + /// Reads encrypted data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::DecryptionNotSupportedForBackend`] - If the backend does not support decryption. + /// * [`CryptBackendErrorKind::DecodingZstdCompressedDataFailed`] - If decoding the zstd compressed data failed. + /// + /// # Returns + /// + /// The data read. fn read_encrypted_full(&self, tpe: FileType, id: &Id) -> RusticResult { let decrypted = self.decrypt(&self.read_full(tpe, id)?)?; Ok(match decrypted.first() { diff --git a/crates/rustic_core/src/backend/hotcold.rs b/crates/rustic_core/src/backend/hotcold.rs index d03b158f1..2f3455c22 100644 --- a/crates/rustic_core/src/backend/hotcold.rs +++ b/crates/rustic_core/src/backend/hotcold.rs @@ -2,13 +2,30 @@ use bytes::Bytes; use crate::{backend::FileType, backend::ReadBackend, backend::WriteBackend, id::Id, RusticResult}; +/// A hot/cold backend implementation. +/// +/// # Type Parameters +/// +/// * `BE` - The backend to use. #[derive(Clone, Debug)] pub struct HotColdBackend { + /// The backend to use. be: BE, + /// The backend to use for hot files. hot_be: Option, } impl HotColdBackend { + /// Creates a new `HotColdBackend`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend to use. + /// + /// # Arguments + /// + /// * `be` - The backend to use. + /// * `hot_be` - The backend to use for hot files.
pub fn new(be: BE, hot_be: Option) -> Self { Self { be, hot_be } } diff --git a/crates/rustic_core/src/backend/ignore.rs b/crates/rustic_core/src/backend/ignore.rs index ab93345d8..bb95bdcfa 100644 --- a/crates/rustic_core/src/backend/ignore.rs +++ b/crates/rustic_core/src/backend/ignore.rs @@ -14,6 +14,7 @@ use cached::proc_macro::cached; #[cfg(not(windows))] use chrono::TimeZone; use chrono::{DateTime, Local, Utc}; +use derive_setters::Setters; use ignore::{overrides::OverrideBuilder, DirEntry, Walk, WalkBuilder}; use log::warn; #[cfg(not(windows))] @@ -27,88 +28,111 @@ use crate::{ node::{Metadata, Node, NodeType}, ReadSource, ReadSourceEntry, ReadSourceOpen, }, - error::IgnoreErrorKind, - RusticResult, + error::{IgnoreErrorKind, RusticResult}, }; // Walk doesn't implement Debug #[allow(missing_debug_implementations)] +/// A [`LocalSource`] is a source from local paths which is used to be read from (i.e. to backup it). pub struct LocalSource { + /// The walk builder. builder: WalkBuilder, + /// The walk iterator. walker: Walk, + /// The save options to use. save_opts: LocalSourceSaveOptions, } #[serde_as] #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] -#[derive(serde::Deserialize, Default, Clone, Copy, Debug)] +#[derive(serde::Deserialize, serde::Serialize, Default, Clone, Copy, Debug, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] +#[setters(into)] +/// [`LocalSourceSaveOptions`] describes how entries from a local source will be saved in the repository. 
pub struct LocalSourceSaveOptions { /// Save access time for files and directories #[cfg_attr(feature = "clap", clap(long))] #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - with_atime: bool, + pub with_atime: bool, /// Don't save device ID for files and directories #[cfg_attr(feature = "clap", clap(long))] #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - ignore_devid: bool, + pub ignore_devid: bool, } #[serde_as] #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] -#[derive(serde::Deserialize, Default, Clone, Debug)] +#[derive(serde::Deserialize, serde::Serialize, Default, Clone, Debug, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] +#[setters(into)] +/// [`LocalSourceFilterOptions`] allow to filter a local source by various criteria. pub struct LocalSourceFilterOptions { /// Glob pattern to exclude/include (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long))] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - glob: Vec, + pub glob: Vec, /// Same as --glob pattern but ignores the casing of filenames #[cfg_attr(feature = "clap", clap(long, value_name = "GLOB"))] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - iglob: Vec, + pub iglob: Vec, /// Read glob patterns to exclude/include from this file (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long, value_name = "FILE"))] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - glob_file: Vec, + pub glob_file: Vec, /// Same as --glob-file ignores the casing of filenames in patterns #[cfg_attr(feature = "clap", clap(long, value_name = "FILE"))] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - iglob_file: Vec, + pub iglob_file: Vec, /// Ignore files based on .gitignore files #[cfg_attr(feature = "clap", clap(long))] 
#[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - git_ignore: bool, + pub git_ignore: bool, /// Do not require a git repository to apply git-ignore rule #[cfg_attr(feature = "clap", clap(long))] #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - no_require_git: bool, + pub no_require_git: bool, /// Exclude contents of directories containing this filename (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long, value_name = "FILE"))] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - exclude_if_present: Vec, + pub exclude_if_present: Vec, /// Exclude other file systems, don't cross filesystem boundaries and subvolumes #[cfg_attr(feature = "clap", clap(long, short = 'x'))] #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - one_file_system: bool, + pub one_file_system: bool, - /// Maximum size of files to be backuped. Larger files will be excluded. + /// Maximum size of files to be backed up. Larger files will be excluded. #[cfg_attr(feature = "clap", clap(long, value_name = "SIZE"))] #[serde_as(as = "Option")] - exclude_larger_than: Option, + pub exclude_larger_than: Option, } impl LocalSource { + /// Create a local source from [`LocalSourceSaveOptions`], [`LocalSourceFilterOptions`] and backup path(s). + /// + /// # Arguments + /// + /// * `save_opts` - The [`LocalSourceSaveOptions`] to use. + /// * `filter_opts` - The [`LocalSourceFilterOptions`] to use. + /// * `backup_paths` - The backup path(s) to use. + /// + /// # Returns + /// + /// The created local source. + /// + /// # Errors + /// + /// * [`IgnoreErrorKind::GenericError`] - If a glob pattern could not be added to the override builder. + /// * [`IgnoreErrorKind::FromIoError`] - If a glob file could not be read.
pub fn new( save_opts: LocalSourceSaveOptions, filter_opts: &LocalSourceFilterOptions, @@ -201,11 +225,21 @@ impl LocalSource { } #[derive(Debug)] +/// Describes an open file from the local backend. pub struct OpenFile(PathBuf); impl ReadSourceOpen for OpenFile { type Reader = File; + /// Open the file from the local backend. + /// + /// # Returns + /// + /// The read handle to the file from the local backend. + /// + /// # Errors + /// + /// * [`IgnoreErrorKind::UnableToOpenFile`] - If the file could not be opened. fn open(self) -> RusticResult { let path = self.0; File::open(path).map_err(|err| IgnoreErrorKind::UnableToOpenFile(err).into()) @@ -216,6 +250,15 @@ impl ReadSource for LocalSource { type Open = OpenFile; type Iter = Self; + /// Get the size of the local source. + /// + /// # Returns + /// + /// The size of the local source or `None` if the size could not be determined. + /// + /// # Errors + /// + /// If the size could not be determined. fn size(&self) -> RusticResult> { let mut size = 0; for entry in self.builder.build() { @@ -228,6 +271,11 @@ impl ReadSource for LocalSource { Ok(Some(size)) } + /// Iterate over the entries of the local source. + /// + /// # Returns + /// + /// An iterator over the entries of the local source. fn entries(self) -> Self::Iter { self } @@ -255,6 +303,18 @@ impl Iterator for LocalSource { } } +/// Maps a [`DirEntry`] to a [`ReadSourceEntry`]. +/// +/// # Arguments +/// +/// * `entry` - The [`DirEntry`] to map. +/// * `with_atime` - Whether to save access time for files and directories. +/// * `ignore_devid` - Whether to save device ID for files and directories. +/// +/// # Errors +/// +/// * [`IgnoreErrorKind::GenericError`] - If metadata could not be read. +/// * [`IgnoreErrorKind::FromIoError`] - If path of the entry could not be read. #[cfg(windows)] fn map_entry( entry: DirEntry, @@ -324,6 +384,15 @@ fn map_entry( Ok(ReadSourceEntry { path, node, open }) } +/// Get the user name for the given uid. 
+/// +/// # Arguments +/// +/// * `uid` - The uid to get the user name for. +/// +/// # Returns +/// +/// The user name for the given uid or `None` if the user could not be found. #[cfg(not(windows))] #[cached] fn get_user_by_uid(uid: u32) -> Option { @@ -337,6 +406,15 @@ fn get_user_by_uid(uid: u32) -> Option { } } +/// Get the group name for the given gid. +/// +/// # Arguments +/// +/// * `gid` - The gid to get the group name for. +/// +/// # Returns +/// +/// The group name for the given gid or `None` if the group could not be found. #[cfg(not(windows))] #[cached] fn get_group_by_gid(gid: u32) -> Option { @@ -350,6 +428,18 @@ fn get_group_by_gid(gid: u32) -> Option { } } +/// Maps a [`DirEntry`] to a [`ReadSourceEntry`]. +/// +/// # Arguments +/// +/// * `entry` - The [`DirEntry`] to map. +/// * `with_atime` - Whether to save access time for files and directories. +/// * `ignore_devid` - Whether to ignore (i.e. not save) the device ID of files and directories. +/// +/// # Errors +/// +/// * [`IgnoreErrorKind::GenericError`] - If metadata could not be read. +/// * [`IgnoreErrorKind::FromIoError`] - If the xattr of the entry could not be read. #[cfg(not(windows))] // map_entry: turn entry into (Path, Node) fn map_entry( diff --git a/crates/rustic_core/src/backend/local.rs b/crates/rustic_core/src/backend/local.rs index e90f05392..d819e6b1a 100644 --- a/crates/rustic_core/src/backend/local.rs +++ b/crates/rustic_core/src/backend/local.rs @@ -27,20 +27,62 @@ use crate::backend::node::NodeType; use crate::{ backend::{ node::{ExtendedAttribute, Metadata, Node}, - FileType, Id, ReadBackend, WriteBackend, ALL_FILE_TYPES, + FileType, ReadBackend, WriteBackend, ALL_FILE_TYPES, }, - error::LocalErrorKind, - RusticResult, + error::{LocalErrorKind, RusticResult}, + id::Id, }; +/// Local backend, used when backing up. +/// +/// This backend is used when backing up to a local directory.
+/// It will create a directory structure like this: +/// +/// ```text +/// / +/// ├── config +/// ├── data +/// │ ├── 00 +/// │ │ └── +/// │ ├── 01 +/// │ │ └── +/// │ └── ... +/// ├── index +/// │ └── +/// ├── keys +/// │ └── +/// ├── snapshots +/// │ └── +/// └── ... +/// ``` +/// +/// The `data` directory will contain all data files, split into 256 subdirectories. +/// The `config` directory will contain the config file. +/// The `index` directory will contain the index file. +/// The `keys` directory will contain the keys file. +/// The `snapshots` directory will contain the snapshots file. +/// All other directories will contain the pack files. #[derive(Clone, Debug)] pub struct LocalBackend { + /// The base path of the backend. path: PathBuf, + /// The command to call after a file was created. post_create_command: Option, + /// The command to call after a file was deleted. post_delete_command: Option, } impl LocalBackend { + /// Create a new [`LocalBackend`] + /// + /// # Arguments + /// + /// * `path` - The base path of the backend + /// + /// # Errors + /// + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + // TODO: We should use `impl Into` here. we even use it in the body! pub fn new(path: &str) -> RusticResult { let path = path.into(); fs::create_dir_all(&path).map_err(LocalErrorKind::DirectoryCreationFailed)?; @@ -51,20 +93,54 @@ impl LocalBackend { }) } + /// Path to the given file type and id. + /// + /// If the file type is `FileType::Pack`, the id will be used to determine the subdirectory. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Returns + /// + /// The path to the file. 
fn path(&self, tpe: FileType, id: &Id) -> PathBuf { let hex_id = id.to_hex(); match tpe { FileType::Config => self.path.join("config"), FileType::Pack => self.path.join("data").join(&hex_id[0..2]).join(hex_id), - _ => self.path.join(tpe.to_string()).join(hex_id), + _ => self.path.join(tpe.dirname()).join(hex_id), } } + /// Call the given command. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `filename` - The path to the file. + /// * `command` - The command to call. + /// + /// # Errors + /// + /// * [`LocalErrorKind::FromAhoCorasick`] - If the patterns could not be compiled. + /// * [`LocalErrorKind::FromNomError`] - If the command could not be parsed. + /// * [`LocalErrorKind::CommandExecutionFailed`] - If the command could not be executed. + /// * [`LocalErrorKind::CommandNotSuccessful`] - If the command was not successful. + /// + /// # Notes + /// + /// The following placeholders are supported: + /// * `%file` - The path to the file. + /// * `%type` - The type of the file. + /// * `%id` - The id of the file. fn call_command(tpe: FileType, id: &Id, filename: &Path, command: &str) -> RusticResult<()> { let id = id.to_hex(); let patterns = &["%file", "%type", "%id"]; let ac = AhoCorasick::new(patterns).map_err(LocalErrorKind::FromAhoCorasick)?; - let replace_with = &[filename.to_str().unwrap(), tpe.into(), id.as_str()]; + let replace_with = &[filename.to_str().unwrap(), tpe.dirname(), id.as_str()]; let actual_command = ac.replace_all(command, replace_with); debug!("calling {actual_command}..."); let commands = split(&actual_command).map_err(LocalErrorKind::FromSplitError)?; @@ -86,12 +162,27 @@ impl LocalBackend { } impl ReadBackend for LocalBackend { + /// Returns the location of the backend. + /// + /// This is `local:`. fn location(&self) -> String { let mut location = "local:".to_string(); location.push_str(&self.path.to_string_lossy()); location } + /// Sets an option of the backend. 
+ /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. + /// + /// # Notes + /// + /// The following options are supported: + /// * `post-create-command` - The command to call after a file was created. + /// * `post-delete-command` - The command to call after a file was deleted. fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()> { match option { "post-create-command" => { @@ -107,6 +198,19 @@ impl ReadBackend for LocalBackend { Ok(()) } + /// Lists all files of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// + /// # Notes + /// + /// If the file type is `FileType::Config`, this will return a list with a single default id. fn list(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); if tpe == FileType::Config { @@ -117,7 +221,7 @@ impl ReadBackend for LocalBackend { }); } - let walker = WalkDir::new(self.path.join(tpe.to_string())) + let walker = WalkDir::new(self.path.join(tpe.dirname())) .into_iter() .filter_map(walkdir::Result::ok) .filter(|e| e.file_type().is_file()) @@ -126,9 +230,21 @@ impl ReadBackend for LocalBackend { Ok(walker.collect()) } + /// Lists all files with their size of the given type. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// * [`LocalErrorKind::QueryingMetadataFailed`] - If the metadata of the file could not be queried. + /// * [`LocalErrorKind::FromTryIntError`] - If the length of the file could not be converted to u32. + /// * [`LocalErrorKind::QueryingWalkDirMetadataFailed`] - If the metadata of the file could not be queried. 
+ /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string fn list_with_size(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); - let path = self.path.join(tpe.to_string()); + let path = self.path.join(tpe.dirname()); if tpe == FileType::Config { return Ok(if path.exists() { @@ -164,6 +280,16 @@ impl ReadBackend for LocalBackend { Ok(walker.collect()) } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`LocalErrorKind::ReadingContentsOfFileFailed`] - If the file could not be read. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}"); Ok(fs::read(self.path(tpe, id)) @@ -171,6 +297,22 @@ impl ReadBackend for LocalBackend { .into()) } + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Errors + /// + /// * [`LocalErrorKind::OpeningFileFailed`] - If the file could not be opened. + /// * [`LocalErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. + /// * [`LocalErrorKind::FromTryIntError`] - If the length of the file could not be converted to u32. + /// * [`LocalErrorKind::ReadingExactLengthOfFileFailed`] - If the length of the file could not be read. fn read_partial( &self, tpe: FileType, @@ -196,11 +338,16 @@ impl ReadBackend for LocalBackend { } impl WriteBackend for LocalBackend { + /// Create a repository on the backend. + /// + /// # Errors + /// + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. 
fn create(&self) -> RusticResult<()> { trace!("creating repo at {:?}", self.path); for tpe in ALL_FILE_TYPES { - fs::create_dir_all(self.path.join(tpe.to_string())) + fs::create_dir_all(self.path.join(tpe.dirname())) .map_err(LocalErrorKind::DirectoryCreationFailed)?; } for i in 0u8..=255 { @@ -210,6 +357,22 @@ impl WriteBackend for LocalBackend { Ok(()) } + /// Write the given bytes to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `buf` - The bytes to write. + /// + /// # Errors + /// + /// * [`LocalErrorKind::OpeningFileFailed`] - If the file could not be opened. + /// * [`LocalErrorKind::FromTryIntError`] - If the length of the bytes could not be converted to u64. + /// * [`LocalErrorKind::SettingFileLengthFailed`] - If the length of the file could not be set. + /// * [`LocalErrorKind::CouldNotWriteToBuffer`] - If the bytes could not be written to the file. + /// * [`LocalErrorKind::SyncingOfOsMetadataFailed`] - If the metadata of the file could not be synced. fn write_bytes( &self, tpe: FileType, @@ -242,6 +405,17 @@ impl WriteBackend for LocalBackend { Ok(()) } + /// Remove the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// + /// # Errors + /// + /// * [`LocalErrorKind::FileRemovalFailed`] - If the file could not be removed. fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> RusticResult<()> { trace!("removing tpe: {:?}, id: {}", &tpe, &id); let filename = self.path(tpe, id); @@ -256,12 +430,27 @@ impl WriteBackend for LocalBackend { } #[derive(Clone, Debug)] +/// Local destination, used when restoring. pub struct LocalDestination { + /// The base path of the destination. path: PathBuf, + /// Whether we expect a single file as destination. 
is_file: bool, } impl LocalDestination { + /// Create a new [`LocalDestination`] + /// + /// # Arguments + /// + /// * `path` - The base path of the destination + /// * `create` - If `create` is true, create the base path if it doesn't exist. + /// * `expect_file` - Whether we expect a single file as destination. + /// + /// # Errors + /// + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + // TODO: We should use `impl Into` here. we even use it in the body! pub fn new(path: &str, create: bool, expect_file: bool) -> RusticResult { let is_dir = path.ends_with('/'); let path: PathBuf = path.into(); @@ -280,6 +469,20 @@ impl LocalDestination { Ok(Self { path, is_file }) } + /// Path to the given item (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to get the path for + /// + /// # Returns + /// + /// The path to the item. + /// + /// # Notes + /// + /// * If the destination is a file, this will return the base path. + /// * If the destination is a directory, this will return the base path joined with the item. pub(crate) fn path(&self, item: impl AsRef) -> PathBuf { if self.is_file { self.path.clone() @@ -288,20 +491,72 @@ impl LocalDestination { } } + /// Remove the given directory (relative to the base path) + /// + /// # Arguments + /// + /// * `dirname` - The directory to remove + /// + /// # Errors + /// + /// * [`LocalErrorKind::DirectoryRemovalFailed`] - If the directory could not be removed. + /// + /// # Notes + /// + /// This will remove the directory recursively. pub fn remove_dir(&self, dirname: impl AsRef) -> RusticResult<()> { Ok(fs::remove_dir_all(dirname).map_err(LocalErrorKind::DirectoryRemovalFailed)?) } + /// Remove the given file (relative to the base path) + /// + /// # Arguments + /// + /// * `filename` - The file to remove + /// + /// # Errors + /// + /// * [`LocalErrorKind::FileRemovalFailed`] - If the file could not be removed. 
+ /// + /// # Notes + /// + /// This will remove the file. + /// + /// * If the file is a symlink, the symlink will be removed, not the file it points to. + /// * If the file is a directory or device, this will fail. pub fn remove_file(&self, filename: impl AsRef) -> RusticResult<()> { Ok(fs::remove_file(filename).map_err(LocalErrorKind::FileRemovalFailed)?) } + /// Create the given directory (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The directory to create + /// + /// # Errors + /// + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// + /// # Notes + /// + /// This will create the directory structure recursively. pub fn create_dir(&self, item: impl AsRef) -> RusticResult<()> { let dirname = self.path.join(item); fs::create_dir_all(dirname).map_err(LocalErrorKind::DirectoryCreationFailed)?; Ok(()) } + /// Set changed and modified times for `item` (relative to the base path) utilizing the file metadata + /// + /// # Arguments + /// + /// * `item` - The item to set the times for + /// * `meta` - The metadata to get the times from + /// + /// # Errors + /// + /// * [`LocalErrorKind::SettingTimeMetadataFailed`] - If the times could not be set pub fn set_times(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { let filename = self.path(item); if let Some(mtime) = meta.mtime { @@ -319,6 +574,16 @@ impl LocalDestination { #[cfg(windows)] // TODO: Windows support + /// Set user/group for `item` (relative to the base path) utilizing the file metadata + /// + /// # Arguments + /// + /// * `item` - The item to set the user/group for + /// * `meta` - The metadata to get the user/group from + /// + /// # Errors + /// + /// If the user/group could not be set. 
pub fn set_user_group(&self, _item: impl AsRef, _meta: &Metadata) -> RusticResult<()> { // https://learn.microsoft.com/en-us/windows/win32/fileio/file-security-and-access-rights // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Security/struct.SECURITY_ATTRIBUTES.html @@ -327,6 +592,16 @@ impl LocalDestination { } #[cfg(not(windows))] + /// Set user/group for `item` (relative to the base path) utilizing the file metadata + /// + /// # Arguments + /// + /// * `item` - The item to set the user/group for + /// * `meta` - The metadata to get the user/group from + /// + /// # Errors + /// + /// * [`LocalErrorKind::FromErrnoError`] - If the user/group could not be set. pub fn set_user_group(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { let filename = self.path(item); @@ -351,11 +626,31 @@ impl LocalDestination { #[cfg(windows)] // TODO: Windows support + /// Set uid/gid for `item` (relative to the base path) utilizing the file metadata + /// + /// # Arguments + /// + /// * `item` - The item to set the uid/gid for + /// * `meta` - The metadata to get the uid/gid from + /// + /// # Errors + /// + /// If the uid/gid could not be set. pub fn set_uid_gid(&self, _item: impl AsRef, _meta: &Metadata) -> RusticResult<()> { Ok(()) } #[cfg(not(windows))] + /// Set uid/gid for `item` (relative to the base path) utilizing the file metadata + /// + /// # Arguments + /// + /// * `item` - The item to set the uid/gid for + /// * `meta` - The metadata to get the uid/gid from + /// + /// # Errors + /// + /// * [`LocalErrorKind::FromErrnoError`] - If the uid/gid could not be set. 
pub fn set_uid_gid(&self, item: impl AsRef, meta: &Metadata) -> RusticResult<()> { let filename = self.path(item); @@ -369,13 +664,33 @@ impl LocalDestination { #[cfg(windows)] // TODO: Windows support + /// Set permissions for `item` (relative to the base path) from `node` + /// + /// # Arguments + /// + /// * `item` - The item to set the permissions for + /// * `node` - The node to get the permissions from + /// + /// # Errors + /// + /// If the permissions could not be set. pub fn set_permission(&self, _item: impl AsRef, _node: &Node) -> RusticResult<()> { Ok(()) } #[cfg(not(windows))] + /// Set permissions for `item` (relative to the base path) from `node` + /// + /// # Arguments + /// + /// * `item` - The item to set the permissions for + /// * `node` - The node to get the permissions from + /// + /// # Errors + /// + /// * [`LocalErrorKind::SettingFilePermissionsFailed`] - If the permissions could not be set. pub fn set_permission(&self, item: impl AsRef, node: &Node) -> RusticResult<()> { - if node.node_type.is_symlink() { + if node.is_symlink() { return Ok(()); } @@ -392,6 +707,16 @@ impl LocalDestination { #[cfg(any(windows, target_os = "openbsd"))] // TODO: Windows support // TODO: openbsd support + /// Set extended attributes for `item` (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to set the extended attributes for + /// * `extended_attributes` - The extended attributes to set + /// + /// # Errors + /// + /// If the extended attributes could not be set. 
pub fn set_extended_attributes( &self, _item: impl AsRef, @@ -401,6 +726,18 @@ impl LocalDestination { } #[cfg(not(any(windows, target_os = "openbsd")))] + /// Set extended attributes for `item` (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to set the extended attributes for + /// * `extended_attributes` - The extended attributes to set + /// + /// # Errors + /// + /// * [`LocalErrorKind::ListingXattrsFailed`] - If listing the extended attributes failed. + /// * [`LocalErrorKind::GettingXattrFailed`] - If getting an extended attribute failed. + /// * [`LocalErrorKind::SettingXattrFailed`] - If setting an extended attribute failed. pub fn set_extended_attributes( &self, item: impl AsRef, @@ -457,7 +794,24 @@ impl LocalDestination { Ok(()) } - // set_length sets the length of the given file. If it doesn't exist, create a new (empty) one with given length + /// Set length of `item` (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to set the length for + /// * `size` - The size to set the length to + /// + /// # Errors + /// + /// * [`LocalErrorKind::FileDoesNotHaveParent`] - If the file does not have a parent. + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// * [`LocalErrorKind::OpeningFileFailed`] - If the file could not be opened. + /// * [`LocalErrorKind::SettingFileLengthFailed`] - If the length of the file could not be set. + /// + /// # Notes + /// + /// If the file exists, truncate it to the given length. (TODO: check if this is correct) + /// If it doesn't exist, create a new (empty) one with given length. 
pub fn set_length(&self, item: impl AsRef, size: u64) -> RusticResult<()> { let filename = self.path(item); let dir = filename @@ -477,11 +831,24 @@ impl LocalDestination { #[cfg(windows)] // TODO: Windows support + /// Create a special file (relative to the base path) pub fn create_special(&self, _item: impl AsRef, _node: &Node) -> RusticResult<()> { Ok(()) } #[cfg(not(windows))] + /// Create a special file (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to create + /// * `node` - The node to get the type from + /// + /// # Errors + /// + /// * [`LocalErrorKind::SymlinkingFailed`] - If the symlink could not be created. + /// * [`LocalErrorKind::FromTryIntError`] - If the device could not be converted to the correct type. + /// * [`LocalErrorKind::FromErrnoError`] - If the device could not be created. pub fn create_special(&self, item: impl AsRef, node: &Node) -> RusticResult<()> { let filename = self.path(item); @@ -535,6 +902,20 @@ impl LocalDestination { Ok(()) } + /// Read the given item (relative to the base path) + /// + /// # Arguments + /// + /// * `item` - The item to read + /// * `offset` - The offset to read from + /// * `length` - The length to read + /// + /// # Errors + /// + /// * [`LocalErrorKind::OpeningFileFailed`] - If the file could not be opened. + /// * [`LocalErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. + /// * [`LocalErrorKind::FromTryIntError`] - If the length of the file could not be converted to u32. + /// * [`LocalErrorKind::ReadingExactLengthOfFileFailed`] - If the length of the file could not be read. pub fn read_at(&self, item: impl AsRef, offset: u64, length: u64) -> RusticResult { let filename = self.path(item); let mut file = File::open(filename).map_err(LocalErrorKind::OpeningFileFailed)?; @@ -547,6 +928,17 @@ impl LocalDestination { Ok(vec.into()) } + /// Check if a matching file exists. 
+ /// + /// # Arguments + /// + /// * `item` - The item to check + /// * `size` - The size to check + /// + /// # Returns + /// + /// If a file exists and size matches, this returns a `File` open for reading. + /// In all other cases, returns `None` pub fn get_matching_file(&self, item: impl AsRef, size: u64) -> Option { let filename = self.path(item); fs::symlink_metadata(&filename).map_or_else( @@ -561,6 +953,23 @@ impl LocalDestination { ) } + /// Write `data` to given item (relative to the base path) at `offset` + /// + /// # Arguments + /// + /// * `item` - The item to write to + /// * `offset` - The offset to write at + /// * `data` - The data to write + /// + /// # Errors + /// + /// * [`LocalErrorKind::OpeningFileFailed`] - If the file could not be opened. + /// * [`LocalErrorKind::CouldNotSeekToPositionInFile`] - If the file could not be seeked to the given position. + /// * [`LocalErrorKind::CouldNotWriteToBuffer`] - If the bytes could not be written to the file. + /// + /// # Notes + /// + /// This will create the file if it doesn't exist. pub fn write_at(&self, item: impl AsRef, offset: u64, data: &[u8]) -> RusticResult<()> { let filename = self.path(item); let mut file = fs::OpenOptions::new() diff --git a/crates/rustic_core/src/backend/node.rs b/crates/rustic_core/src/backend/node.rs index 77975801e..0eee30cad 100644 --- a/crates/rustic_core/src/backend/node.rs +++ b/crates/rustic_core/src/backend/node.rs @@ -15,7 +15,7 @@ use std::os::unix::ffi::OsStrExt; use crate::RusticResult; use chrono::{DateTime, Local}; -use derive_more::{Constructor, IsVariant}; +use derive_more::Constructor; use serde::{Deserialize, Deserializer, Serialize}; use serde_aux::prelude::*; use serde_with::{ @@ -30,44 +30,83 @@ use crate::error::NodeErrorKind; use crate::id::Id; #[derive(Default, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Constructor)] +/// A node within the tree hierarchy pub struct Node { + /// Name of the node: filename or dirname. 
+ /// + /// # Warning + /// + /// This contains an escaped variant of the name in order to handle non-unicode filenames. + /// Don't access this field directly, use the [`Node::name()`] method instead! pub name: String, #[serde(flatten)] + /// Information about node type pub node_type: NodeType, #[serde(flatten)] + /// Node Metadata pub meta: Metadata, #[serde(default, deserialize_with = "deserialize_default_from_null")] + /// Contents of the Node + /// + /// # Note + /// + /// This should be only set for regular files. pub content: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] + /// Subtree of the Node. + /// + /// # Note + /// + /// This should be only set for directories. (TODO: Check if this is correct) pub subtree: Option, } #[serde_as] -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, IsVariant)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(tag = "type", rename_all = "lowercase")] +/// Types a [`Node`] can have with type-specific additional information pub enum NodeType { + /// Node is a regular file File, + /// Node is a directory Dir, + /// Node is a symlink Symlink { + /// The target of the symlink + /// + /// # Warning + /// + /// This contains the target only if it is a valid unicode target. + /// Dont't access this field directly, use the [`NodeType::to_link()`] method instead! linktarget: String, #[serde_as(as = "Option")] #[serde(default, skip_serializing_if = "Option::is_none")] + /// The raw link target saved as bytes. + /// + /// This is only filled (and mandatory) if the link target is non-unicode. 
linktarget_raw: Option>, }, + /// Node is a block device file Dev { #[serde(default)] + /// Device id device: u64, }, + /// Node is a char device file Chardev { #[serde(default)] + /// Device id device: u64, }, + /// Node is a fifo Fifo, + /// Node is a socket Socket, } impl NodeType { #[cfg(not(windows))] + /// Get a [`NodeType`] from a linktarget path pub fn from_link(target: &Path) -> Self { let (linktarget, linktarget_raw) = target.to_str().map_or_else( || { @@ -87,6 +126,7 @@ impl NodeType { #[cfg(windows)] // Windows doen't support non-unicode link targets, so we assume unicode here. // TODO: Test and check this! + /// Get a [`NodeType`] from a linktarget path pub fn from_link(target: &Path) -> Self { Self::Symlink { linktarget: target.as_os_str().to_string_lossy().to_string(), @@ -95,6 +135,7 @@ impl NodeType { } // Must be only called on NodeType::Symlink! + /// Get the link path from a `NodeType::Symlink`. #[cfg(not(windows))] pub fn to_link(&self) -> &Path { match self { @@ -109,7 +150,16 @@ impl NodeType { } } - // Must be only called on NodeType::Symlink! + /// Convert a `NodeType::Symlink` to a `Path`. + /// + /// # Warning + /// + /// Must be only called on `NodeType::Symlink`! 
+ /// + /// # Panics + /// + /// * If called on a non-symlink node + /// * If the link target is not valid unicode // TODO: Implement non-unicode link targets correctly for windows #[cfg(windows)] pub fn to_link(&self) -> &Path { @@ -126,29 +176,58 @@ impl Default for NodeType { } } +/// Metadata of a [`Node`] #[serde_with::apply( Option => #[serde(default, skip_serializing_if = "Option::is_none")], u64 => #[serde(default, skip_serializing_if = "is_default")], )] #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] pub struct Metadata { + /// Unix file mode pub mode: Option, + /// Unix mtime (last modification time) pub mtime: Option>, + /// Unix atime (last access time) pub atime: Option>, + /// Unix ctime (last status change time) pub ctime: Option>, + /// Unix uid (user id) pub uid: Option, + /// Unix gid (group id) pub gid: Option, + /// Unix user name pub user: Option, + /// Unix group name pub group: Option, + /// Unix inode number pub inode: u64, + /// Unix device id pub device_id: u64, + /// Size of the node pub size: u64, + /// Number of hardlinks to this node pub links: u64, + /// Extended attributes of the node #[serde(default, skip_serializing_if = "Vec::is_empty")] pub extended_attributes: Vec, } // Deserialize a Base64-encoded value into Vec. +// +// # Arguments +// +// * `deserializer` - The deserializer to use. +// +// # Errors +// +// If the value is not a valid Base64-encoded value. +// +// # Returns +// +// The deserialized value. +// +// # Note +// // Handles '"value" = null' by first deserializing into a Option. 
fn deserialize_value<'de, D>(deserializer: D) -> Result, D::Error> where @@ -158,9 +237,12 @@ where Ok(value.unwrap_or_default()) } +/// Extended attribute of a [`Node`] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ExtendedAttribute { + /// Name of the extended attribute pub(crate) name: String, + /// Value of the extended attribute #[serde( serialize_with = "Base64::::serialize_as", deserialize_with = "deserialize_value" @@ -173,8 +255,19 @@ pub(crate) fn is_default(t: &T) -> bool { } impl Node { + /// Create a new [`Node`] with the given name, type and metadata + /// + /// # Arguments + /// + /// * `name` - Name of the node + /// * `node_type` - Type of the node + /// * `meta` - Metadata of the node + /// + /// # Returns + /// + /// The created [`Node`] #[must_use] - pub fn new_node(name: &OsStr, node_type: NodeType, meta: Metadata) -> Self { + pub(crate) fn new_node(name: &OsStr, node_type: NodeType, meta: Metadata) -> Self { Self { name: escape_filename(name), node_type, @@ -184,16 +277,25 @@ impl Node { } } #[must_use] + /// Evaluates if this node is a directory pub const fn is_dir(&self) -> bool { matches!(self.node_type, NodeType::Dir) } #[must_use] + /// Evaluates if this node is a symlink + pub const fn is_symlink(&self) -> bool { + matches!(self.node_type, NodeType::Symlink { .. 
}) + } + + #[must_use] + /// Evaluates if this node is a regular file pub const fn is_file(&self) -> bool { matches!(self.node_type, NodeType::File) } #[must_use] + /// Evaluates if this node is a special file pub const fn is_special(&self) -> bool { matches!( self.node_type, @@ -206,27 +308,54 @@ impl Node { } #[must_use] + /// Get the node name as `OsString`, handling name ecaping + /// + /// # Panics + /// + /// If the name is not valid unicode pub fn name(&self) -> OsString { unescape_filename(&self.name).unwrap_or_else(|_| OsString::from_str(&self.name).unwrap()) } } -pub fn latest_node(n1: &Node, n2: &Node) -> Ordering { +/// An ordering function returning the latest node by mtime +/// +/// # Arguments +/// +/// * `n1` - First node +/// * `n2` - Second node +/// +/// # Returns +/// +/// The ordering of the two nodes +pub fn last_modified_node(n1: &Node, n2: &Node) -> Ordering { n1.meta.mtime.cmp(&n2.meta.mtime) } +// TODO: Should be probably called `_lossy` // TODO(Windows): This is not able to handle non-unicode filenames and // doesn't treat filenames which need and escape (like `\`, `"`, ...) 
correctly #[cfg(windows)] fn escape_filename(name: &OsStr) -> String { name.to_string_lossy().to_string() } + +/// Unescape a filename +/// +/// # Arguments +/// +/// * `s` - The escaped filename #[cfg(windows)] fn unescape_filename(s: &str) -> Result { OsString::from_str(s) } #[cfg(not(windows))] +/// Escape a filename +/// +/// # Arguments +/// +/// * `name` - The filename to escape // This escapes the filename in a way that *should* be compatible to golangs // stconv.Quote, see https://pkg.go.dev/strconv#Quote // However, so far there was no specification what Quote really does, so this @@ -280,6 +409,11 @@ fn escape_filename(name: &OsStr) -> String { } #[cfg(not(windows))] +/// Unescape a filename +/// +/// # Arguments +/// +/// * `s` - The escaped filename // inspired by the enquote crate fn unescape_filename(s: &str) -> RusticResult { let mut chars = s.chars(); diff --git a/crates/rustic_core/src/backend/rclone.rs b/crates/rustic_core/src/backend/rclone.rs index ff1eebfb1..3d12a0149 100644 --- a/crates/rustic_core/src/backend/rclone.rs +++ b/crates/rustic_core/src/backend/rclone.rs @@ -13,31 +13,51 @@ use rand::{ }; use crate::{ - backend::{rest::RestBackend, FileType, Id, ReadBackend, WriteBackend}, + backend::{rest::RestBackend, FileType, ReadBackend, WriteBackend}, error::{ProviderErrorKind, RusticResult}, + id::Id, }; +pub(super) mod constants { + /// The string to search for in the rclone output. + pub(super) const SEARCHSTRING: &str = "Serving restic REST API on "; +} + +/// `ChildToKill` is a wrapper around a `Child` process that kills the child when it is dropped. #[derive(Debug)] struct ChildToKill(Child); impl Drop for ChildToKill { + /// Kill the child process. fn drop(&mut self) { debug!("killing rclone."); self.0.kill().unwrap(); } } -pub(super) mod constants { - pub(super) const SEARCHSTRING: &str = "Serving restic REST API on "; -} - +/// `RcloneBackend` is a backend that uses rclone to access a remote backend. 
#[derive(Clone, Debug)] pub struct RcloneBackend { + /// The REST backend. rest: RestBackend, + /// The url of the backend. url: String, + /// The child data contains the child process and is used to kill the child process when the backend is dropped. _child_data: Arc, } +/// Get the rclone version. +/// +/// # Errors +/// +/// * [`ProviderErrorKind::FromIoError`] - If the rclone version could not be determined. +/// * [`ProviderErrorKind::FromUtf8Error`] - If the rclone version could not be determined. +/// * [`ProviderErrorKind::NoOutputForRcloneVersion`] - If the rclone version could not be determined. +/// * [`ProviderErrorKind::FromParseIntError`] - If the rclone version could not be determined. +/// +/// # Returns +/// +/// The rclone version as a tuple of (major, minor, patch). fn rclone_version() -> RusticResult<(i32, i32, i32)> { let rclone_version_output = Command::new("rclone") .arg("version") @@ -65,6 +85,20 @@ fn rclone_version() -> RusticResult<(i32, i32, i32)> { } impl RcloneBackend { + /// Create a new [`RcloneBackend`] from a given url. + /// + /// # Arguments + /// + /// * `url` - The url to create the [`RcloneBackend`] from. + /// + /// # Errors + /// + /// * [`ProviderErrorKind::FromIoError`] - If the rclone version could not be determined. + /// * [`ProviderErrorKind::NoStdOutForRclone`] - If the rclone version could not be determined. + /// * [`ProviderErrorKind::RCloneExitWithBadStatus`] - If rclone exited with a bad status. + /// * [`ProviderErrorKind::UrlNotStartingWithHttp`] - If the URL does not start with `http`. + /// * [`RestErrorKind::UrlParsingFailed`] - If the URL could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. 
pub fn new(url: &str) -> RusticResult { match rclone_version() { Ok((major, minor, patch)) => { @@ -74,6 +108,8 @@ impl RcloneBackend { .then(patch.cmp(&2)) .is_lt() { + // TODO: This should be an error, and explicitly agreed to with a flag passed to `rustic`, + // check #812 for details // for rclone < 1.52.2 setting user/password via env variable doesn't work. This means // we are setting up an rclone without authentication which is a security issue! // (however, it still works, so we give a warning) @@ -154,24 +190,75 @@ impl RcloneBackend { } impl ReadBackend for RcloneBackend { + /// Returns the location of the backend. fn location(&self) -> String { let mut location = "rclone:".to_string(); location.push_str(&self.url); location } + /// Sets an option of the backend. + /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. + /// + /// # Errors + /// + /// If the option is not supported. fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()> { self.rest.set_option(option, value) } + /// Returns the size of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// + /// # Errors + /// + /// If the size could not be determined. fn list_with_size(&self, tpe: FileType) -> RusticResult> { self.rest.list_with_size(tpe) } + /// Reads full data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. + /// + /// # Returns + /// + /// The data read. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { self.rest.read_full(tpe, id) } + /// Reads partial data of the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the data should be cached. + /// * `offset` - The offset to read from. 
+ /// * `length` - The length to read. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. + /// + /// # Returns + /// + /// The data read. fn read_partial( &self, tpe: FileType, @@ -185,14 +272,42 @@ impl ReadBackend for RcloneBackend { } impl WriteBackend for RcloneBackend { + /// Creates a new file. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. fn create(&self) -> RusticResult<()> { self.rest.create() } + /// Writes bytes to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the data should be cached. + /// * `buf` - The data to write. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. fn write_bytes(&self, tpe: FileType, id: &Id, cacheable: bool, buf: Bytes) -> RusticResult<()> { self.rest.write_bytes(tpe, id, cacheable, buf) } + /// Removes the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. 
fn remove(&self, tpe: FileType, id: &Id, cacheable: bool) -> RusticResult<()> { self.rest.remove(tpe, id, cacheable) } diff --git a/crates/rustic_core/src/backend/rest.rs b/crates/rustic_core/src/backend/rest.rs index 60e2c9b51..4e43bfdd6 100644 --- a/crates/rustic_core/src/backend/rest.rs +++ b/crates/rustic_core/src/backend/rest.rs @@ -18,16 +18,26 @@ use crate::{ }; mod consts { + /// Default number of retries pub(super) const DEFAULT_RETRY: usize = 5; } // trait CheckError to add user-defined method check_error on Response pub(crate) trait CheckError { + /// Check reqwest Response for error and treat errors as permanent or transient fn check_error(self) -> Result>; } impl CheckError for Response { - // Check reqwest Response for error and treat errors as permanent or transient + /// Check reqwest Response for error and treat errors as permanent or transient + /// + /// # Errors + /// + /// If the response is an error, it will return an error of type Error + /// + /// # Returns + /// + /// The response if it is not an error fn check_error(self) -> Result> { match self.error_for_status() { Ok(t) => Ok(t), @@ -41,10 +51,14 @@ impl CheckError for Response { } } +/// A backoff implementation that limits the number of retries #[derive(Clone, Debug)] struct LimitRetryBackoff { + /// The maximum number of retries max_retries: usize, + /// The current number of retries retries: usize, + /// The exponential backoff exp: ExponentialBackoff, } @@ -61,6 +75,11 @@ impl Default for LimitRetryBackoff { } impl Backoff for LimitRetryBackoff { + /// Returns the next backoff duration. + /// + /// # Notes + /// + /// If the number of retries exceeds the maximum number of retries, it returns None. fn next_backoff(&mut self) -> Option { self.retries += 1; if self.retries > self.max_retries { @@ -70,24 +89,45 @@ impl Backoff for LimitRetryBackoff { } } + /// Resets the backoff to the initial state. 
fn reset(&mut self) { self.retries = 0; self.exp.reset(); } } +/// A backend implementation that uses REST to access the backend. #[derive(Clone, Debug)] pub struct RestBackend { + /// The url of the backend. url: Url, + /// The client to use. client: Client, + /// The backoff implementation to use. backoff: LimitRetryBackoff, } +/// Notify function for backoff in case of error +/// +/// # Arguments +/// +/// * `err` - The error that occurred +/// * `duration` - The duration of the backoff fn notify(err: reqwest::Error, duration: Duration) { warn!("Error {err} at {duration:?}, retrying"); } impl RestBackend { + /// Create a new [`RestBackend`] from a given url. + /// + /// # Arguments + /// + /// * `url` - The url to create the [`RestBackend`] from. + /// + /// # Errors + /// + /// * [`RestErrorKind::UrlParsingFailed`] - If the url could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. pub fn new(url: &str) -> RusticResult { let url = if url.ends_with('/') { Url::parse(url).map_err(RestErrorKind::UrlParsingFailed)? @@ -114,12 +154,22 @@ impl RestBackend { }) } + /// Returns the url for a given type and id. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// If the url could not be created. fn url(&self, tpe: FileType, id: &Id) -> RusticResult { let id_path = if tpe == FileType::Config { "config".to_string() } else { let hex_id = id.to_hex(); - let mut path = tpe.to_string(); + let mut path = tpe.dirname().to_string(); path.push('/'); path.push_str(&hex_id); path @@ -132,6 +182,7 @@ impl RestBackend { } impl ReadBackend for RestBackend { + /// Returns the location of the backend. fn location(&self) -> String { let mut location = "rest:".to_string(); let mut url = self.url.clone(); @@ -142,6 +193,22 @@ impl ReadBackend for RestBackend { location } + /// Sets an option of the backend. 
+ /// + /// # Arguments + /// + /// * `option` - The option to set. + /// * `value` - The value to set the option to. + /// + /// # Errors + /// + /// If the option is not supported. + /// + /// # Notes + /// + /// Currently supported options: + /// * `retry` - The number of retries to use for transient errors. Default is 5. Set to 0 to disable retries. + /// * `timeout` - The timeout to use for requests. Default is 10 minutes. Format is described in [humantime](https://docs.rs/humantime/2.1.0/humantime/fn.parse_duration.html). fn set_option(&mut self, option: &str, value: &str) -> RusticResult<()> { if option == "retry" { let max_retries = match value { @@ -164,6 +231,25 @@ impl ReadBackend for RestBackend { Ok(()) } + /// Returns a list of all files of a given type with their size. + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list. + /// + /// # Errors + /// + /// * [`RestErrorKind::JoiningUrlFailed`] - If the url could not be created. + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// + /// # Notes + /// + /// The returned list is sorted by id. + /// + /// # Returns + /// + /// A vector of tuples containing the id and size of the files. fn list_with_size(&self, tpe: FileType) -> RusticResult> { trace!("listing tpe: {tpe:?}"); let url = if tpe == FileType::Config { @@ -171,7 +257,7 @@ impl ReadBackend for RestBackend { .join("config") .map_err(RestErrorKind::JoiningUrlFailed)? } else { - let mut path = tpe.to_string(); + let mut path = tpe.dirname().to_string(); path.push('/'); self.url .join(&path) @@ -221,6 +307,17 @@ impl ReadBackend for RestBackend { } } + /// Returns the content of a file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// + /// # Errors + /// + /// * [`reqwest::Error`] - If the request failed. 
+ /// * [`RestErrorKind::BackoffError`] - If the backoff failed. fn read_full(&self, tpe: FileType, id: &Id) -> RusticResult { trace!("reading tpe: {tpe:?}, id: {id}"); let url = self.url(tpe, id)?; @@ -239,6 +336,19 @@ impl ReadBackend for RestBackend { .map_err(RestErrorKind::BackoffError)?) } + /// Returns a part of the content of a file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `offset` - The offset to read from. + /// * `length` - The length to read. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. fn read_partial( &self, tpe: FileType, @@ -269,6 +379,11 @@ impl ReadBackend for RestBackend { } impl WriteBackend for RestBackend { + /// Creates a new file. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. fn create(&self) -> RusticResult<()> { let url = self .url @@ -285,6 +400,19 @@ impl WriteBackend for RestBackend { .map_err(RestErrorKind::BackoffError)?) } + /// Writes bytes to the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// * `buf` - The bytes to write. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. + // TODO: If the file is not cacheable, the bytes could be written to a temporary file and then moved to the final location. fn write_bytes( &self, tpe: FileType, @@ -306,6 +434,17 @@ impl WriteBackend for RestBackend { .map_err(RestErrorKind::BackoffError)?) } + /// Removes the given file. + /// + /// # Arguments + /// + /// * `tpe` - The type of the file. + /// * `id` - The id of the file. + /// * `cacheable` - Whether the file is cacheable. + /// + /// # Errors + /// + /// * [`RestErrorKind::BackoffError`] - If the backoff failed. 
fn remove(&self, tpe: FileType, id: &Id, _cacheable: bool) -> RusticResult<()> { trace!("removing tpe: {:?}, id: {}", &tpe, &id); let url = self.url(tpe, id)?; diff --git a/crates/rustic_core/src/backend/stdin.rs b/crates/rustic_core/src/backend/stdin.rs index 68839116c..5467016ca 100644 --- a/crates/rustic_core/src/backend/stdin.rs +++ b/crates/rustic_core/src/backend/stdin.rs @@ -4,16 +4,20 @@ use crate::{ backend::{ node::Metadata, node::Node, node::NodeType, ReadSource, ReadSourceEntry, ReadSourceOpen, }, - RusticResult, + error::RusticResult, }; +/// The `StdinSource` is a `ReadSource` for stdin. #[derive(Debug)] pub struct StdinSource { + /// Whether we have already yielded the stdin entry. finished: bool, + /// The path of the stdin entry. path: PathBuf, } impl StdinSource { + /// Creates a new `StdinSource`. pub const fn new(path: PathBuf) -> RusticResult { Ok(Self { finished: false, @@ -22,25 +26,32 @@ impl StdinSource { } } +/// The `OpenStdin` is a `ReadSourceOpen` for stdin. #[derive(Debug, Copy, Clone)] pub struct OpenStdin(); impl ReadSourceOpen for OpenStdin { + /// The reader type. type Reader = std::io::Stdin; + /// Opens stdin. fn open(self) -> RusticResult { Ok(stdin()) } } impl ReadSource for StdinSource { + /// The open type. type Open = OpenStdin; + /// The iterator type. type Iter = Self; + /// Returns the size of the source. fn size(&self) -> RusticResult> { Ok(None) } + /// Returns an iterator over the source. 
fn entries(self) -> Self::Iter { self } diff --git a/crates/rustic_core/src/blob.rs b/crates/rustic_core/src/blob.rs index 347b4422f..27ecc5105 100644 --- a/crates/rustic_core/src/blob.rs +++ b/crates/rustic_core/src/blob.rs @@ -1,27 +1,36 @@ pub(crate) mod packer; pub(crate) mod tree; -use std::ops::Add; - use derive_more::Constructor; use enum_map::{Enum, EnumMap}; use serde::{Deserialize, Serialize}; use crate::id::Id; +/// All [`BlobType`]s which are supported by the repository +pub const ALL_BLOB_TYPES: [BlobType; 2] = [BlobType::Tree, BlobType::Data]; + #[derive( Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Enum, )] +/// The type a `blob` or a `packfile` can have pub enum BlobType { #[serde(rename = "tree")] + /// This is a tree blob Tree, #[serde(rename = "data")] + /// This is a data blob Data, } impl BlobType { + /// Defines the cacheability of a [`BlobType`] + /// + /// # Returns + /// + /// `true` if the [`BlobType`] is cacheable, `false` otherwise #[must_use] - pub const fn is_cacheable(self) -> bool { + pub(crate) const fn is_cacheable(self) -> bool { match self { Self::Tree => true, Self::Data => false, @@ -33,11 +42,20 @@ pub type BlobTypeMap = EnumMap; /// Initialize is a new trait to define the method init() for a [`BlobTypeMap`] pub trait Initialize { - /// initialize a [`BlobTypeMap`] by processing a given function for each [`BlobType`] + /// Initialize a [`BlobTypeMap`] by processing a given function for each [`BlobType`] fn init T>(init: F) -> BlobTypeMap; } impl Initialize for BlobTypeMap { + /// Initialize a [`BlobTypeMap`] by processing a given function for each [`BlobType`] + /// + /// # Arguments + /// + /// * `init` - The function to process for each [`BlobType`] + /// + /// # Returns + /// + /// A [`BlobTypeMap`] with the result of the function for each [`BlobType`] fn init T>(mut init: F) -> Self { let mut btm = Self::default(); for i in 0..BlobType::LENGTH { @@ -48,19 +66,17 @@ impl Initialize for 
BlobTypeMap { } } -/// Sum is a new trait to define the method sum() for a [`BlobTypeMap`] -pub trait Sum { - fn sum(&self) -> T; -} - -impl> Sum for BlobTypeMap { - fn sum(&self) -> T { - self.values().fold(T::default(), |acc, x| acc + *x) - } -} - +/// A `Blob` is a file that is stored in the backend. +/// +/// It can be a `tree` or a `data` blob. +/// +/// A `tree` blob is a file that contains a list of other blobs. +/// A `data` blob is a file that contains the actual data. #[derive(Debug, PartialEq, Eq, Clone, Constructor)] pub(crate) struct Blob { + /// The type of the blob tpe: BlobType, + + /// The id of the blob id: Id, } diff --git a/crates/rustic_core/src/blob/packer.rs b/crates/rustic_core/src/blob/packer.rs index 0e00b534b..89f5b814f 100644 --- a/crates/rustic_core/src/blob/packer.rs +++ b/crates/rustic_core/src/blob/packer.rs @@ -17,36 +17,59 @@ use crate::{ blob::BlobType, crypto::{hasher::hash, CryptoKey}, error::PackerErrorKind, + error::RusticResult, id::Id, index::indexer::SharedIndexer, repofile::{ configfile::ConfigFile, indexfile::IndexBlob, indexfile::IndexPack, packfile::PackHeaderLength, packfile::PackHeaderRef, snapshotfile::SnapshotSummary, }, - RusticResult, }; + pub(super) mod constants { use std::time::Duration; + /// Kilobyte in bytes pub(super) const KB: u32 = 1024; + /// Megabyte in bytes pub(super) const MB: u32 = 1024 * KB; - // the absolute maximum size of a pack: including headers it should not exceed 4 GB + /// The absolute maximum size of a pack: including headers it should not exceed 4 GB pub(super) const MAX_SIZE: u32 = 4076 * MB; + /// The maximum number of blobs in a pack pub(super) const MAX_COUNT: u32 = 10_000; + /// The maximum age of a pack pub(super) const MAX_AGE: Duration = Duration::from_secs(300); } +/// The pack sizer is responsible for computing the size of the pack file. #[derive(Debug, Clone, Copy)] pub struct PackSizer { + /// The default size of a pack file. 
default_size: u32, + /// The grow factor of a pack file. grow_factor: u32, + /// The size limit of a pack file. size_limit: u32, + /// The current size of a pack file. current_size: u64, + /// The minimum pack size tolerance in percent before a repack is triggered. min_packsize_tolerate_percent: u32, + /// The maximum pack size tolerance in percent before a repack is triggered. max_packsize_tolerate_percent: u32, } impl PackSizer { + /// Creates a new `PackSizer` from a config file. + /// + /// # Arguments + /// + /// * `config` - The config file. + /// * `blob_type` - The blob type. + /// * `current_size` - The current size of the pack file. + /// + /// # Returns + /// + /// A new `PackSizer`. #[must_use] pub fn from_config(config: &ConfigFile, blob_type: BlobType, current_size: u64) -> Self { let (default_size, grow_factor, size_limit) = config.packsize(blob_type); @@ -62,6 +85,7 @@ impl PackSizer { } } + /// Computes the size of the pack file. #[must_use] pub fn pack_size(&self) -> u32 { (self.current_size.integer_sqrt() as u32 * self.grow_factor + self.default_size) @@ -69,7 +93,11 @@ impl PackSizer { .min(constants::MAX_SIZE) } - // returns whether the given size is not too small or too large + /// Evaluates whether the given size is not too small or too large + /// + /// # Arguments + /// + /// * `size` - The size to check #[must_use] pub fn size_ok(&self, size: u32) -> bool { let target_size = self.pack_size(); @@ -80,23 +108,60 @@ impl PackSizer { <= u64::from(target_size) * u64::from(self.max_packsize_tolerate_percent) } + /// Adds the given size to the current size. + /// + /// # Arguments + /// + /// * `added` - The size to add + /// + /// # Panics + /// + /// If the size is too large fn add_size(&mut self, added: u32) { self.current_size += u64::from(added); } } +/// The `Packer` is responsible for packing blobs into pack files. +/// +/// # Type Parameters +/// +/// * `BE` - The backend type. 
#[allow(missing_debug_implementations)] #[derive(Clone)] pub struct Packer { + /// The raw packer wrapped in an Arc and RwLock. // This is a hack: raw_packer and indexer are only used in the add_raw() method. // TODO: Refactor as actor, like the other add() methods raw_packer: Arc>>, + /// The shared indexer containing the backend. indexer: SharedIndexer, + /// The sender to send blobs to the raw packer. sender: Sender<(Bytes, Id, Option)>, + /// The receiver to receive the status from the raw packer. finish: Receiver>, } impl Packer { + /// Creates a new `Packer`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. + /// * `blob_type` - The blob type. + /// * `indexer` - The indexer to write to. + /// * `config` - The config file. + /// * `total_size` - The total size of the pack file. + /// + /// # Errors + /// + /// * [`PackerErrorKind::ZstdError`] - If the zstd compression level is invalid. + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails pub fn new( be: BE, blob_type: BlobType, @@ -172,13 +237,32 @@ impl Packer { Ok(packer) } - /// adds the blob to the packfile + /// Adds the blob to the packfile + /// + /// # Arguments + /// + /// * `data` - The blob data + /// * `id` - The blob id + /// + /// # Errors + /// + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. 
pub fn add(&self, data: Bytes, id: Id) -> RusticResult<()> { // compute size limit based on total size and size bounds self.add_with_sizelimit(data, id, None) } - /// adds the blob to the packfile, allows specifying a size limit for the pack file + /// Adds the blob to the packfile, allows specifying a size limit for the pack file + /// + /// # Arguments + /// + /// * `data` - The blob data + /// * `id` - The blob id + /// * `size_limit` - The size limit for the pack file + /// + /// # Errors + /// + /// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. fn add_with_sizelimit(&self, data: Bytes, id: Id, size_limit: Option) -> RusticResult<()> { self.sender .send((data, id, size_limit)) @@ -186,7 +270,20 @@ impl Packer { Ok(()) } - /// adds the already encrypted (and maybe compressed) blob to the packfile + /// Adds the already encrypted (and maybe compressed) blob to the packfile + /// + /// # Arguments + /// + /// * `data` - The blob data + /// * `id` - The blob id + /// * `data_len` - The length of the blob data + /// * `uncompressed_length` - The length of the blob data before compression + /// * `size_limit` - The size limit for the pack file + /// + /// # Errors + /// + /// If the blob is already present in the index + /// If sending the message to the raw packer fails. fn add_raw( &self, data: &[u8], @@ -209,6 +306,11 @@ impl Packer { } } + /// Finalizes the packer and does cleanup + /// + /// # Panics + /// + /// If the channel could not be dropped pub fn finalize(self) -> RusticResult { // cancel channel drop(self.sender); @@ -217,14 +319,28 @@ impl Packer { } } +// TODO: add documentation! 
#[derive(Default, Debug, Clone, Copy)] pub struct PackerStats { + /// The number of blobs added blobs: u64, + /// The number of data blobs added data: u64, + /// The number of packed data blobs added data_packed: u64, } impl PackerStats { + /// Adds the stats to the summary + /// + /// # Arguments + /// + /// * `summary` - The summary to add to + /// * `tpe` - The blob type + /// + /// # Panics + /// + /// If the blob type is invalid pub fn apply(self, summary: &mut SnapshotSummary, tpe: BlobType) { summary.data_added += self.data; summary.data_added_packed += self.data_packed; @@ -243,21 +359,49 @@ impl PackerStats { } } +/// The `RawPacker` is responsible for packing blobs into pack files. +/// +/// # Type Parameters +/// +/// * `BE` - The backend type. #[allow(missing_debug_implementations, clippy::module_name_repetitions)] pub(crate) struct RawPacker { + /// The backend to write to. be: BE, + /// The blob type to pack. blob_type: BlobType, + /// The file to write to file: BytesMut, + /// The size of the file size: u32, + /// The number of blobs in the pack count: u32, + /// The time the pack was created created: SystemTime, + /// The index of the pack index: IndexPack, + /// The actor to write the pack file file_writer: Option, + /// The pack sizer pack_sizer: PackSizer, + /// The packer stats stats: PackerStats, } impl RawPacker { + /// Creates a new `RawPacker`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. + /// * `blob_type` - The blob type. + /// * `indexer` - The indexer to write to. + /// * `config` - The config file. + /// * `total_size` - The total size of the pack file. 
fn new( be: BE, blob_type: BlobType, @@ -291,12 +435,26 @@ impl RawPacker { } } + /// Saves the packfile and returns the stats + /// + /// # Errors + /// + /// If the packfile could not be saved fn finalize(&mut self) -> RusticResult { self.save()?; self.file_writer.take().unwrap().finalize()?; Ok(std::mem::take(&mut self.stats)) } + /// Writes the given data to the packfile. + /// + /// # Arguments + /// + /// * `data` - The data to write. + /// + /// # Returns + /// + /// The number of bytes written. fn write_data(&mut self, data: &[u8]) -> RusticResult { let len = data .len() @@ -307,7 +465,20 @@ impl RawPacker { Ok(len) } - // adds the already compressed/encrypted blob to the packfile without any check + /// Adds the already compressed/encrypted blob to the packfile without any check + /// + /// # Arguments + /// + /// * `data` - The blob data + /// * `id` - The blob id + /// * `data_len` - The length of the blob data + /// * `uncompressed_length` - The length of the blob data before compression + /// * `size_limit` - The size limit for the pack file + /// + /// # Errors + /// + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails + /// * [`PackerErrorKind::CouldNotGetElapsedTimeFromSystemTime`] - If elapsed time could not be retrieved from system time fn add_raw( &mut self, data: &[u8], @@ -349,9 +520,14 @@ impl RawPacker { Ok(()) } - /// writes header and length of header to packfile + /// Writes header and length of header to packfile + /// + /// # Errors + /// + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the header length to u32 fails + /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If the header could not be written fn write_header(&mut self) -> RusticResult<()> { - // comput the pack header + // compute the pack header let data = PackHeaderRef::from_index_pack(&self.index).to_binary()?; // encrypt and write to pack file @@ -369,6 +545,16 @@ impl RawPacker { Ok(()) } + /// Saves 
the packfile + /// + /// # Errors + /// + /// If the header could not be written + /// + /// # Errors + /// + /// * [`PackerErrorKind::IntConversionFailed`] - If converting the header length to u32 fails + /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If the header could not be written fn save(&mut self) -> RusticResult<()> { if self.size == 0 { return Ok(()); @@ -392,14 +578,22 @@ impl RawPacker { } } +// TODO: add documentation +/// # Type Parameters +/// +/// * `BE` - The backend type. #[derive(Clone)] pub(crate) struct FileWriterHandle { + /// The backend to write to. be: BE, + /// The shared indexer containing the backend. indexer: SharedIndexer, + /// Whether the file is cacheable. cacheable: bool, } impl FileWriterHandle { + // TODO: add documentation fn process(&self, load: (Bytes, Id, IndexPack)) -> RusticResult { let (file, id, mut index) = load; index.id = id; @@ -415,12 +609,26 @@ impl FileWriterHandle { } } +// TODO: add documentation pub(crate) struct Actor { + /// The sender to send blobs to the raw packer. sender: Sender<(Bytes, IndexPack)>, + /// The receiver to receive the status from the raw packer. finish: Receiver>, } impl Actor { + /// Creates a new `Actor`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// + /// # Arguments + /// + /// * `fwh` - The file writer handle. + /// * `queue_len` - The length of the queue. + /// * `par` - The number of parallel threads. fn new( fwh: FileWriterHandle, queue_len: usize, @@ -453,6 +661,15 @@ impl Actor { } } + /// Sends the given data to the actor. + /// + /// # Arguments + /// + /// * `load` - The data to send. + /// + /// # Errors + /// + /// If sending the message to the actor fails. 
fn send(&self, load: (Bytes, IndexPack)) -> RusticResult<()> { self.sender .send(load) @@ -460,6 +677,11 @@ impl Actor { Ok(()) } + /// Finalizes the actor and does cleanup + /// + /// # Panics + /// + /// If the receiver is not present fn finalize(self) -> RusticResult<()> { // cancel channel drop(self.sender); @@ -468,17 +690,42 @@ impl Actor { } } +/// The `Repacker` is responsible for repacking blobs into pack files. +/// +/// # Type Parameters +/// +/// * `BE` - The backend to read from. #[allow(missing_debug_implementations)] pub struct Repacker where BE: DecryptFullBackend, { + /// The backend to read from. be: BE, + /// The packer to write to. packer: Packer, + /// The size limit of the pack file. size_limit: u32, } impl Repacker { + /// Creates a new `Repacker`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend to read from. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `blob_type` - The blob type. + /// * `indexer` - The indexer to write to. + /// * `config` - The config file. + /// * `total_size` - The total size of the pack file. 
+ /// + /// # Errors + /// + /// If the Packer could not be created pub fn new( be: BE, blob_type: BlobType, @@ -495,6 +742,17 @@ impl Repacker { }) } + /// Adds the blob to the packfile without any check + /// + /// # Arguments + /// + /// * `pack_id` - The pack id + /// * `blob` - The blob to add + /// + /// # Errors + /// + /// If the blob could not be added + /// If reading the blob from the backend fails pub fn add_fast(&self, pack_id: &Id, blob: &IndexBlob) -> RusticResult<()> { let data = self.be.read_partial( FileType::Pack, @@ -513,6 +771,17 @@ impl Repacker { Ok(()) } + /// Adds the blob to the packfile + /// + /// # Arguments + /// + /// * `pack_id` - The pack id + /// * `blob` - The blob to add + /// + /// # Errors + /// + /// If the blob could not be added + /// If reading the blob from the backend fails pub fn add(&self, pack_id: &Id, blob: &IndexBlob) -> RusticResult<()> { let data = self.be.read_encrypted_partial( FileType::Pack, @@ -527,6 +796,7 @@ impl Repacker { Ok(()) } + /// Finalizes the repacker and returns the stats pub fn finalize(self) -> RusticResult { self.packer.finalize() } diff --git a/crates/rustic_core/src/blob/tree.rs b/crates/rustic_core/src/blob/tree.rs index 0c8ec2ff6..bdcaa8a09 100644 --- a/crates/rustic_core/src/blob/tree.rs +++ b/crates/rustic_core/src/blob/tree.rs @@ -8,6 +8,8 @@ use std::{ }; use crossbeam_channel::{bounded, unbounded, Receiver, Sender}; +use derivative::Derivative; +use derive_setters::Setters; use ignore::overrides::{Override, OverrideBuilder}; use ignore::Match; @@ -16,25 +18,33 @@ use serde::{Deserialize, Deserializer, Serialize}; use crate::{ backend::{node::Metadata, node::Node, node::NodeType}, crypto::hasher::hash, + error::RusticResult, error::TreeErrorKind, id::Id, index::IndexedBackend, + progress::Progress, repofile::snapshotfile::SnapshotSummary, - Progress, RusticResult, }; pub(super) mod constants { + /// The maximum number of trees that are loaded in parallel pub(super) const 
MAX_TREE_LOADER: usize = 4; } pub(crate) type TreeStreamItem = RusticResult<(PathBuf, Tree)>; +type NodeStreamItem = RusticResult<(PathBuf, Node)>; #[derive(Default, Serialize, Deserialize, Clone, Debug)] +/// A [`Tree`] is a list of [`Node`]s pub struct Tree { #[serde(deserialize_with = "deserialize_null_default")] + /// The nodes contained in the tree. + /// + /// This is usually sorted by `Node.name()`, i.e. by the node name as `OsString` pub nodes: Vec, } +/// Deserializes `Option` as `T::default()` if the value is `null` pub(crate) fn deserialize_null_default<'de, D, T>(deserializer: D) -> Result where T: Default + Deserialize<'de>, @@ -45,23 +55,49 @@ where } impl Tree { + /// Creates a new `Tree` with no nodes. #[must_use] - pub const fn new() -> Self { + pub(crate) const fn new() -> Self { Self { nodes: Vec::new() } } - pub fn add(&mut self, node: Node) { + /// Adds a node to the tree. + /// + /// # Arguments + /// + /// * `node` - The node to add. + pub(crate) fn add(&mut self, node: Node) { self.nodes.push(node); } - pub fn serialize(&self) -> RusticResult<(Vec, Id)> { + /// Serializes the tree. + /// + /// # Returns + /// + /// A tuple of the serialized tree as `Vec` and the tree's ID + pub(crate) fn serialize(&self) -> RusticResult<(Vec, Id)> { let mut chunk = serde_json::to_vec(&self).map_err(TreeErrorKind::SerializingTreeFailed)?; chunk.push(b'\n'); // for whatever reason, restic adds a newline, so to be compatible... let id = hash(&chunk); Ok((chunk, id)) } - pub fn from_backend(be: &impl IndexedBackend, id: Id) -> RusticResult { + /// Deserializes a tree from the backend. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `id` - The ID of the tree to deserialize. + /// + /// # Errors + /// + /// * [`TreeErrorKind::BlobIdNotFound`] - If the tree ID is not found in the backend. + /// * [`TreeErrorKind::DeserializingTreeFailed`] - If deserialization fails. + /// + /// # Returns + /// + /// The deserialized tree. 
+ pub(crate) fn from_backend(be: &impl IndexedBackend, id: Id) -> RusticResult { let data = be .get_tree(&id) .ok_or_else(|| TreeErrorKind::BlobIdNotFound(id))? @@ -70,7 +106,24 @@ impl Tree { Ok(serde_json::from_slice(&data).map_err(TreeErrorKind::DeserializingTreeFailed)?) } - pub fn node_from_path(be: &impl IndexedBackend, id: Id, path: &Path) -> RusticResult { + /// Creates a new node from a path. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `id` - The ID of the tree to deserialize. + /// * `path` - The path to create the node from. + /// + /// # Errors + /// + /// * [`TreeErrorKind::NotADirectory`] - If the path is not a directory. + /// * [`TreeErrorKind::PathNotFound`] - If the path is not found. + /// * [`TreeErrorKind::PathIsNotUtf8Conform`] - If the path is not UTF-8 conform. + pub(crate) fn node_from_path( + be: &impl IndexedBackend, + id: Id, + path: &Path, + ) -> RusticResult { let mut node = Node::new_node(OsStr::new(""), NodeType::Dir, Metadata::default()); node.subtree = Some(id); @@ -92,6 +145,16 @@ impl Tree { } } +/// Converts a [`Component`] to an [`OsString`]. +/// +/// # Arguments +/// +/// * `p` - The component to convert. +/// +/// # Errors +/// +/// * [`TreeErrorKind::ContainsCurrentOrParentDirectory`] - If the component is a current or parent directory. +/// * [`TreeErrorKind::PathIsNotUtf8Conform`] - If the component is not UTF-8 conform. 
pub(crate) fn comp_to_osstr(p: Component<'_>) -> RusticResult> { let s = match p { Component::RootDir => None, @@ -119,32 +182,40 @@ impl IntoIterator for Tree { } #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Default, Clone, Debug)] +#[derive(Derivative, Clone, Debug, Setters)] +#[derivative(Default)] +#[setters(into)] +/// Options for listing the `Nodes` of a `Tree` pub struct TreeStreamerOptions { /// Glob pattern to exclude/include (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long, help_heading = "Exclude options"))] - glob: Vec, + pub glob: Vec, /// Same as --glob pattern but ignores the casing of filenames #[cfg_attr( feature = "clap", clap(long, value_name = "GLOB", help_heading = "Exclude options") )] - iglob: Vec, + pub iglob: Vec, /// Read glob patterns to exclude/include from this file (can be specified multiple times) #[cfg_attr( feature = "clap", clap(long, value_name = "FILE", help_heading = "Exclude options") )] - glob_file: Vec, + pub glob_file: Vec, /// Same as --glob-file ignores the casing of filenames in patterns #[cfg_attr( feature = "clap", clap(long, value_name = "FILE", help_heading = "Exclude options") )] - iglob_file: Vec, + pub iglob_file: Vec, + + /// recursively list the dir + #[cfg_attr(feature = "clap", clap(long))] + #[derivative(Default(value = "true"))] + pub recursive: bool, } /// [`NodeStreamer`] recursively streams all nodes of a given tree including all subtrees in-order @@ -153,11 +224,17 @@ pub struct NodeStreamer where BE: IndexedBackend, { + /// The open iterators for subtrees open_iterators: Vec>, + /// Inner iterator for the current subtree nodes inner: std::vec::IntoIter, + /// The current path path: PathBuf, + /// The backend to read from be: BE, + /// The glob overrides overrides: Option, + /// Whether to stream recursively recursive: bool, } @@ -165,10 +242,35 @@ impl NodeStreamer where BE: IndexedBackend, { + /// Creates a new `NodeStreamer`. 
+ /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `node` - The node to start from. + /// + /// # Errors + /// + /// * [`TreeErrorKind::BlobIdNotFound`] - If the tree ID is not found in the backend. + /// * [`TreeErrorKind::DeserializingTreeFailed`] - If deserialization fails. + #[allow(unused)] pub fn new(be: BE, node: &Node) -> RusticResult { Self::new_streamer(be, node, None, true) } + /// Creates a new `NodeStreamer`. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `node` - The node to start from. + /// * `overrides` - The glob overrides. + /// * `recursive` - Whether to stream recursively. + /// + /// # Errors + /// + /// * [`TreeErrorKind::BlobIdNotFound`] - If the tree ID is not found in the backend. + /// * [`TreeErrorKind::DeserializingTreeFailed`] - If deserialization fails. fn new_streamer( be: BE, node: &Node, @@ -191,13 +293,20 @@ where recursive, }) } - - pub fn new_with_glob( - be: BE, - node: &Node, - opts: &TreeStreamerOptions, - recursive: bool, - ) -> RusticResult { + /// Creates a new `NodeStreamer` with glob patterns. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `node` - The node to start from. + /// * `opts` - The options for the streamer. + /// * `recursive` - Whether to stream recursively. + /// + /// # Errors + /// + /// * [`TreeErrorKind::BuildingNodeStreamerFailed`] - If building the streamer fails. + /// * [`TreeErrorKind::ReadingFileStringFromGlobsFailed`] - If reading a glob file fails. 
+ pub fn new_with_glob(be: BE, node: &Node, opts: &TreeStreamerOptions) -> RusticResult { let mut override_builder = OverrideBuilder::new(""); for g in &opts.glob { @@ -240,12 +349,10 @@ where .build() .map_err(TreeErrorKind::BuildingNodeStreamerFailed)?; - Self::new_streamer(be, node, Some(overrides), recursive) + Self::new_streamer(be, node, Some(overrides), opts.recursive) } } -type NodeStreamItem = RusticResult<(PathBuf, Node)>; - // TODO: This is not parallel at the moment... impl Iterator for NodeStreamer where @@ -292,18 +399,43 @@ where } /// [`TreeStreamerOnce`] recursively visits all trees and subtrees, but each tree ID only once - +/// +/// # Type Parameters +/// +/// * `P` - The progress indicator #[derive(Debug)] pub struct TreeStreamerOnce

{ + /// The visited tree IDs visited: HashSet, + /// The queue to send tree IDs to queue_in: Option>, + /// The queue to receive trees from queue_out: Receiver>, + /// The progress indicator p: P, + /// The number of trees that are not yet finished counter: Vec, + /// The number of finished trees finished_ids: usize, } impl TreeStreamerOnce

{ + /// Creates a new `TreeStreamerOnce`. + /// + /// # Type Parameters + /// + /// * `BE` - The type of the backend. + /// * `P` - The type of the progress indicator. + /// + /// # Arguments + /// + /// * `be` - The backend to read from. + /// * `ids` - The IDs of the trees to visit. + /// * `p` - The progress indicator. + /// + /// # Errors + /// + /// * [`TreeErrorKind::SendingCrossbeamMessageFailed`] - If sending the message fails. pub fn new(be: BE, ids: Vec, p: P) -> RusticResult { p.set_length(ids.len() as u64); @@ -343,6 +475,21 @@ impl TreeStreamerOnce

{ Ok(streamer) } + /// Adds a tree ID to the queue. + /// + /// # Arguments + /// + /// * `path` - The path of the tree. + /// * `id` - The ID of the tree. + /// * `count` - The index of the tree. + /// + /// # Returns + /// + /// Whether the tree ID was added to the queue. + /// + /// # Errors + /// + /// * [`TreeErrorKind::SendingCrossbeamMessageFailed`] - If sending the message fails. fn add_pending(&mut self, path: PathBuf, id: Id, count: usize) -> RusticResult { if self.visited.insert(id) { self.queue_in @@ -396,6 +543,19 @@ impl Iterator for TreeStreamerOnce

{ } } +/// Merge trees from a list of trees +/// +/// # Arguments +/// +/// * `be` - The backend to read from. +/// * `trees` - The IDs of the trees to merge. +/// * `cmp` - The comparison function for the nodes. +/// * `save` - The function to save the tree. +/// * `summary` - The summary of the snapshot. +/// +/// # Errors +/// +// TODO: * [`TreeErrorKind::MergingTreesFailed`] - If merging the trees fails. pub(crate) fn merge_trees( be: &impl IndexedBackend, trees: &[Id], @@ -493,6 +653,19 @@ pub(crate) fn merge_trees( Ok(id) } +/// Merge nodes from a list of nodes +/// +/// # Arguments +/// +/// * `be` - The backend to read from. +/// * `nodes` - The nodes to merge. +/// * `cmp` - The comparison function for the nodes. +/// * `save` - The function to save the tree. +/// * `summary` - The summary of the snapshot. +/// +/// # Errors +/// +// TODO: add errors pub(crate) fn merge_nodes( be: &impl IndexedBackend, nodes: Vec, diff --git a/crates/rustic_core/src/cdc/polynom.rs b/crates/rustic_core/src/cdc/polynom.rs index 71a7ada8d..25d4c1881 100644 --- a/crates/rustic_core/src/cdc/polynom.rs +++ b/crates/rustic_core/src/cdc/polynom.rs @@ -1,17 +1,22 @@ -// The irreductible polynom to be used in the fingerprint function. +/// The irreductible polynom to be used in the fingerprint function. pub(crate) trait Polynom { + /// The degree of the polynom. fn degree(&self) -> i32; + + /// Returns the modulo of the polynom. fn modulo(self, m: Self) -> Self; } +/// A 64 bit polynom. pub(crate) type Polynom64 = u64; impl Polynom for Polynom64 { - // The degree of the polynom. + /// The degree of the polynom. fn degree(&self) -> i32 { 63 - self.leading_zeros() as i32 } + /// Returns the modulo of the polynom. 
fn modulo(self, m: Self) -> Self { let mut p = self; while p.degree() >= m.degree() { diff --git a/crates/rustic_core/src/cdc/rolling_hash.rs b/crates/rustic_core/src/cdc/rolling_hash.rs index e6f909704..fe9d4a18f 100644 --- a/crates/rustic_core/src/cdc/rolling_hash.rs +++ b/crates/rustic_core/src/cdc/rolling_hash.rs @@ -1,35 +1,77 @@ use crate::cdc::polynom::{Polynom, Polynom64}; +/// A rolling hash implementation for 64 bit polynoms. pub(crate) trait RollingHash64 { + /// Resets the rolling hash. fn reset(&mut self); + + /// Attempt to prefill the window + /// + /// # Arguments + /// + /// * `iter` - The iterator to read from. fn prefill_window(&mut self, iter: &mut I) -> usize where I: Iterator; + + /// Combines a reset with a prefill in an optimized way. + /// + /// # Arguments + /// + /// * `iter` - The iterator to read from. fn reset_and_prefill_window(&mut self, iter: &mut I) -> usize where I: Iterator; + + /// Slides the window by byte. + /// + /// # Arguments + /// + /// * `byte` - The byte to slide in. fn slide(&mut self, byte: u8); + + /// Returns the current hash as a `Polynom64`. fn get_hash(&self) -> &Polynom64; } +/// A rolling hash implementation for 64 bit polynoms from Rabin. #[derive(Clone)] pub(crate) struct Rabin64 { // Configuration + /// Window size. pub(crate) window_size: usize, // The size of the data window used in the hash calculation. + /// Window size mask. pub(crate) window_size_mask: usize, // = window_size - 1, supposing that it is an exponent of 2. // Precalculations + /// The number of bits to shift the polynom to the left. pub(crate) polynom_shift: i32, + + /// Precalculated out table. pub(crate) out_table: [Polynom64; 256], + /// Precalculated mod table. pub(crate) mod_table: [Polynom64; 256], // Current state + /// The data window. pub(crate) window_data: Vec, + /// The current window index. pub(crate) window_index: usize, + /// The current hash.
pub(crate) hash: Polynom64, } impl Rabin64 { + /// Calculates the out table + /// + /// # Arguments + /// + /// * `window_size` - The window size. + /// * `mod_polynom` - The modulo polynom. + /// + /// # Returns + /// + /// An array of 256 `Polynom64` values. fn calculate_out_table(window_size: usize, mod_polynom: Polynom64) -> [Polynom64; 256] { let mut out_table = [0; 256]; for (b, elem) in out_table.iter_mut().enumerate() { @@ -44,6 +86,15 @@ impl Rabin64 { out_table } + /// Calculates the mod table + /// + /// # Arguments + /// + /// * `mod_polynom` - The modulo polynom. + /// + /// # Returns + /// + /// An array of 256 `Polynom64` values. fn calculate_mod_table(mod_polynom: Polynom64) -> [Polynom64; 256] { let mut mod_table = [0; 256]; let k = mod_polynom.degree(); @@ -55,6 +106,12 @@ impl Rabin64 { mod_table } + /// Creates a new `Rabin64` with the given window size and modulo polynom. + /// + /// # Arguments + /// + /// * `window_size_nb_bits` - The number of bits of the window size. + /// * `mod_polynom` - The modulo polynom. pub(crate) fn new_with_polynom(window_size_nb_bits: u32, mod_polynom: Polynom64) -> Self { let window_size = 1 << window_size_nb_bits; diff --git a/crates/rustic_core/src/chunker.rs b/crates/rustic_core/src/chunker.rs index bf579c3a5..913024122 100644 --- a/crates/rustic_core/src/chunker.rs +++ b/crates/rustic_core/src/chunker.rs @@ -7,38 +7,70 @@ use crate::{ polynom::{Polynom, Polynom64}, rolling_hash::{Rabin64, RollingHash64}, }, - error::PolynomialErrorKind, - RusticResult, + error::{PolynomialErrorKind, RusticResult}, }; pub(super) mod constants { + /// The Splitmask is used to determine if a chunk is a chunk boundary. pub(super) const SPLITMASK: u64 = (1u64 << 20) - 1; + /// The size of a kilobyte. pub(super) const KB: usize = 1024; + /// The size of a megabyte. pub(super) const MB: usize = 1024 * KB; + /// The minimum size of a chunk. pub(super) const MIN_SIZE: usize = 512 * KB; + /// The maximum size of a chunk. 
pub(super) const MAX_SIZE: usize = 8 * MB; + /// Buffer size used for reading. pub(super) const BUF_SIZE: usize = 64 * KB; + /// Random polynomial maximum tries. pub(super) const RAND_POLY_MAX_TRIES: i32 = 1_000_000; } +/// Default predicate for chunking. #[inline] const fn default_predicate(x: u64) -> bool { (x & constants::SPLITMASK) == 0 } +/// `ChunkIter` is an iterator that chunks data. pub(crate) struct ChunkIter { + /// The buffer used for reading. buf: Vec, + + /// The position in the buffer. pos: usize, + + /// The reader. reader: R, + + /// The predicate used to determine if a chunk is a chunk boundary. predicate: fn(u64) -> bool, + + /// The rolling hash. rabin: Rabin64, + + /// The size hint is used to optimize memory allocation; this should be an upper bound on the size. size_hint: usize, + + /// The minimum size of a chunk. min_size: usize, + + /// The maximum size of a chunk. max_size: usize, + + /// If the iterator is finished. finished: bool, } impl ChunkIter { + /// Creates a new `ChunkIter`. + /// + /// # Arguments + /// + /// * `reader` - The reader to read from. + /// * `size_hint` - The size hint is used to optimize memory allocation; this should be an upper bound on the size. + /// * `rabin` - The rolling hash. pub(crate) fn new(reader: R, size_hint: usize, rabin: Rabin64) -> Self { Self { buf: Vec::with_capacity(4 * constants::KB), @@ -137,8 +169,11 @@ impl Iterator for ChunkIter { /// (largest prime number below 64-8) /// There are (2^53-2/53) irreducible polynomials of degree 53 in /// `F_2[X]`, c.f. Michael O. Rabin (1981): "Fingerprinting by Random -/// Polynomials", page 4. If no polynomial could be found in one -/// million tries, an error is returned. +/// Polynomials", page 4. +/// +/// # Errors +/// +/// * [`PolynomialErrorKind::NoSuitablePolynomialFound`] - If no polynomial could be found in one million tries. 
pub fn random_poly() -> RusticResult { for _ in 0..constants::RAND_POLY_MAX_TRIES { let mut poly: u64 = thread_rng().gen(); @@ -157,17 +192,25 @@ pub fn random_poly() -> RusticResult { Err(PolynomialErrorKind::NoSuitablePolynomialFound.into()) } +/// A trait for extending polynomials. pub(crate) trait PolynomExtend { + /// Returns true IFF x is irreducible over `F_2`. fn irreducible(&self) -> bool; + + /// Returns the greatest common divisor of two polynomials. fn gcd(self, other: Self) -> Self; + + /// Adds two polynomials. fn add(self, other: Self) -> Self; + + /// Multiplies two polynomials modulo another polynomial. fn mulmod(self, other: Self, modulo: Self) -> Self; } // implementation goes along the lines of // https://github.com/restic/chunker/blob/master/polynomials.go impl PolynomExtend for Polynom64 { - // Irreducible returns true iff x is irreducible over F_2. This function + // Irreducible returns true IFF x is irreducible over F_2. This function // uses Ben Or's reducibility test. // // For details see "Tests and Constructions of Irreducible Polynomials over
`backup` subcommand - -/// App-local prelude includes `app_reader()`/`app_writer()`/`app_config()` -/// accessors along with logging macros. Customize as you see fit. +use derive_setters::Setters; use log::info; use std::path::PathBuf; use path_dedot::ParseDot; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; use crate::{ archiver::{parent::Parent, Archiver}, - backend::dry_run::DryRunBackend, - repository::{IndexedIds, IndexedTree}, - Id, LocalSource, LocalSourceFilterOptions, LocalSourceSaveOptions, Open, PathList, - ProgressBars, Repository, RusticResult, SnapshotFile, SnapshotGroup, SnapshotGroupCriterion, - StdinSource, + backend::ignore::{LocalSource, LocalSourceFilterOptions, LocalSourceSaveOptions}, + backend::{dry_run::DryRunBackend, stdin::StdinSource}, + error::RusticResult, + id::Id, + progress::ProgressBars, + repofile::snapshotfile::{SnapshotGroup, SnapshotGroupCriterion}, + repofile::{PathList, SnapshotFile}, + repository::{IndexedIds, IndexedTree, Repository}, }; /// `backup` subcommand +#[serde_as] #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] -#[derive(Clone, Default, Debug, Deserialize)] +#[derive(Clone, Default, Debug, Deserialize, Serialize, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] -// Note: using sources and source within this struct is a hack to support serde(deny_unknown_fields) -// for deserializing the backup options from TOML -// Unfortunately we cannot work with nested flattened structures, see -// https://github.com/serde-rs/serde/issues/1547 -// A drawback is that a wrongly set "source(s) = ..." won't get correct error handling and need to be manually checked, see below. +#[setters(into)] #[allow(clippy::struct_excessive_bools)] -pub struct ParentOpts { +#[non_exhaustive] +/// Options how the backup command uses a parent snapshot. 
+pub struct ParentOptions { /// Group snapshots by any combination of host,label,paths,tags to find a suitable parent (default: host,label,paths) #[cfg_attr(feature = "clap", clap(long, short = 'g', value_name = "CRITERION",))] + #[serde_as(as = "Option")] pub group_by: Option, /// Snapshot to use as parent @@ -57,8 +59,24 @@ pub struct ParentOpts { pub ignore_inode: bool, } -impl ParentOpts { - pub fn get_parent( +impl ParentOptions { + /// Get parent snapshot. + /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bars. + /// * `S` - The type of the indexed tree. + /// + /// # Arguments + /// + /// * `repo` - The repository to use + /// * `snap` - The snapshot to use + /// * `backup_stdin` - Whether the backup is from stdin + /// + /// # Returns + /// + /// The parent snapshot id and the parent object or `None` if no parent is used. + pub(crate) fn get_parent( &self, repo: &Repository, snap: &SnapshotFile, @@ -68,7 +86,7 @@ impl ParentOpts { (true, _, _) | (false, true, _) => None, (false, false, None) => { // get suitable snapshot group from snapshot and opts.group_by. This is used to filter snapshots for the parent detection - let group = SnapshotGroup::from_sn(snap, self.group_by.unwrap_or_default()); + let group = SnapshotGroup::from_snapshot(snap, self.group_by.unwrap_or_default()); SnapshotFile::latest( repo.dbe(), |snap| snap.has_group(&group), @@ -95,9 +113,12 @@ impl ParentOpts { #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] -#[derive(Clone, Default, Debug, Deserialize)] +#[derive(Clone, Default, Debug, Deserialize, Serialize, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] -pub struct BackupOpts { +#[setters(into)] +#[non_exhaustive] +/// Options for the `backup` command. 
+pub struct BackupOptions { /// Set filename to be used when backing up from stdin #[cfg_attr( feature = "clap", @@ -110,29 +131,62 @@ pub struct BackupOpts { #[cfg_attr(feature = "clap", clap(long, value_name = "PATH"))] pub as_path: Option, + /// Dry-run mode: Don't write any data or snapshot + #[cfg_attr(feature = "clap", clap(long))] + #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] + pub dry_run: bool, + #[cfg_attr(feature = "clap", clap(flatten))] #[serde(flatten)] - pub parent_opts: ParentOpts, + /// Options how to use a parent snapshot + pub parent_opts: ParentOptions, #[cfg_attr(feature = "clap", clap(flatten))] #[serde(flatten)] + /// Options how to save entries from a local source pub ignore_save_opts: LocalSourceSaveOptions, #[cfg_attr(feature = "clap", clap(flatten))] #[serde(flatten)] + /// Options how to filter from a local source pub ignore_filter_opts: LocalSourceFilterOptions, } +/// Backup data, create a snapshot. +/// +/// # Type Parameters +/// +/// * `P` - The type of the progress bars. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to use +/// * `opts` - The backup options +/// * `source` - The source to backup +/// * `snap` - The snapshot to backup +/// +/// # Errors +/// +/// * [`PackerErrorKind::ZstdError`] - If the zstd compression level is invalid. +/// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. +/// * [`PackerErrorKind::IntConversionFailed`] - If converting the data length to u64 fails +/// * [`PackerErrorKind::SendingCrossbeamMessageFailed`] - If sending the message to the raw packer fails. +/// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. +/// * [`SnapshotFileErrorKind::OutOfRange`] - If the time is not in the range of `Local::now()` +/// +/// # Returns +/// +/// The snapshot pointing to the backup'ed data. 
pub(crate) fn backup( repo: &Repository, - opts: &BackupOpts, + opts: &BackupOptions, source: PathList, mut snap: SnapshotFile, - dry_run: bool, ) -> RusticResult { let index = repo.index(); - let backup_stdin = source == PathList::from_string("-", false)?; + let backup_stdin = source == PathList::from_string("-")?; let backup_path = if backup_stdin { vec![PathBuf::from(&opts.stdin_filename)] } else { @@ -161,7 +215,7 @@ pub(crate) fn backup( } }; - let be = DryRunBackend::new(repo.dbe().clone(), dry_run); + let be = DryRunBackend::new(repo.dbe().clone(), opts.dry_run); info!("starting to backup {source}..."); let archiver = Archiver::new(be, index.clone(), repo.config(), parent, snap)?; let p = repo.pb.progress_bytes("determining size..."); diff --git a/crates/rustic_core/src/commands/cat.rs b/crates/rustic_core/src/commands/cat.rs index 2af0ad84f..982307b32 100644 --- a/crates/rustic_core/src/commands/cat.rs +++ b/crates/rustic_core/src/commands/cat.rs @@ -3,12 +3,39 @@ use std::path::Path; use bytes::Bytes; use crate::{ + backend::{decrypt::DecryptReadBackend, FileType, ReadBackend}, + blob::{tree::Tree, BlobType}, error::CommandErrorKind, + error::RusticResult, + id::Id, + index::IndexedBackend, + progress::ProgressBars, + repofile::SnapshotFile, repository::{IndexedFull, IndexedTree, Open, Repository}, - BlobType, DecryptReadBackend, FileType, Id, IndexedBackend, ProgressBars, ReadBackend, - RusticResult, SnapshotFile, Tree, }; +/// Prints the contents of a file. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to read from. +/// * `tpe` - The type of the file. +/// * `id` - The id of the file. +/// +/// # Errors +/// +/// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string +/// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. +/// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. 
+/// +/// # Returns +/// +/// The data read. pub(crate) fn cat_file( repo: &Repository, tpe: FileType, @@ -19,6 +46,22 @@ pub(crate) fn cat_file( Ok(data) } +// TODO: Add documentation! +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to read from. +/// * `tpe` - The type of the file. +/// * `id` - The id of the file. +/// +/// # Errors +/// +/// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string pub(crate) fn cat_blob( repo: &Repository, tpe: BlobType, @@ -30,6 +73,26 @@ pub(crate) fn cat_blob( Ok(data) } +/// Prints the contents of a tree. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to read from. +/// * `snap` - The snapshot to read from. +/// * `sn_filter` - The filter to apply to the snapshot. +/// +/// # Errors +/// +/// * [`CommandErrorKind::PathIsNoDir`] - If the path is not a directory. +/// +/// # Returns +/// +/// The data read. 
pub(crate) fn cat_tree( repo: &Repository, snap: &str, diff --git a/crates/rustic_core/src/commands/check.rs b/crates/rustic_core/src/commands/check.rs index 6804294cf..493d768e9 100644 --- a/crates/rustic_core/src/commands/check.rs +++ b/crates/rustic_core/src/commands/check.rs @@ -2,26 +2,33 @@ use std::collections::HashMap; use bytes::Bytes; +use derive_setters::Setters; use itertools::Itertools; use log::{debug, error, warn}; use rayon::prelude::{IntoParallelIterator, ParallelBridge, ParallelIterator}; use zstd::stream::decode_all; use crate::{ - backend::cache::Cache, - hash, - index::binarysorted::{IndexCollector, IndexType}, + backend::{cache::Cache, decrypt::DecryptReadBackend, node::NodeType, FileType, ReadBackend}, + blob::{tree::TreeStreamerOnce, BlobType}, + crypto::hasher::hash, + error::RusticResult, + id::Id, + index::{ + binarysorted::{IndexCollector, IndexType}, + IndexBackend, IndexedBackend, + }, + progress::Progress, progress::ProgressBars, + repofile::{IndexFile, IndexPack, PackHeader, PackHeaderLength, PackHeaderRef, SnapshotFile}, repository::{Open, Repository}, - BlobType, DecryptReadBackend, FileType, Id, IndexBackend, IndexFile, IndexPack, IndexedBackend, - NodeType, PackHeader, PackHeaderLength, PackHeaderRef, Progress, ReadBackend, RusticResult, - SnapshotFile, TreeStreamerOnce, }; -/// `check` subcommand #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Clone, Copy, Debug, Default)] -pub struct CheckOpts { +#[derive(Clone, Copy, Debug, Default, Setters)] +#[setters(into)] +/// Options for the `check` command +pub struct CheckOptions { /// Don't verify the data saved in the cache #[cfg_attr(feature = "clap", clap(long, conflicts_with = "no_cache"))] pub trust_cache: bool, @@ -31,7 +38,21 @@ pub struct CheckOpts { pub read_data: bool, } -impl CheckOpts { +impl CheckOptions { + /// Runs the `check` command + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The state the repository is in. 
+ /// + /// # Arguments + /// + /// * `repo` - The repository to check + /// + /// # Errors + /// + /// If the repository is corrupted pub(crate) fn run(self, repo: &Repository) -> RusticResult<()> { let be = repo.dbe(); let cache = repo.cache(); @@ -47,7 +68,7 @@ impl CheckOpts { // TODO: Only list the files once... _ = be.list_with_size(file_type)?; - let p = pb.progress_bytes(format!("checking {file_type} in cache...")); + let p = pb.progress_bytes(format!("checking {file_type:?} in cache...")); // TODO: Make concurrency (20) customizable check_cache_files(20, cache, raw_be, file_type, &p)?; } @@ -111,13 +132,25 @@ impl CheckOpts { } } +/// Checks if all files in the backend are also in the hot backend +/// +/// # Arguments +/// +/// * `be` - The backend to check +/// * `be_hot` - The hot backend to check +/// * `file_type` - The type of the files to check +/// * `pb` - The progress bar to use +/// +/// # Errors +/// +/// If a file is missing or has a different size fn check_hot_files( be: &impl ReadBackend, be_hot: &impl ReadBackend, file_type: FileType, pb: &impl ProgressBars, ) -> RusticResult<()> { - let p = pb.progress_spinner(format!("checking {file_type} in hot repo...")); + let p = pb.progress_spinner(format!("checking {file_type:?} in hot repo...")); let mut files = be .list_with_size(file_type)? 
.into_iter() @@ -129,6 +162,7 @@ fn check_hot_files( match files.remove(&id) { None => error!("hot file Type: {file_type:?}, Id: {id} does not exist in repo"), Some(size) if size != size_hot => { + // TODO: This should be an actual error not a log entry error!("Type: {file_type:?}, Id: {id}: hot size: {size_hot}, actual size: {size}"); } _ => {} //everything ok @@ -143,6 +177,19 @@ fn check_hot_files( Ok(()) } +/// Checks if all files in the cache are also in the backend +/// +/// # Arguments +/// +/// * `concurrency` - The number of threads to use +/// * `cache` - The cache to check +/// * `be` - The backend to check +/// * `file_type` - The type of the files to check +/// * `p` - The progress bar to use +/// +/// # Errors +/// +/// If a file is missing or has a different size fn check_cache_files( _concurrency: usize, cache: &Cache, @@ -188,7 +235,22 @@ fn check_cache_files( Ok(()) } -// check if packs correspond to index +/// Check if packs correspond to index and are present in the backend +/// +/// # Arguments +/// +/// * `be` - The backend to check +/// * `hot_be` - The hot backend to check +/// * `read_data` - Whether to read the data of the packs +/// * `pb` - The progress bar to use +/// +/// # Errors +/// +/// If a pack is missing or has a different size +/// +/// # Returns +/// +/// The index collector fn check_packs( be: &impl DecryptReadBackend, hot_be: &Option, @@ -200,7 +262,7 @@ fn check_packs( let mut index_collector = IndexCollector::new(if read_data { IndexType::Full } else { - IndexType::FullTrees + IndexType::DataIds }); let mut process_pack = |p: IndexPack, check_time: bool| { @@ -265,6 +327,17 @@ fn check_packs( Ok(index_collector) } +// TODO: Add documentation +/// Checks if all packs in the backend are also in the index +/// +/// # Arguments +/// +/// * `be` - The backend to check +/// * `packs` - The packs to check +/// +/// # Errors +/// +/// If a pack is missing or has a different size fn check_packs_list(be: &impl ReadBackend, mut 
packs: HashMap) -> RusticResult<()> { for (id, size) in be.list_with_size(FileType::Pack)? { match packs.remove(&id) { @@ -282,7 +355,16 @@ fn check_packs_list(be: &impl ReadBackend, mut packs: HashMap) -> Rusti Ok(()) } -// check if all snapshots and contained trees can be loaded and contents exist in the index +/// Check if all snapshots and contained trees can be loaded and contents exist in the index +/// +/// # Arguments +/// +/// * `index` - The index to check +/// * `pb` - The progress bar to use +/// +/// # Errors +/// +/// If a snapshot or tree is missing or has a different size fn check_snapshots(index: &impl IndexedBackend, pb: &impl ProgressBars) -> RusticResult<()> { let p = pb.progress_counter("reading snapshots..."); let snap_trees: Vec<_> = index @@ -340,6 +422,22 @@ fn check_snapshots(index: &impl IndexedBackend, pb: &impl ProgressBars) -> Rusti Ok(()) } +/// Check if a pack is valid +/// +/// # Arguments +/// +/// * `be` - The backend to use +/// * `index_pack` - The pack to check +/// * `data` - The data of the pack +/// * `p` - The progress bar to use +/// +/// # Errors +/// +/// If the pack is invalid +/// +/// # Panics +/// +/// If zstd decompression fails. fn check_pack( be: &impl DecryptReadBackend, index_pack: IndexPack, diff --git a/crates/rustic_core/src/commands/config.rs b/crates/rustic_core/src/commands/config.rs index 768767c51..5462c3038 100644 --- a/crates/rustic_core/src/commands/config.rs +++ b/crates/rustic_core/src/commands/config.rs @@ -1,14 +1,45 @@ //! 
`config` subcommand use bytesize::ByteSize; +use derive_setters::Setters; use crate::{ - backend::decrypt::DecryptBackend, error::CommandErrorKind, ConfigFile, DecryptWriteBackend, - Key, Open, Repository, RusticResult, + backend::decrypt::{DecryptBackend, DecryptWriteBackend}, + crypto::aespoly1305::Key, + error::CommandErrorKind, + error::RusticResult, + repofile::ConfigFile, + repository::{Open, Repository}, }; +/// Apply the [`ConfigOptions`] to a given [`ConfigFile`] +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to apply the config to +/// * `opts` - The options to apply +/// +/// # Errors +/// +/// * [`CommandErrorKind::VersionNotSupported`] - If the version is not supported +/// * [`CommandErrorKind::CannotDowngrade`] - If the version is lower than the current version +/// * [`CommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo +/// * [`CommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported +/// * [`CommandErrorKind::SizeTooLarge`] - If the size is too large +/// * [`CommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerance percent is wrong +/// * [`CommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerance percent is wrong +/// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. +/// +/// # Returns +/// +/// Whether the config was changed pub(crate) fn apply_config( repo: &Repository, - opts: &ConfigOpts, + opts: &ConfigOptions, ) -> RusticResult { let mut new_config = repo.config().clone(); opts.apply(&mut new_config)?; @@ -20,6 +51,22 @@ pub(crate) fn apply_config( } } +/// Save a [`ConfigFile`] to the repository +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. 
+/// +/// # Arguments +/// +/// * `repo` - The repository to save the config to +/// * `new_config` - The config to save +/// * `key` - The key to encrypt the config with +/// +/// # Errors +/// +/// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. pub(crate) fn save_config( repo: &Repository, mut new_config: ConfigFile, @@ -44,8 +91,10 @@ pub(crate) fn save_config( } #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Debug, Clone, Copy, Default)] -pub struct ConfigOpts { +#[derive(Debug, Clone, Copy, Default, Setters)] +#[setters(into)] +/// Options for the `config` command, used to set repository-wide options +pub struct ConfigOptions { /// Set compression level. Allowed levels are 1 to 22 and -1 to -7, see . /// Note that 0 equals to no compression #[cfg_attr(feature = "clap", clap(long, value_name = "LEVEL"))] @@ -57,55 +106,70 @@ pub struct ConfigOpts { /// Set default packsize for tree packs. rustic tries to always produce packs greater than this value. /// Note that for large repos, this value is grown by the grown factor. - /// Defaults to 4 MiB if not set. + /// Defaults to `4 MiB` if not set. #[cfg_attr(feature = "clap", clap(long, value_name = "SIZE"))] pub set_treepack_size: Option, /// Set upper limit for default packsize for tree packs. /// Note that packs actually can get up to some MiBs larger. - /// If not set, pack sizes can grow up to approximately 4 GiB. + /// If not set, pack sizes can grow up to approximately `4 GiB`. #[cfg_attr(feature = "clap", clap(long, value_name = "SIZE"))] pub set_treepack_size_limit: Option, /// Set grow factor for tree packs. The default packsize grows by the square root of the total size of all /// tree packs multiplied with this factor. This means 32 kiB times this factor per square root of total /// treesize in GiB. - /// Defaults to 32 (= 1MB per square root of total treesize in GiB) if not set. 
+ /// Defaults to `32` (= 1MB per square root of total treesize in GiB) if not set. #[cfg_attr(feature = "clap", clap(long, value_name = "FACTOR"))] pub set_treepack_growfactor: Option, /// Set default packsize for data packs. rustic tries to always produce packs greater than this value. /// Note that for large repos, this value is grown by the grown factor. - /// Defaults to 32 MiB if not set. + /// Defaults to `32 MiB` if not set. #[cfg_attr(feature = "clap", clap(long, value_name = "SIZE"))] pub set_datapack_size: Option, /// Set grow factor for data packs. The default packsize grows by the square root of the total size of all /// data packs multiplied with this factor. This means 32 kiB times this factor per square root of total /// datasize in GiB. - /// Defaults to 32 (= 1MB per square root of total datasize in GiB) if not set. + /// Defaults to `32` (= 1MB per square root of total datasize in GiB) if not set. #[cfg_attr(feature = "clap", clap(long, value_name = "FACTOR"))] pub set_datapack_growfactor: Option, /// Set upper limit for default packsize for tree packs. /// Note that packs actually can get up to some MiBs larger. - /// If not set, pack sizes can grow up to approximately 4 GiB. + /// If not set, pack sizes can grow up to approximately `4 GiB`. #[cfg_attr(feature = "clap", clap(long, value_name = "SIZE"))] pub set_datapack_size_limit: Option, /// Set minimum tolerated packsize in percent of the targeted packsize. - /// Defaults to 30 if not set. + /// Defaults to `30` if not set. #[cfg_attr(feature = "clap", clap(long, value_name = "PERCENT"))] pub set_min_packsize_tolerate_percent: Option, /// Set maximum tolerated packsize in percent of the targeted packsize - /// A value of 0 means packs larger than the targeted packsize are always + /// A value of `0` means packs larger than the targeted packsize are always /// tolerated. Default if not set: larger packfiles are always tolerated. 
#[cfg_attr(feature = "clap", clap(long, value_name = "PERCENT"))] pub set_max_packsize_tolerate_percent: Option, } -impl ConfigOpts { +impl ConfigOptions { + /// Apply the [`ConfigOptions`] to a given [`ConfigFile`] + /// + /// # Arguments + /// + /// * `config` - The config to apply the options to + /// + /// # Errors + /// + /// * [`CommandErrorKind::VersionNotSupported`] - If the version is not supported + /// * [`CommandErrorKind::CannotDowngrade`] - If the version is lower than the current version + /// * [`CommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo + /// * [`CommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported + /// * [`CommandErrorKind::SizeTooLarge`] - If the size is too large + /// * [`CommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerate percent is wrong + /// * [`CommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerate percent is wrong pub fn apply(&self, config: &mut ConfigFile) -> RusticResult<()> { if let Some(version) = self.set_version { let range = 1..=2; diff --git a/crates/rustic_core/src/commands/copy.rs b/crates/rustic_core/src/commands/copy.rs index 0f41ee6d0..d8ba8942b 100644 --- a/crates/rustic_core/src/commands/copy.rs +++ b/crates/rustic_core/src/commands/copy.rs @@ -4,17 +4,39 @@ use log::trace; use rayon::prelude::{IntoParallelRefIterator, ParallelBridge, ParallelIterator}; use crate::{ - repository::{IndexedFull, IndexedIds, IndexedTree}, - BlobType, DecryptWriteBackend, IndexedBackend, Indexer, NodeType, Open, Packer, ProgressBars, - ReadIndex, Repository, RusticResult, SnapshotFile, TreeStreamerOnce, + backend::{decrypt::DecryptWriteBackend, node::NodeType}, + blob::{packer::Packer, tree::TreeStreamerOnce, BlobType}, + error::RusticResult, + index::{indexer::Indexer, IndexedBackend, ReadIndex}, + progress::ProgressBars, + repofile::SnapshotFile, + repository::{IndexedFull, IndexedIds, IndexedTree, Open, Repository}, }; 
+/// This struct enhances `[SnapshotFile]` with the attribute `relevant` +/// which indicates if the snapshot is relevant for copying. #[derive(Debug)] pub struct CopySnapshot { - pub relevant: bool, + /// The snapshot pub sn: SnapshotFile, + /// Whether it is relevant + pub relevant: bool, } +/// Copy the given snapshots to the destination repository. +/// +/// # Type Parameters +/// +/// * `Q` - The progress bar type. +/// * `R` - The type of the indexed tree. +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to copy from +/// * `repo_dest` - The repository to copy to +/// * `snapshots` - The snapshots to copy pub(crate) fn copy<'a, Q, R: IndexedFull, P: ProgressBars, S: IndexedIds>( repo: &Repository, repo_dest: &Repository, @@ -105,6 +127,23 @@ pub(crate) fn copy<'a, Q, R: IndexedFull, P: ProgressBars, S: IndexedIds>( Ok(()) } +/// Filter out relevant snapshots from the given list of snapshots. +/// +/// # Type Parameters +/// +/// * `F` - The type of the filter. +/// * `P` - The progress bar type. +/// * `S` - The state of the repository. +/// +/// # Arguments +/// +/// * `snaps` - The snapshots to filter +/// * `dest_repo` - The destination repository +/// * `filter` - The filter to apply to the snapshots +/// +/// # Returns +/// +/// A list of snapshots with the attribute `relevant` set to `true` if the snapshot is relevant for copying. 
pub(crate) fn relevant_snapshots( snaps: &[SnapshotFile], dest_repo: &Repository, diff --git a/crates/rustic_core/src/commands/dump.rs b/crates/rustic_core/src/commands/dump.rs index 6d7336037..e80be2ccb 100644 --- a/crates/rustic_core/src/commands/dump.rs +++ b/crates/rustic_core/src/commands/dump.rs @@ -1,11 +1,29 @@ use std::io::Write; use crate::{ - error::CommandErrorKind, + backend::node::{Node, NodeType}, + blob::BlobType, + error::{CommandErrorKind, RusticResult}, + index::IndexedBackend, repository::{IndexedFull, IndexedTree, Repository}, - BlobType, IndexedBackend, Node, NodeType, RusticResult, }; +/// Dumps the contents of a file. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to read from. +/// * `node` - The node to dump. +/// * `w` - The writer to write to. +/// +/// # Errors +/// +/// * [`CommandErrorKind::DumpNotSupported`] - If the node is not a file. 
pub(crate) fn dump( repo: &Repository, node: &Node, diff --git a/crates/rustic_core/src/commands/forget.rs b/crates/rustic_core/src/commands/forget.rs index 2eef6e54d..2fc43cc81 100644 --- a/crates/rustic_core/src/commands/forget.rs +++ b/crates/rustic_core/src/commands/forget.rs @@ -2,33 +2,47 @@ use chrono::{DateTime, Datelike, Duration, Local, Timelike}; use derivative::Derivative; +use derive_setters::Setters; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; use crate::{ - repository::Open, Id, ProgressBars, Repository, RusticResult, SnapshotFile, SnapshotGroup, - SnapshotGroupCriterion, StringList, + error::RusticResult, + id::Id, + progress::ProgressBars, + repofile::snapshotfile::{SnapshotGroup, SnapshotGroupCriterion}, + repofile::{SnapshotFile, StringList}, + repository::{Open, Repository}, }; type CheckFunction = fn(&SnapshotFile, &SnapshotFile) -> bool; #[derive(Debug, Serialize)] +/// A newtype for `[Vec]` pub struct ForgetGroups(pub Vec); #[derive(Debug, Serialize)] +/// All snapshots of a group with group and forget information pub struct ForgetGroup { + /// The group pub group: SnapshotGroup, + /// The list of snapshots within this group pub snapshots: Vec, } #[derive(Debug, Serialize)] +/// This struct enhances `[SnapshotFile]` with the attributes `keep` and `reasons` which indicates if the snapshot should be kept and why. pub struct ForgetSnapshot { + /// The snapshot pub snapshot: SnapshotFile, + /// Whether it should be kept pub keep: bool, + /// reason(s) for keeping / not keeping the snapshot pub reasons: Vec, } impl ForgetGroups { + /// Turn `ForgetGroups` into the list of all snapshot IDs to remove. pub fn into_forget_ids(self) -> Vec { self.0 .into_iter() @@ -41,6 +55,23 @@ impl ForgetGroups { } } +/// Get the list of snapshots to forget. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. 
+/// +/// # Arguments +/// +/// * `repo` - The repository to use +/// * `keep` - The keep options to use +/// * `group_by` - The criterion to group snapshots by +/// * `filter` - The filter to apply to the snapshots +/// +/// # Returns +/// +/// The list of snapshot groups with the corresponding snapshots and forget information pub(crate) fn get_forget_snapshots( repo: &Repository, keep: &KeepOptions, @@ -64,10 +95,12 @@ pub(crate) fn get_forget_snapshots( #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] #[serde_as] -#[derive(Clone, Debug, PartialEq, Eq, Derivative, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Derivative, Deserialize, Setters)] #[derivative(Default)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] +#[setters(into)] #[non_exhaustive] +/// Options which snapshots should be kept. Used by the `forget` command. pub struct KeepOptions { /// Keep snapshots with this taglist (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long, value_name = "TAG[,TAG,..]"))] @@ -225,6 +258,26 @@ pub struct KeepOptions { pub keep_within_yearly: humantime::Duration, } +/// Overwrite the value of `left` with `right` if `left` is zero. +/// +/// This is used to overwrite the default values of `KeepOptions` with the values from the config file. 
+/// +/// # Arguments +/// +/// * `left` - The value to overwrite +/// * `right` - The value to overwrite with +/// +/// # Example +/// +/// ``` +/// use rustic_core::commands::forget::overwrite_zero_duration; +/// use humantime::Duration; +/// +/// let mut left = "0s".parse::().unwrap().into(); +/// let right = "60s".parse::().unwrap().into(); +/// overwrite_zero_duration(&mut left, right); +/// assert_eq!(left, "60s".parse::().unwrap().into()); +/// ``` #[cfg(feature = "merge")] fn overwrite_zero_duration(left: &mut humantime::Duration, right: humantime::Duration) { if *left == std::time::Duration::ZERO.into() { @@ -232,46 +285,134 @@ fn overwrite_zero_duration(left: &mut humantime::Duration, right: humantime::Dur } } +/// Always return false +/// +/// # Arguments +/// +/// * `_sn1` - The first snapshot +/// * `_sn2` - The second snapshot const fn always_false(_sn1: &SnapshotFile, _sn2: &SnapshotFile) -> bool { false } +/// Evaluate the year of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the year of the snapshots is equal fn equal_year(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() } +/// Evaluate the half year of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the half year of the snapshots is equal fn equal_half_year(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() && t1.month0() / 6 == t2.month0() / 6 } +/// Evaluate the quarter year of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the quarter year of the snapshots is equal fn equal_quarter_year(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, 
sn2.time); t1.year() == t2.year() && t1.month0() / 3 == t2.month0() / 3 } +/// Evaluate the month of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the month of the snapshots is equal fn equal_month(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() && t1.month() == t2.month() } +/// Evaluate the week of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the week of the snapshots is equal fn equal_week(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() && t1.iso_week().week() == t2.iso_week().week() } +/// Evaluate the day of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the day of the snapshots is equal fn equal_day(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() && t1.ordinal() == t2.ordinal() } +/// Evaluate the hours of the given snapshots +/// +/// # Arguments +/// +/// * `sn1` - The first snapshot +/// * `sn2` - The second snapshot +/// +/// # Returns +/// +/// Whether the hours of the snapshots are equal fn equal_hour(sn1: &SnapshotFile, sn2: &SnapshotFile) -> bool { let (t1, t2) = (sn1.time, sn2.time); t1.year() == t2.year() && t1.ordinal() == t2.ordinal() && t1.hour() == t2.hour() } impl KeepOptions { + /// Check if the given snapshot matches the keep options. 
+ /// + /// # Arguments + /// + /// * `sn` - The snapshot to check + /// * `last` - The last snapshot + /// * `has_next` - Whether there is a next snapshot + /// * `latest_time` - The time of the latest snapshot + /// + /// # Returns + /// + /// The list of reasons why the snapshot should be kept fn matches( &mut self, sn: &SnapshotFile, @@ -369,6 +510,18 @@ impl KeepOptions { reason } + /// Apply the `[KeepOptions]` to the given list of [`SnapshotFile`]s returning the corresponding + /// list of [`ForgetSnapshot`]s + /// + /// # Arguments + /// + /// * `snapshots` - The list of snapshots to apply the options to + /// * `now` - The current time + /// + /// # Returns + /// + /// The list of snapshots with the attribute `keep` set to `true` if the snapshot should be kept and + /// `reasons` set to the list of reasons why the snapshot should be kept pub fn apply( &self, mut snapshots: Vec, diff --git a/crates/rustic_core/src/commands/init.rs b/crates/rustic_core/src/commands/init.rs index 538d9fe08..01bdf238c 100644 --- a/crates/rustic_core/src/commands/init.rs +++ b/crates/rustic_core/src/commands/init.rs @@ -3,15 +3,43 @@ use log::info; use crate::{ - chunker::random_poly, commands::config::save_config, ConfigFile, ConfigOpts, Id, Key, KeyOpts, - Repository, RusticResult, WriteBackend, + backend::WriteBackend, + chunker::random_poly, + commands::config::{save_config, ConfigOptions}, + commands::key::KeyOptions, + crypto::aespoly1305::Key, + error::RusticResult, + id::Id, + repofile::ConfigFile, + repository::Repository, }; +/// Initialize a new repository. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to initialize. +/// * `pass` - The password to encrypt the key with. +/// * `key_opts` - The options to create the key with. +/// * `config_opts` - The options to create the config with. 
+/// +/// # Errors +/// +/// * [`PolynomialErrorKind::NoSuitablePolynomialFound`] - If no polynomial could be found in one million tries. +/// +/// # Returns +/// +/// A tuple of the key and the config file. pub(crate) fn init( repo: &Repository, pass: &str, - key_opts: &KeyOpts, - config_opts: &ConfigOpts, + key_opts: &KeyOptions, + config_opts: &ConfigOptions, ) -> RusticResult<(Key, ConfigFile)> { // Create config first to allow catching errors from here without writing anything let repo_id = Id::random(); @@ -25,10 +53,27 @@ pub(crate) fn init( Ok((key, config)) } +/// Initialize a new repository with a given config. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to initialize. +/// * `pass` - The password to encrypt the key with. +/// * `key_opts` - The options to create the key with. +/// * `config` - The config to use. +/// +/// # Returns +/// +/// The key used to encrypt the config. pub(crate) fn init_with_config( repo: &Repository, pass: &str, - key_opts: &KeyOpts, + key_opts: &KeyOptions, config: &ConfigFile, ) -> RusticResult { repo.be.create()?; diff --git a/crates/rustic_core/src/commands/key.rs b/crates/rustic_core/src/commands/key.rs index cbc72b766..ed491439a 100644 --- a/crates/rustic_core/src/commands/key.rs +++ b/crates/rustic_core/src/commands/key.rs @@ -1,12 +1,22 @@ //! 
`key` subcommand +use derive_setters::Setters; + use crate::{ - error::CommandErrorKind, hash, FileType, Id, Key, KeyFile, Open, Repository, RusticResult, - WriteBackend, + backend::{FileType, WriteBackend}, + crypto::aespoly1305::Key, + crypto::hasher::hash, + error::CommandErrorKind, + error::RusticResult, + id::Id, + repofile::KeyFile, + repository::{Open, Repository}, }; #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Debug, Clone, Default)] -pub struct KeyOpts { +#[derive(Debug, Clone, Default, Setters)] +#[setters(into)] +/// Options for the `key` command. These are used when creating a new key. +pub struct KeyOptions { /// Set 'hostname' in public key information #[cfg_attr(feature = "clap", clap(long))] pub hostname: Option, @@ -20,7 +30,26 @@ pub struct KeyOpts { pub with_created: bool, } -impl KeyOpts { +impl KeyOptions { + /// Add the current key to the repository. + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The state the repository is in. + /// + /// # Arguments + /// + /// * `repo` - The repository to add the key to. + /// * `pass` - The password to encrypt the key with. + /// + /// # Errors + /// + /// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized. + /// + /// # Returns + /// + /// The id of the key. pub(crate) fn add_key( &self, repo: &Repository, @@ -30,6 +59,21 @@ impl KeyOpts { self.add(repo, pass, *key) } + /// Initialize a new key. + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The state the repository is in. + /// + /// # Arguments + /// + /// * `repo` - The repository to add the key to. + /// * `pass` - The password to encrypt the key with. + /// + /// # Returns + /// + /// A tuple of the key and the id of the key. pub(crate) fn init_key( &self, repo: &Repository, @@ -40,6 +84,21 @@ impl KeyOpts { Ok((key, self.add(repo, pass, key)?)) } + /// Add a key to the repository. 
+ /// + /// # Arguments + /// + /// * `repo` - The repository to add the key to. + /// * `pass` - The password to encrypt the key with. + /// * `key` - The key to add. + /// + /// # Errors + /// + /// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized. + /// + /// # Returns + /// + /// The id of the key. fn add(&self, repo: &Repository, pass: &str, key: Key) -> RusticResult { let ko = self.clone(); let keyfile = KeyFile::generate(key, &pass, ko.hostname, ko.username, ko.with_created)?; diff --git a/crates/rustic_core/src/commands/merge.rs b/crates/rustic_core/src/commands/merge.rs index 3c0422c60..36f9ae0aa 100644 --- a/crates/rustic_core/src/commands/merge.rs +++ b/crates/rustic_core/src/commands/merge.rs @@ -5,11 +5,33 @@ use std::cmp::Ordering; use chrono::Local; use crate::{ - blob::tree, error::CommandErrorKind, repofile::snapshotfile::SnapshotSummary, - repository::IndexedTree, BlobType, DecryptWriteBackend, Id, Indexer, Node, Open, Packer, - PathList, Progress, ProgressBars, ReadIndex, Repository, RusticResult, SnapshotFile, Tree, + backend::{decrypt::DecryptWriteBackend, node::Node}, + blob::{ + packer::Packer, + tree::{self, Tree}, + BlobType, + }, + error::CommandErrorKind, + error::RusticResult, + id::Id, + index::{indexer::Indexer, ReadIndex}, + progress::{Progress, ProgressBars}, + repofile::{PathList, SnapshotFile, SnapshotSummary}, + repository::{IndexedTree, Repository}, }; +/// Merges the given snapshots into a new snapshot. 
+/// +/// # Arguments +/// +/// * `repo` - The repository to merge into +/// * `snapshots` - The snapshots to merge +/// * `cmp` - The comparison function for the trees +/// * `snap` - The snapshot to merge into +/// +/// # Returns +/// +/// The merged snapshot pub(crate) fn merge_snapshots( repo: &Repository, snapshots: &[SnapshotFile], @@ -18,7 +40,7 @@ pub(crate) fn merge_snapshots( ) -> RusticResult { let now = Local::now(); - let paths = PathList::from_strings(snapshots.iter().flat_map(|snap| snap.paths.iter()), false)?; + let paths = PathList::from_strings(snapshots.iter().flat_map(|snap| snap.paths.iter())).merge(); snap.paths.set_paths(&paths.paths())?; // set snapshot time to time of latest snapshot to be merged @@ -40,6 +62,27 @@ pub(crate) fn merge_snapshots( Ok(snap) } +/// Merges the given trees into a new tree. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to merge into +/// * `trees` - The trees to merge +/// * `cmp` - The comparison function for the trees +/// * `summary` - The summary to update +/// +/// # Errors +/// +/// * [`CommandErrorKind::ConversionToU64Failed`] - If the size of the tree is too large +/// +/// # Returns +/// +/// The merged tree pub(crate) fn merge_trees( repo: &Repository, trees: &[Id], diff --git a/crates/rustic_core/src/commands/prune.rs b/crates/rustic_core/src/commands/prune.rs index e428e7dc8..dba5afbf3 100644 --- a/crates/rustic_core/src/commands/prune.rs +++ b/crates/rustic_core/src/commands/prune.rs @@ -1,5 +1,6 @@ //! `prune` subcommand +use derive_setters::Setters; /// App-local prelude includes `app_reader()`/`app_writer()`/`app_config()` /// accessors along with logging macros. Customize as you see fit. 
use log::{info, warn}; @@ -19,26 +20,40 @@ use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use crate::{ - blob::packer::{PackSizer, Repacker}, + backend::{ + decrypt::{DecryptReadBackend, DecryptWriteBackend}, + node::NodeType, + FileType, ReadBackend, + }, + blob::{ + packer::{PackSizer, Repacker}, + tree::TreeStreamerOnce, + BlobType, BlobTypeMap, Initialize, + }, error::CommandErrorKind, - index::binarysorted::{IndexCollector, IndexType}, - repository::Open, - BlobType, BlobTypeMap, DecryptReadBackend, DecryptWriteBackend, FileType, HeaderEntry, Id, - IndexBackend, IndexBlob, IndexFile, IndexPack, IndexedBackend, Indexer, Initialize, NodeType, - Progress, ProgressBars, ReadBackend, ReadIndex, Repository, RusticResult, SnapshotFile, Sum, - TreeStreamerOnce, + error::RusticResult, + id::Id, + index::{ + binarysorted::{IndexCollector, IndexType}, + indexer::Indexer, + IndexBackend, IndexedBackend, ReadIndex, + }, + progress::{Progress, ProgressBars}, + repofile::{HeaderEntry, IndexBlob, IndexFile, IndexPack, SnapshotFile}, + repository::{Open, Repository}, }; pub(super) mod constants { + /// Minimum size of an index file to be considered for pruning pub(super) const MIN_INDEX_LEN: usize = 10_000; } -/// `prune` subcommand #[allow(clippy::struct_excessive_bools)] #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Debug, Clone)] -#[cfg_attr(feature = "clap", group(id = "prune_opts"))] -pub struct PruneOpts { +#[derive(Debug, Clone, Setters)] +#[setters(into)] +/// Options for the `prune` command +pub struct PruneOptions { /// Define maximum data to repack in % of reposize or as size (e.g. '5b', '2 kB', '3M', '4TiB') or 'unlimited' #[cfg_attr( feature = "clap", @@ -70,7 +85,10 @@ pub struct PruneOpts { pub keep_delete: humantime::Duration, /// Delete files immediately instead of marking them. This also removes all files already marked for deletion. 
- /// WARNING: Only use if you are sure the repository is not accessed by parallel processes! + /// + /// # Warning + /// + /// Only use if you are sure the repository is not accessed by parallel processes! #[cfg_attr(feature = "clap", clap(long))] pub instant_delete: bool, @@ -96,10 +114,18 @@ pub struct PruneOpts { pub no_resize: bool, #[cfg_attr(feature = "clap", clap(skip))] + /// Ignore these snapshots when looking for data-still-in-use. + /// + /// # Warning + /// + /// Use this option with care! + /// + /// If you specify snapshots which are not deleted, running the resulting `PrunePlan` + /// will remove data which is used within those snapshots! pub ignore_snaps: Vec, } -impl Default for PruneOpts { +impl Default for PruneOptions { fn default() -> Self { Self { max_repack: LimitOption::Unlimited, @@ -117,7 +143,22 @@ impl Default for PruneOpts { } } -impl PruneOpts { +impl PruneOptions { + /// Get a `PrunePlan` from the given `PruneOptions`. + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The state the repository is in. + /// + /// # Arguments + /// + /// * `repo` - The repository to get the `PrunePlan` for. 
+ /// + /// # Errors + /// + /// * [`CommandErrorKind::RepackUncompressedRepoV1`] - If `repack_uncompressed` is set and the repository is a version 1 repository + /// * [`CommandErrorKind::FromOutOfRangeError`] - If `keep_pack` or `keep_delete` is out of range pub fn get_plan( &self, repo: &Repository, @@ -189,10 +230,14 @@ impl PruneOpts { } } +/// Enum to specify a size limit #[derive(Clone, Copy, Debug)] pub enum LimitOption { + /// Size in bytes Size(ByteSize), + /// Size in percentage of repository size Percentage(u64), + /// No limit Unlimited, } @@ -211,97 +256,180 @@ impl FromStr for LimitOption { } } +/// Statistics about what is deleted or kept within `prune` #[derive(Default, Debug, Clone, Copy)] pub struct DeleteStats { + /// Number of blobs to remove pub remove: u64, + /// Number of blobs to recover pub recover: u64, + /// Number of blobs to keep pub keep: u64, } impl DeleteStats { + /// Returns the total number of blobs pub const fn total(&self) -> u64 { self.remove + self.recover + self.keep } } #[derive(Debug, Default, Clone, Copy)] +/// Statistics about packs within `prune` pub struct PackStats { + /// Number of used packs pub used: u64, + /// Number of partly used packs pub partly_used: u64, + /// Number of unused packs pub unused: u64, // this equals to packs-to-remove + /// Number of packs-to-repack pub repack: u64, + /// Number of packs-to-keep pub keep: u64, } + #[derive(Debug, Default, Clone, Copy, Add)] +/// Statistics about sizes within `prune` pub struct SizeStats { + /// Number of used blobs pub used: u64, + /// Number of unused blobs pub unused: u64, + /// Number of blobs to remove pub remove: u64, + /// Number of blobs to repack pub repack: u64, + /// Number of blobs to remove after repacking pub repackrm: u64, } impl SizeStats { + /// Returns the total number of blobs pub const fn total(&self) -> u64 { self.used + self.unused } + + /// Returns the total number of blobs after pruning pub const fn total_after_prune(&self) -> u64 { 
self.used + self.unused_after_prune() } + + /// Returns the total number of unused blobs after pruning pub const fn unused_after_prune(&self) -> u64 { self.unused - self.remove - self.repackrm } } +/// Statistics about a [`PrunePlan`] #[derive(Default, Debug)] pub struct PruneStats { + /// Statistics about pack count pub packs_to_delete: DeleteStats, + /// Statistics about pack sizes pub size_to_delete: DeleteStats, + /// Statistics about current pack situation pub packs: PackStats, + /// Statistics about blobs in the repository pub blobs: BlobTypeMap, + /// Statistics about total sizes of blobs in the repository pub size: BlobTypeMap, + /// Number of unreferenced pack files pub packs_unref: u64, + /// total size of unreferenced pack files pub size_unref: u64, + /// Number of index files pub index_files: u64, + /// Number of index files which will be rebuilt during the prune pub index_files_rebuild: u64, } +impl PruneStats { + /// Compute statistics about blobs of all types + pub fn blobs_sum(&self) -> SizeStats { + self.blobs + .values() + .fold(SizeStats::default(), |acc, x| acc + *x) + } + + /// Compute total size statistics for blobs of all types + pub fn size_sum(&self) -> SizeStats { + self.size + .values() + .fold(SizeStats::default(), |acc, x| acc + *x) + } +} + +// TODO: add documentation! #[derive(Debug)] struct PruneIndex { + /// The id of the index file id: Id, + /// Whether the index file was modified modified: bool, + /// The packs in the index file packs: Vec, } impl PruneIndex { + // TODO: add documentation! 
fn len(&self) -> usize { self.packs.iter().map(|p| p.blobs.len()).sum() } } +/// Task to be executed by a `PrunePlan` on Packs #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum PackToDo { + // TODO: Add documentation Undecided, + /// The pack should be kept Keep, + /// The pack should be repacked Repack, + /// The pack should be marked for deletion MarkDelete, + // TODO: Add documentation KeepMarked, + // TODO: Add documentation KeepMarkedAndCorrect, + /// The pack should be recovered Recover, + /// The pack should be deleted Delete, } +impl Default for PackToDo { + fn default() -> Self { + Self::Undecided + } +} + +/// A pack which is to be pruned #[derive(Debug)] struct PrunePack { + /// The id of the pack id: Id, + /// The type of the pack blob_type: BlobType, + /// The size of the pack size: u32, + /// Whether the pack is marked for deletion delete_mark: bool, + /// The task to be executed on the pack to_do: PackToDo, + /// The time the pack was created time: Option>, + /// The blobs in the pack blobs: Vec, } impl PrunePack { + /// Create a new `PrunePack` from an `IndexPack` + /// + /// # Arguments + /// + /// * `p` - The `IndexPack` to create the `PrunePack` from + /// * `delete_mark` - Whether the pack is marked for deletion fn from_index_pack(p: IndexPack, delete_mark: bool) -> Self { Self { id: p.id, @@ -314,14 +442,25 @@ impl PrunePack { } } + /// Create a new `PrunePack` from an `IndexPack` which is not marked for deletion + /// + /// # Arguments + /// + /// * `p` - The `IndexPack` to create the `PrunePack` from fn from_index_pack_unmarked(p: IndexPack) -> Self { Self::from_index_pack(p, false) } + /// Create a new `PrunePack` from an `IndexPack` which is marked for deletion + /// + /// # Arguments + /// + /// * `p` - The `IndexPack` to create the `PrunePack` from fn from_index_pack_marked(p: IndexPack) -> Self { Self::from_index_pack(p, true) } + /// Convert the `PrunePack` into an `IndexPack` fn into_index_pack(self) -> IndexPack { IndexPack { id: 
self.id, @@ -331,6 +470,11 @@ impl PrunePack { } } + /// Convert the `PrunePack` into an `IndexPack` with the given time + /// + /// # Arguments + /// + /// * `time` - The time to set fn into_index_pack_with_time(self, time: DateTime) -> IndexPack { IndexPack { id: self.id, @@ -340,6 +484,13 @@ impl PrunePack { } } + /// Set the task to be executed on the pack + /// + /// # Arguments + /// + /// * `todo` - The task to be executed on the pack + /// * `pi` - The `PackInfo` of the pack + /// * `stats` - The `PruneStats` of the `PrunePlan` fn set_todo(&mut self, todo: PackToDo, pi: &PackInfo, stats: &mut PruneStats) { let tpe = self.blob_type; match todo { @@ -385,6 +536,7 @@ impl PrunePack { self.to_do = todo; } + /// Returns whether the pack is compressed fn is_compressed(&self) -> bool { self.blobs .iter() @@ -392,25 +544,42 @@ impl PrunePack { } } +/// Reasons why a pack should be repacked #[derive(PartialEq, Eq, Debug)] enum RepackReason { + /// The pack is partly used PartlyUsed, + /// The pack is to be compressed ToCompress, + /// The pack has a size mismatch SizeMismatch, } -use RepackReason::{PartlyUsed, SizeMismatch, ToCompress}; +/// A plan what should be repacked or removed by a `prune` run #[derive(Debug)] pub struct PrunePlan { + /// The time the plan was created time: DateTime, + /// The ids of the blobs which are used used_ids: HashMap, + /// The ids of the existing packs existing_packs: HashMap, + /// The packs which should be repacked repack_candidates: Vec<(PackInfo, RepackReason, usize, usize)>, + /// The index files index_files: Vec, + /// `prune` statistics pub stats: PruneStats, } impl PrunePlan { + /// Create a new `PrunePlan` + /// + /// # Arguments + /// + /// * `used_ids` - The ids of the blobs which are used + /// * `existing_packs` - The ids of the existing packs + /// * `index_files` - The index files fn new( used_ids: HashMap, existing_packs: HashMap, @@ -478,6 +647,7 @@ impl PrunePlan { } } + /// This function counts the number of times 
a blob is used in the index files. fn count_used_blobs(&mut self) { for blob in self .index_files @@ -494,8 +664,12 @@ impl PrunePlan { } } + /// This function checks whether all used blobs are present in the index files. + /// + /// # Errors + /// + /// * [`CommandErrorKind::BlobsMissing`] - If a blob is missing fn check(&self) -> RusticResult<()> { - // check that all used blobs are present in index for (id, count) in &self.used_ids { if *count == 0 { return Err(CommandErrorKind::BlobsMissing(*id).into()); @@ -504,6 +678,20 @@ impl PrunePlan { Ok(()) } + /// Decides what to do with the packs + /// + /// # Arguments + /// + /// * `keep_pack` - The minimum duration to keep packs before repacking or removing + /// * `keep_delete` - The minimum duration to keep packs marked for deletion + /// * `repack_cacheable_only` - Whether to only repack cacheable packs + /// * `repack_uncompressed` - Whether to repack packs containing uncompressed blobs + /// * `repack_all` - Whether to repack all packs + /// * `pack_sizer` - The `PackSizer` for the packs + /// + /// # Errors + /// + // TODO: add documentation! 
fn decide_packs( &mut self, keep_pack: Duration, @@ -551,12 +739,16 @@ impl PrunePlan { if too_young || keep_uncacheable { pack.set_todo(PackToDo::Keep, &pi, &mut self.stats); } else if to_compress || repack_all { - self.repack_candidates - .push((pi, ToCompress, index_num, pack_num)); + self.repack_candidates.push(( + pi, + RepackReason::ToCompress, + index_num, + pack_num, + )); } else if size_mismatch { self.repack_candidates.push(( pi, - SizeMismatch, + RepackReason::SizeMismatch, index_num, pack_num, )); @@ -574,8 +766,12 @@ impl PrunePlan { pack.set_todo(PackToDo::Keep, &pi, &mut self.stats); } else { // other partly used pack => candidate for repacking - self.repack_candidates - .push((pi, PartlyUsed, index_num, pack_num)); + self.repack_candidates.push(( + pi, + RepackReason::PartlyUsed, + index_num, + pack_num, + )); } } (true, 0, _) => match pack.time { @@ -599,6 +795,19 @@ impl PrunePlan { Ok(()) } + /// Decides if packs should be repacked + /// + /// # Arguments + /// + /// * `max_repack` - The maximum size of packs to repack + /// * `max_unused` - The maximum size of unused blobs + /// * `repack_uncompressed` - Whether to repack packs containing uncompressed blobs + /// * `no_resize` - Whether to resize packs + /// * `pack_sizer` - The `PackSizer` for the packs + /// + /// # Errors + /// + // TODO: add documentation! 
fn decide_repack( &mut self, max_repack: &LimitOption, @@ -614,13 +823,13 @@ impl PrunePlan { // if percentag is given, we want to have // unused <= p/100 * size_after = p/100 * (size_used + unused) // which equals (1 - p/100) * unused <= p/100 * size_used - (false, LimitOption::Percentage(p)) => (p * self.stats.size.sum().used) / (100 - p), + (false, LimitOption::Percentage(p)) => (p * self.stats.size_sum().used) / (100 - p), }; let max_repack = match max_repack { LimitOption::Unlimited => u64::MAX, LimitOption::Size(size) => size.as_u64(), - LimitOption::Percentage(p) => (p * self.stats.size.sum().total()) / 100, + LimitOption::Percentage(p) => (p * self.stats.size_sum().total()) / 100, }; self.repack_candidates.sort_unstable_by_key(|rc| rc.0); @@ -635,13 +844,13 @@ impl PrunePlan { let total_repack_size: u64 = repack_size.into_values().sum(); if total_repack_size + u64::from(pi.used_size) >= max_repack - || (self.stats.size.sum().unused_after_prune() < max_unused - && repack_reason == PartlyUsed + || (self.stats.size_sum().unused_after_prune() < max_unused + && repack_reason == RepackReason::PartlyUsed && blob_type == BlobType::Data) - || (repack_reason == SizeMismatch && no_resize) + || (repack_reason == RepackReason::SizeMismatch && no_resize) { pack.set_todo(PackToDo::Keep, &pi, &mut self.stats); - } else if repack_reason == SizeMismatch { + } else if repack_reason == RepackReason::SizeMismatch { resize_packs[blob_type].push((pi, index_num, pack_num)); repack_size[blob_type] += u64::from(pi.used_size); } else { @@ -667,6 +876,13 @@ impl PrunePlan { } } + /// Checks if the existing packs are ok + /// + /// # Errors + /// + /// * [`CommandErrorKind::NoDecision`] - If a pack is undecided + /// * [`CommandErrorKind::PackSizeNotMatching`] - If the size of a pack does not match + /// * [`CommandErrorKind::PackNotExisting`] - If a pack does not exist fn check_existing_packs(&mut self) -> RusticResult<()> { for pack in self.index_files.iter().flat_map(|index| 
&index.packs) { let existing_size = self.existing_packs.remove(&pack.id); @@ -683,7 +899,7 @@ impl PrunePlan { }; match pack.to_do { - PackToDo::Undecided => return Err(CommandErrorKind::NoDecicion(pack.id).into()), + PackToDo::Undecided => return Err(CommandErrorKind::NoDecision(pack.id).into()), PackToDo::Keep | PackToDo::Recover => { for blob in &pack.blobs { _ = self.used_ids.remove(&blob.id); @@ -712,6 +928,11 @@ impl PrunePlan { Ok(()) } + /// Filter out index files which do not need processing + /// + /// # Arguments + /// + /// * `instant_delete` - Whether to instantly delete unreferenced packs fn filter_index_files(&mut self, instant_delete: bool) { let mut any_must_modify = false; self.stats.index_files = self.index_files.len() as u64; @@ -741,6 +962,7 @@ impl PrunePlan { // repacks come at end } + /// Get the list of packs-to-repack from the [`PrunePlan`]. pub fn repack_packs(&self) -> Vec { self.index_files .iter() @@ -750,11 +972,17 @@ impl PrunePlan { .collect() } + /// Perform the pruning on the given repository. 
+ /// + /// # Arguments + /// + /// * `repo` - The repository to prune + /// * `opts` - The options for the pruning #[allow(clippy::significant_drop_tightening)] pub fn do_prune( self, repo: &Repository, - opts: &PruneOpts, + opts: &PruneOptions, ) -> RusticResult<()> { repo.warm_up_wait(self.repack_packs().into_iter())?; let be = repo.dbe(); @@ -829,7 +1057,7 @@ impl PrunePlan { (false, false) => pb.progress_spinner("rebuilding index..."), }; - p.set_length(self.stats.size.sum().repack - self.stats.size.sum().repackrm); + p.set_length(self.stats.size_sum().repack - self.stats.size_sum().repackrm); let mut indexes_remove = Vec::new(); let tree_packs_remove = Arc::new(Mutex::new(Vec::new())); @@ -859,7 +1087,7 @@ impl PrunePlan { .into_par_iter() .try_for_each(|pack| -> RusticResult<_> { match pack.to_do { - PackToDo::Undecided => return Err(CommandErrorKind::NoDecicion(pack.id).into()), + PackToDo::Undecided => return Err(CommandErrorKind::NoDecision(pack.id).into()), PackToDo::Keep => { // keep pack: add to new index let pack = pack.into_index_pack(); @@ -950,12 +1178,18 @@ impl PrunePlan { } } +/// `PackInfo` contains information about a pack which is needed to decide what to do with the pack. #[derive(PartialEq, Eq, Clone, Copy, Debug)] struct PackInfo { + /// What type of blobs are in the pack blob_type: BlobType, + /// The number of used blobs in the pack used_blobs: u16, + /// The number of unused blobs in the pack unused_blobs: u16, + /// The size of the used blobs in the pack used_size: u32, + /// The size of the unused blobs in the pack unused_size: u32, } @@ -979,6 +1213,12 @@ impl Ord for PackInfo { } impl PackInfo { + /// Create a `PackInfo` from a `PrunePack`. 
+ /// + /// # Arguments + /// + /// * `pack` - The `PrunePack` to create the `PackInfo` from + /// * `used_ids` - The `HashMap` of used ids fn from_pack(pack: &PrunePack, used_ids: &mut HashMap) -> Self { let mut pi = Self { blob_type: pack.blob_type, @@ -1054,7 +1294,17 @@ impl PackInfo { } } -// find used blobs in repo +/// Find used blobs in repo and return a map of used ids. +/// +/// # Arguments +/// +/// * `index` - The index to use +/// * `ignore_snaps` - The snapshots to ignore +/// * `pb` - The progress bars +/// +/// # Errors +/// +/// * [`CommandErrorKind::Backend`] - If an error occurs while reading from the backend fn find_used_blobs( index: &impl IndexedBackend, ignore_snaps: &[Id], diff --git a/crates/rustic_core/src/commands/repair/index.rs b/crates/rustic_core/src/commands/repair/index.rs index efcda9f4b..fbb4e806b 100644 --- a/crates/rustic_core/src/commands/repair/index.rs +++ b/crates/rustic_core/src/commands/repair/index.rs @@ -1,23 +1,44 @@ //! `repair` index subcommand +use derive_setters::Setters; use log::{debug, info, warn}; use std::collections::HashMap; use crate::{ - error::CommandErrorKind, DecryptReadBackend, DecryptWriteBackend, FileType, IndexFile, - IndexPack, Indexer, Open, PackHeader, PackHeaderRef, Progress, ProgressBars, ReadBackend, - Repository, RusticResult, WriteBackend, + backend::{ + decrypt::{DecryptReadBackend, DecryptWriteBackend}, + FileType, ReadBackend, WriteBackend, + }, + error::{CommandErrorKind, RusticResult}, + index::indexer::Indexer, + progress::{Progress, ProgressBars}, + repofile::{IndexFile, IndexPack, PackHeader, PackHeaderRef}, + repository::{Open, Repository}, }; #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Default, Debug, Clone, Copy)] +#[derive(Default, Debug, Clone, Copy, Setters)] +#[setters(into)] +#[non_exhaustive] +/// Options for the `repair index` command pub struct RepairIndexOptions { - // Read all data packs, i.e. 
completely re-create the index + /// Read all data packs, i.e. completely re-create the index #[cfg_attr(feature = "clap", clap(long))] - read_all: bool, + pub read_all: bool, } impl RepairIndexOptions { + /// Runs the `repair index` command + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type + /// * `S` - The state the repository is in + /// + /// # Arguments + /// + /// * `repo` - The repository to repair + /// * `dry_run` - Whether to actually modify the repository or just print what would be done pub(crate) fn repair( self, repo: &Repository, diff --git a/crates/rustic_core/src/commands/repair/snapshots.rs b/crates/rustic_core/src/commands/repair/snapshots.rs index a46e4efcf..5ff8c5a49 100644 --- a/crates/rustic_core/src/commands/repair/snapshots.rs +++ b/crates/rustic_core/src/commands/repair/snapshots.rs @@ -1,36 +1,59 @@ //! `repair snapshots` subcommand +use derive_setters::Setters; use log::{info, warn}; use std::collections::{HashMap, HashSet}; use crate::{ - repository::{IndexedFull, IndexedTree}, - BlobType, DecryptWriteBackend, FileType, Id, IndexedBackend, Indexer, NodeType, Open, Packer, - ProgressBars, ReadIndex, Repository, RusticResult, SnapshotFile, StringList, Tree, + backend::{decrypt::DecryptWriteBackend, node::NodeType, FileType}, + blob::{packer::Packer, tree::Tree, BlobType}, + error::RusticResult, + id::Id, + index::{indexer::Indexer, IndexedBackend, ReadIndex}, + progress::ProgressBars, + repofile::{SnapshotFile, StringList}, + repository::{IndexedFull, IndexedTree, Repository}, }; #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Default, Debug)] +#[derive(Debug, Setters)] +#[setters(into)] +/// Options for the `repair snapshots` command pub struct RepairSnapshotsOptions { - /// Also remove defect snapshots - WARNING: This can result in data loss! + /// Also remove defect snapshots + /// + /// # Warning + /// + /// This can result in data loss! 
#[cfg_attr(feature = "clap", clap(long))] - delete: bool, + pub delete: bool, /// Append this suffix to repaired directory or file name #[cfg_attr( feature = "clap", clap(long, value_name = "SUFFIX", default_value = ".repaired") )] - suffix: String, + pub suffix: String, /// Tag list to set on repaired snapshots (can be specified multiple times) #[cfg_attr( feature = "clap", clap(long, value_name = "TAG[,TAG,..]", default_value = "repaired") )] - tag: Vec, + pub tag: Vec, } +impl Default for RepairSnapshotsOptions { + fn default() -> Self { + Self { + delete: true, + suffix: ".repaired".to_string(), + tag: vec![StringList(vec!["repaired".to_string()])], + } + } +} + +// TODO: add documentation #[derive(Clone, Copy)] enum Changed { This, @@ -39,6 +62,18 @@ enum Changed { } impl RepairSnapshotsOptions { + /// Runs the `repair snapshots` command + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type + /// * `S` - The type of the indexed tree. + /// + /// # Arguments + /// + /// * `repo` - The repository to repair + /// * `snapshots` - The snapshots to repair + /// * `dry_run` - Whether to actually modify the repository or just print what would be done pub(crate) fn repair( &self, repo: &Repository, @@ -118,6 +153,24 @@ impl RepairSnapshotsOptions { Ok(()) } + /// Repairs a tree + /// + /// # Type Parameters + /// + /// * `BE` - The type of the backend. 
+ /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `packer` - The packer to use + /// * `id` - The id of the tree to repair + /// * `replaced` - A map of already replaced trees + /// * `seen` - A set of already seen trees + /// * `dry_run` - Whether to actually modify the repository or just print what would be done + /// + /// # Returns + /// + /// A tuple containing the change status and the id of the repaired tree fn repair_tree( &self, be: &impl IndexedBackend, diff --git a/crates/rustic_core/src/commands/repoinfo.rs b/crates/rustic_core/src/commands/repoinfo.rs index b8117bed1..6eaf750ae 100644 --- a/crates/rustic_core/src/commands/repoinfo.rs +++ b/crates/rustic_core/src/commands/repoinfo.rs @@ -1,31 +1,53 @@ use serde::{Deserialize, Serialize}; use crate::{ + backend::{decrypt::DecryptReadBackend, FileType, ReadBackend, ALL_FILE_TYPES}, + blob::{BlobType, BlobTypeMap}, + error::RusticResult, index::IndexEntry, + progress::{Progress, ProgressBars}, repofile::indexfile::{IndexFile, IndexPack}, - repository::Open, - BlobType, BlobTypeMap, DecryptReadBackend, FileType, Progress, ProgressBars, ReadBackend, - Repository, RusticResult, ALL_FILE_TYPES, + repository::{Open, Repository}, }; #[derive(Default, Clone, Debug, Serialize, Deserialize)] +/// Index information from `repoinfo` pub struct IndexInfos { + /// Infos about blobs pub blobs: Vec, + /// Infos about blobs in packs marked for deletion pub blobs_delete: Vec, + /// Infos about packs pub packs: Vec, + /// Infos about packs marked for deletion pub packs_delete: Vec, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] +/// Information about blobs within `repoinfo` pub struct BlobInfo { + /// Blob type pub blob_type: BlobType, + /// Number of blobs of the type pub count: u64, + /// Total size saved in the repository of all blobs of the type. + /// + /// This is the size of the blobs after compression and encryption. pub size: u64, + /// Total data size of all blobs of the type. 
+ /// + /// This is the raw size of the blobs without compression or encryption. pub data_size: u64, } impl BlobInfo { - pub fn add(&mut self, ie: IndexEntry) { + /// Add the given [`IndexEntry`] length to the data size and count. + /// + /// # Arguments + /// + /// * `ie` - The [`IndexEntry`] to add. + // TODO: What happens if the [`IndexEntry`] is not of the same [`BlobType`] as this [`BlobInfo`]? + pub(crate) fn add(&mut self, ie: IndexEntry) { self.count += 1; self.size += u64::from(ie.length); self.data_size += u64::from(ie.data_length()); @@ -34,15 +56,29 @@ impl BlobInfo { #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Clone, Copy, Debug, Serialize, Deserialize)] +/// Information about packs within `repoinfo` pub struct PackInfo { + /// Packs of the given blob type pub blob_type: BlobType, + /// Number of packs of the type pub count: u64, + /// Minimal pack size for packs of the type, None, if there is no pack. pub min_size: Option, + /// Maximal pack size for packs of the type, None, if there is no pack. pub max_size: Option, } impl PackInfo { - pub fn add(&mut self, ip: &IndexPack) { + /// Add the given [`IndexPack`] to the count and update the min and max size. + /// + /// # Arguments + /// + /// * `ip` - The [`IndexPack`] to add. + /// + /// # Panics + /// + // TODO: What happens if the [`IndexEntry`] is not of the same [`BlobType`] as this [`PackInfo`]? + pub(crate) fn add(&mut self, ip: &IndexPack) { self.count += 1; let size = u64::from(ip.pack_size()); self.min_size = self @@ -54,6 +90,16 @@ impl PackInfo { } } +/// Collects the index infos from the given repository. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to collect the infos from. 
pub(crate) fn collect_index_infos( repo: &Repository, ) -> RusticResult { @@ -108,18 +154,34 @@ pub(crate) fn collect_index_infos( #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Default, Clone, Debug, Serialize, Deserialize)] +/// Information about repository files pub struct RepoFileInfos { + /// Repository files pub repo: Vec, + /// Hot repository files, if we have a hot/cold repository pub repo_hot: Option>, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] +/// Information about a repository files of a given [`FileType`] pub struct RepoFileInfo { + /// The type of the files pub tpe: FileType, + /// The total # of files pub count: u64, + /// The total size of all files pub size: u64, } +/// Collects the file info from the given backend. +/// +/// # Arguments +/// +/// * `be` - The backend to collect the infos from. +/// +/// # Errors +/// +/// If files could not be listed. pub(crate) fn collect_file_info(be: &impl ReadBackend) -> RusticResult> { let mut files = Vec::with_capacity(ALL_FILE_TYPES.len()); for tpe in ALL_FILE_TYPES { @@ -131,7 +193,21 @@ pub(crate) fn collect_file_info(be: &impl ReadBackend) -> RusticResult( +/// Collects the file infos from the given repository. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The type of the indexed tree. +/// +/// # Arguments +/// +/// * `repo` - The repository to collect the infos from. +/// +/// # Errors +/// +// TODO: add errors! +pub(crate) fn collect_file_infos( repo: &Repository, ) -> RusticResult { let p = repo.pb.progress_spinner("scanning files..."); diff --git a/crates/rustic_core/src/commands/restore.rs b/crates/rustic_core/src/commands/restore.rs index 0f4cbfd22..4d5c42484 100644 --- a/crates/rustic_core/src/commands/restore.rs +++ b/crates/rustic_core/src/commands/restore.rs @@ -1,5 +1,6 @@ //! 
`restore` subcommand +use derive_setters::Setters; use log::{debug, error, info, trace, warn}; use std::{ @@ -16,23 +17,39 @@ use itertools::Itertools; use rayon::ThreadPoolBuilder; use crate::{ + backend::{ + decrypt::DecryptReadBackend, + local::LocalDestination, + node::{Node, NodeType}, + FileType, ReadBackend, + }, + blob::BlobType, error::CommandErrorKind, - repository::{IndexedFull, IndexedTree}, - BlobType, DecryptReadBackend, FileType, Id, LocalDestination, Node, NodeType, Open, Progress, - ProgressBars, ReadBackend, Repository, RusticResult, + error::RusticResult, + id::Id, + progress::{Progress, ProgressBars}, + repository::{IndexedFull, IndexedTree, Open, Repository}, }; pub(crate) mod constants { + /// The maximum number of reader threads to use for restoring. pub(crate) const MAX_READER_THREADS_NUM: usize = 20; } -/// `restore` subcommand +type RestoreInfo = BTreeMap<(Id, BlobLocation), Vec>; +type Filenames = Vec; + #[allow(clippy::struct_excessive_bools)] #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Debug, Copy, Clone, Default)] -pub struct RestoreOpts { +#[derive(Debug, Copy, Clone, Default, Setters)] +#[setters(into)] +/// Options for the `restore` command +pub struct RestoreOptions { /// Remove all files/dirs in destination which are not contained in snapshot. - /// WARNING: Use with care, maybe first try this with --dry-run? + /// + /// # Warning + /// + /// Use with care, maybe first try this with --dry-run? 
#[cfg_attr(feature = "clap", clap(long))] pub delete: bool, @@ -50,24 +67,50 @@ pub struct RestoreOpts { } #[derive(Default, Debug, Clone, Copy)] +/// Statistics for files or directories pub struct FileDirStats { + /// Number of files or directories to restore pub restore: u64, + /// Number of files or directories which are unchanged (determined by date, but not verified) pub unchanged: u64, + /// Number of files or directories which are verified and unchanged pub verified: u64, + /// Number of files or directories which are modified pub modify: u64, + /// Number of additional entries pub additional: u64, } #[derive(Default, Debug, Clone, Copy)] +/// Restore statistics pub struct RestoreStats { + /// file statistics pub files: FileDirStats, + /// directory statistics pub dirs: FileDirStats, } -impl RestoreOpts { +impl RestoreOptions { + /// Restore the repository to the given destination. + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The type of the indexed tree. + /// + /// # Arguments + /// + /// * `file_infos` - The restore information. + /// * `repo` - The repository to restore. + /// * `node_streamer` - The node streamer to use. + /// * `dest` - The destination to restore to. + /// + /// # Errors + /// + /// If the restore failed. pub(crate) fn restore( self, - file_infos: RestoreInfos, + file_infos: RestorePlan, repo: &Repository, node_streamer: impl Iterator>, dest: &LocalDestination, @@ -82,19 +125,36 @@ impl RestoreOpts { Ok(()) } - /// collect restore information, scan existing files, create needed dirs and remove superfluous files + /// Collect restore information, scan existing files, create needed dirs and remove superfluous files + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The type of the indexed tree. + /// + /// # Arguments + /// + /// * `repo` - The repository to restore. + /// * `node_streamer` - The node streamer to use. 
+ /// * `dest` - The destination to restore to. + /// * `dry_run` - If true, don't actually restore anything, but only print out what would be done. + /// + /// # Errors + /// + /// * [`CommandErrorKind::ErrorCreating`] - If a directory could not be created. + /// * [`CommandErrorKind::ErrorCollecting`] - If the restore information could not be collected. pub(crate) fn collect_and_prepare( self, repo: &Repository, mut node_streamer: impl Iterator>, dest: &LocalDestination, dry_run: bool, - ) -> RusticResult { + ) -> RusticResult { let p = repo.pb.progress_spinner("collecting file information..."); let dest_path = dest.path(""); let mut stats = RestoreStats::default(); - let mut restore_infos = RestoreInfos::default(); + let mut restore_infos = RestorePlan::default(); let mut additional_existing = false; let mut removed_dir = None; @@ -256,6 +316,16 @@ impl RestoreOpts { Ok(restore_infos) } + /// Restore the metadata of the files and directories. + /// + /// # Arguments + /// + /// * `node_streamer` - The node streamer to use. + /// * `dest` - The destination to restore to. + /// + /// # Errors + /// + /// If the restore failed. fn restore_metadata( self, mut node_streamer: impl Iterator>, @@ -288,6 +358,18 @@ impl RestoreOpts { Ok(()) } + /// Set the metadata of the given file or directory. + /// + /// # Arguments + /// + /// * `dest` - The destination to restore to. + /// * `path` - The path of the file or directory. + /// * `node` - The node information of the file or directory. + /// + /// # Errors + /// + /// If the metadata could not be set. + // TODO: Return a result here, introduce errors and get rid of logging. 
fn set_metadata(self, dest: &LocalDestination, path: &PathBuf, node: &Node) { debug!("setting metadata for {:?}", path); dest.create_special(path, node) @@ -312,12 +394,28 @@ impl RestoreOpts { /// [`restore_contents`] restores all files contents as described by `file_infos` /// using the [`DecryptReadBackend`] `be` and writing them into the [`LocalBackend`] `dest`. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to restore. +/// * `dest` - The destination to restore to. +/// * `file_infos` - The restore information. +/// +/// # Errors +/// +/// * [`CommandErrorKind::ErrorSettingLength`] - If the length of a file could not be set. +/// * [`CommandErrorKind::FromRayonError`] - If the restore failed. fn restore_contents( repo: &Repository, dest: &LocalDestination, - file_infos: RestoreInfos, + file_infos: RestorePlan, ) -> RusticResult<()> { - let RestoreInfos { + let RestorePlan { names: filenames, file_lengths, r: restore_info, @@ -440,31 +538,42 @@ fn restore_contents( Ok(()) } -/// struct that contains information of file contents grouped by +/// Information about what will be restored. +/// +/// Struct that contains information of file contents grouped by /// 1) pack ID, /// 2) blob within this pack /// 3) the actual files and position of this blob within those +/// 4) Statistical information #[derive(Debug, Default)] -pub struct RestoreInfos { +pub struct RestorePlan { + /// The names of the files to restore names: Filenames, + /// The length of the files to restore file_lengths: Vec, + /// The restore information r: RestoreInfo, + /// The total restore size pub restore_size: u64, + /// The total size of matched content, i.e. content which needs no restore. pub matched_size: u64, + /// Statistics about the restore. 
pub stats: RestoreStats, } -type RestoreInfo = BTreeMap<(Id, BlobLocation), Vec>; -type Filenames = Vec; - +/// `BlobLocation` contains information about a blob within a pack #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct BlobLocation { + /// The offset of the blob within the pack offset: u32, + /// The length of the blob length: u32, + /// The uncompressed length of the blob uncompressed_length: Option, } impl BlobLocation { + /// Get the length of the data contained in this blob fn data_length(&self) -> u64 { self.uncompressed_length .map_or( @@ -475,21 +584,46 @@ impl BlobLocation { } } +/// `FileLocation` contains information about a file within a blob #[derive(Debug)] struct FileLocation { + // TODO: The index of the file within ... ? file_idx: usize, + /// The start of the file within the blob file_start: u64, - matches: bool, //indicates that the file exists and these contents are already correct + /// Whether the file matches the blob + /// + /// This indicates that the file exists and these contents are already correct. + matches: bool, } +/// `AddFileResult` indicates the result of adding a file to [`FileInfos`] +// TODO: Add documentation! enum AddFileResult { Existing, Verified, Modify, } -impl RestoreInfos { +impl RestorePlan { /// Add the file to [`FileInfos`] using `index` to get blob information. + /// + /// # Type Parameters + /// + /// * `P` - The progress bar type. + /// * `S` - The type of the indexed tree. + /// + /// # Arguments + /// + /// * `dest` - The destination to restore to. + /// * `file` - The file to add. + /// * `name` - The name of the file. + /// * `repo` - The repository to restore. + /// * `ignore_mtime` - If true, ignore the modification time of the file. + /// + /// # Errors + /// + /// If the file could not be added. 
fn add_file( &mut self, dest: &LocalDestination, @@ -500,7 +634,7 @@ impl RestoreInfos { ) -> RusticResult { let mut open_file = dest.get_matching_file(&name, file.meta.size); - // Empty files which exists with correct size should always return Ok(Existsing)! + // Empty files which exist with correct size should always return Ok(Existing)! if file.meta.size == 0 { if let Some(meta) = open_file.as_ref().map(|f| f.metadata()).transpose()? { if meta.len() == 0 { @@ -569,6 +703,9 @@ impl RestoreInfos { } } + /// Get a list of all pack files needed to perform the restore + /// + /// This can be used e.g. to warm-up those pack files before doing the actual restore. pub fn to_packs(&self) -> Vec { self.r .iter() diff --git a/crates/rustic_core/src/commands/snapshots.rs b/crates/rustic_core/src/commands/snapshots.rs index 58b3925a0..7ffb8b6b2 100644 --- a/crates/rustic_core/src/commands/snapshots.rs +++ b/crates/rustic_core/src/commands/snapshots.rs @@ -1,10 +1,30 @@ //! `smapshot` subcommand use crate::{ - repository::Open, ProgressBars, Repository, RusticResult, SnapshotFile, SnapshotGroup, - SnapshotGroupCriterion, + error::RusticResult, + progress::ProgressBars, + repofile::snapshotfile::{SnapshotGroup, SnapshotGroupCriterion}, + repofile::SnapshotFile, + repository::{Open, Repository}, }; +/// Get the snapshots from the repository. +/// +/// # Type Parameters +/// +/// * `P` - The progress bar type. +/// * `S` - The state the repository is in. +/// +/// # Arguments +/// +/// * `repo` - The repository to get the snapshots from. +/// * `ids` - The ids of the snapshots to get. +/// * `group_by` - The criterion to group the snapshots by. +/// * `filter` - The filter to apply to the snapshots. +/// +/// # Returns +/// +/// The snapshots grouped by the given criterion. 
pub(crate) fn get_snapshot_group( repo: &Repository, ids: &[String], diff --git a/crates/rustic_core/src/crypto.rs b/crates/rustic_core/src/crypto.rs index dec481d55..6019db071 100644 --- a/crates/rustic_core/src/crypto.rs +++ b/crates/rustic_core/src/crypto.rs @@ -3,7 +3,27 @@ use crate::RusticResult; pub(crate) mod aespoly1305; pub(crate) mod hasher; +/// A trait for encrypting and decrypting data. pub trait CryptoKey: Clone + Sized + Send + Sync + 'static { + /// Decrypt the given data. + /// + /// # Arguments + /// + /// * `data` - The data to decrypt. + /// + /// # Returns + /// + /// A vector containing the decrypted data. fn decrypt_data(&self, data: &[u8]) -> RusticResult>; + + /// Encrypt the given data. + /// + /// # Arguments + /// + /// * `data` - The data to encrypt. + /// + /// # Returns + /// + /// A vector containing the encrypted data. fn encrypt_data(&self, data: &[u8]) -> RusticResult>; } diff --git a/crates/rustic_core/src/crypto/aespoly1305.rs b/crates/rustic_core/src/crypto/aespoly1305.rs index 0338c9f4b..069ed6770 100644 --- a/crates/rustic_core/src/crypto/aespoly1305.rs +++ b/crates/rustic_core/src/crypto/aespoly1305.rs @@ -4,15 +4,26 @@ use aes256ctr_poly1305aes::{ }; use rand::{thread_rng, RngCore}; -use crate::{crypto::CryptoKey, error::CryptoErrorKind, RusticResult}; +use crate::{crypto::CryptoKey, error::CryptoErrorKind, error::RusticResult}; pub(crate) type Nonce = aead::Nonce; pub(crate) type AeadKey = aead::Key; +/// The `Key` is used to encrypt/MAC and check/decrypt data. +/// +/// It is a 64 byte key that is used to derive the AES256 encryption key and the numbers `k` and `r` used in the `Poly1305AES` MAC. +/// +/// The first 32 bytes are used for the AES256 encryption. +/// +/// The next 16 bytes are used for the number `k` of `Poly1305AES`. +/// +/// The last 16 bytes are used for the number `r` of `Poly1305AES`. 
+/// #[derive(Clone, Default, Debug, Copy)] pub struct Key(AeadKey); impl Key { + /// Create a new random [`Key`] using a suitable entropy source. #[must_use] pub fn new() -> Self { let mut key = AeadKey::default(); @@ -20,11 +31,23 @@ impl Key { Self(key) } + /// Create a new [`Key`] from a slice. + /// + /// # Arguments + /// + /// * `key` - The slice to create the [`Key`] from. #[must_use] pub fn from_slice(key: &[u8]) -> Self { Self(*AeadKey::from_slice(key)) } + /// Create a new [`Key`] from the AES key and numbers `k` and `r` for `Poly1305AES`. + /// + /// # Arguments + /// + /// * `encrypt` - The AES key. + /// * `k` - The number k for `Poly1305AES`. + /// * `r` - The number r for `Poly1305AES`. #[must_use] pub fn from_keys(encrypt: &[u8], k: &[u8], r: &[u8]) -> Self { let mut key = AeadKey::default(); @@ -35,6 +58,7 @@ impl Key { Self(key) } + /// Returns the AES key and numbers `k`and `r` for `Poly1305AES`. #[must_use] pub fn to_keys(self) -> (Vec, Vec, Vec) { let mut encrypt = vec![0; 32]; @@ -49,6 +73,15 @@ impl Key { } impl CryptoKey for Key { + /// Returns the decrypted data from the given encrypted/MACed data. + /// + /// # Arguments + /// + /// * `data` - The encrypted/MACed data. + /// + /// # Errors + /// + /// If the MAC couldn't be checked. fn decrypt_data(&self, data: &[u8]) -> RusticResult> { if data.len() < 16 { return Err(CryptoErrorKind::CryptoKeyTooShort)?; @@ -60,6 +93,15 @@ impl CryptoKey for Key { .map_err(|err| CryptoErrorKind::DataDecryptionFailed(err).into()) } + /// Returns the encrypted+MACed data from the given data. + /// + /// # Arguments + /// + /// * `data` - The data to encrypt. + /// + /// # Errors + /// + /// If the data could not be encrypted. 
fn encrypt_data(&self, data: &[u8]) -> RusticResult> { let mut nonce = Nonce::default(); thread_rng().fill_bytes(&mut nonce); diff --git a/crates/rustic_core/src/crypto/hasher.rs b/crates/rustic_core/src/crypto/hasher.rs index 239d22189..38af85aea 100644 --- a/crates/rustic_core/src/crypto/hasher.rs +++ b/crates/rustic_core/src/crypto/hasher.rs @@ -2,6 +2,15 @@ use sha2::{Digest, Sha256}; use crate::id::Id; +/// Hashes the given data. +/// +/// # Arguments +/// +/// * `data` - The data to hash. +/// +/// # Returns +/// +/// The hash Id of the data. #[must_use] pub fn hash(data: &[u8]) -> Id { Id::new(Sha256::digest(data).into()) diff --git a/crates/rustic_core/src/error.rs b/crates/rustic_core/src/error.rs index 64f331ace..2eb0d5658 100644 --- a/crates/rustic_core/src/error.rs +++ b/crates/rustic_core/src/error.rs @@ -22,24 +22,32 @@ use chrono::OutOfRangeError; use displaydoc::Display; use thiserror::Error; -use crate::{id::Id, repofile::indexfile::IndexPack, NodeType}; +use crate::{backend::node::NodeType, id::Id, repofile::indexfile::IndexPack}; -/// Result type often returned from methods that can have rustic `Error`s. +/// Result type that is being returned from methods that can fail and thus have [`RusticError`]s. pub type RusticResult = Result; // [`Error`] is public, but opaque and easy to keep compatible. #[derive(Error, Debug)] #[error(transparent)] +/// Errors that can result from rustic. pub struct RusticError(#[from] RusticErrorKind); // Accessors for anything we do want to expose publicly. impl RusticError { + /// Expose the inner error kind. + /// + /// This is useful for matching on the error kind. pub fn into_inner(self) -> RusticErrorKind { self.0 } } -// Private (pub(crate)) and free to change across minor version of the crate. +/// [`RusticErrorKind`] describes the errors that can happen while executing a high-level command. +/// +/// This is a non-exhaustive enum, so additional variants may be added in future. 
It is +/// recommended to match against the wildcard `_` instead of listing all possible variants, +/// to avoid problems when new variants are added. #[non_exhaustive] #[derive(Error, Debug)] pub enum RusticErrorKind { @@ -151,8 +159,8 @@ pub enum CommandErrorKind { PackSizeNotMatching(Id, u32, u32), /// "used pack {0} does not exist! PackNotExisting(Id), - /// pack {0} got no decicion what to do - NoDecicion(Id), + /// pack {0} got no decision what to do + NoDecision(Id), /// {0:?} FromParseIntError(#[from] ParseIntError), /// {0} diff --git a/crates/rustic_core/src/id.rs b/crates/rustic_core/src/id.rs index ac1f05cb4..f991eb3ce 100644 --- a/crates/rustic_core/src/id.rs +++ b/crates/rustic_core/src/id.rs @@ -5,13 +5,18 @@ use derive_more::{Constructor, Display}; use rand::{thread_rng, RngCore}; use serde::{Deserialize, Serialize}; -use crate::{error::IdErrorKind, hash, RusticResult}; +use crate::{crypto::hasher::hash, error::IdErrorKind, RusticResult}; pub(super) mod constants { + /// The length of the hash in bytes pub(super) const LEN: usize = 32; + /// The length of the hash in hexadecimal characters pub(super) const HEX_LEN: usize = LEN * 2; } +/// `Id` is the hash id of an object. +/// +/// It is being used to identify blobs or files saved in the repository. 
#[derive( Serialize, Deserialize, @@ -30,12 +35,32 @@ pub(super) mod constants { )] #[display(fmt = "{}", "&self.to_hex()[0..8]")] pub struct Id( + /// The actual hash #[serde(serialize_with = "hex::serde::serialize")] #[serde(deserialize_with = "hex::serde::deserialize")] [u8; constants::LEN], ); impl Id { + /// Parse an `Id` from a hexadecimal string + /// + /// # Arguments + /// + /// * `s` - The hexadecimal string to parse + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// + /// # Examples + /// + /// ``` + /// use rustic_core::Id; + /// + /// let id = Id::from_hex("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef").unwrap(); + /// + /// assert_eq!(id.to_hex().as_str(), "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); + /// ``` pub fn from_hex(s: &str) -> RusticResult { let mut id = Self::default(); @@ -44,6 +69,7 @@ impl Id { Ok(id) } + /// Generate a random `Id`. #[must_use] pub fn random() -> Self { let mut id = Self::default(); @@ -51,6 +77,17 @@ impl Id { id } + /// Convert to [`HexId`]. 
+ /// + /// # Examples + /// + /// ``` + /// use rustic_core::Id; + /// + /// let id = Id::from_hex("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef").unwrap(); + /// + /// assert_eq!(id.to_hex().as_str(), "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); + /// ``` #[must_use] pub fn to_hex(self) -> HexId { let mut hex_id = HexId::EMPTY; @@ -59,11 +96,32 @@ impl Id { hex_id } + /// Checks if the [`Id`] is zero + /// + /// # Examples + /// + /// ``` + /// use rustic_core::Id; + /// + /// let id = Id::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap(); + /// + /// assert!(id.is_null()); + /// ``` #[must_use] pub fn is_null(&self) -> bool { self == &Self::default() } + /// Checks if this [`Id`] matches the content of a reader + /// + /// # Arguments + /// + /// * `length` - The length of the blob + /// * `r` - The reader to check + /// + /// # Returns + /// + /// `true` if the SHA256 matches, `false` otherwise pub fn blob_matches_reader(&self, length: usize, r: &mut impl Read) -> bool { // check if SHA256 matches let mut vec = vec![0; length]; @@ -72,17 +130,21 @@ impl Id { } impl fmt::Debug for Id { + /// Format the `Id` as a hexadecimal string fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &*self.to_hex()) } } +/// An `Id` in hexadecimal format #[derive(Copy, Clone, Debug)] pub struct HexId([u8; constants::HEX_LEN]); impl HexId { + /// An empty [`HexId`] const EMPTY: Self = Self([b'0'; constants::HEX_LEN]); + /// Get the string representation of a [`HexId`] pub fn as_str(&self) -> &str { // This is only ever filled with hex chars, which are ascii std::str::from_utf8(&self.0).unwrap() diff --git a/crates/rustic_core/src/index.rs b/crates/rustic_core/src/index.rs index 80a3d3fef..bc6723ceb 100644 --- a/crates/rustic_core/src/index.rs +++ b/crates/rustic_core/src/index.rs @@ -9,23 +9,35 @@ use crate::{ error::{IndexErrorKind, RusticResult}, id::Id, 
index::binarysorted::{Index, IndexCollector, IndexType}, + progress::Progress, repofile::indexfile::{IndexBlob, IndexFile}, - Progress, }; pub(crate) mod binarysorted; pub(crate) mod indexer; +/// An entry in the index #[derive(Debug, Clone, Copy, PartialEq, Eq, Constructor)] pub struct IndexEntry { + /// The type of the blob blob_type: BlobType, + /// The pack the blob is in pub pack: Id, + /// The offset of the blob in the pack pub offset: u32, + /// The length of the blob in the pack pub length: u32, + /// The uncompressed length of the blob pub uncompressed_length: Option, } impl IndexEntry { + /// Create an [`IndexEntry`] from an [`IndexBlob`] + /// + /// # Arguments + /// + /// * `blob` - The [`IndexBlob`] to create the [`IndexEntry`] from + /// * `pack` - The pack the blob is in #[must_use] pub const fn from_index_blob(blob: &IndexBlob, pack: Id) -> Self { Self { @@ -39,9 +51,13 @@ impl IndexEntry { /// Get a blob described by [`IndexEntry`] from the backend /// + /// # Arguments + /// + /// * `be` - The backend to read from + /// /// # Errors /// - /// TODO This function will return an error if . + // TODO: add error! This function will return an error if the blob is not found in the backend. 
pub fn read_data(&self, be: &B) -> RusticResult { let data = be.read_encrypted_partial( FileType::Pack, @@ -54,6 +70,7 @@ impl IndexEntry { Ok(data) } + /// Get the length of the data described by the [`IndexEntry`] #[must_use] pub const fn data_length(&self) -> u32 { match self.uncompressed_length { @@ -63,56 +80,178 @@ impl IndexEntry { } } +/// The index of the repository +/// +/// The index is a list of [`IndexEntry`]s pub trait ReadIndex { + /// Get an [`IndexEntry`] from the index + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Returns + /// + /// The [`IndexEntry`] - If it exists otherwise `None` fn get_id(&self, tpe: BlobType, id: &Id) -> Option; + + /// Get the total size of all blobs of the given type + /// + /// # Arguments + /// + /// * `tpe` - The type of the blobs fn total_size(&self, tpe: BlobType) -> u64; + + /// Check if the index contains the given blob + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob fn has(&self, tpe: BlobType, id: &Id) -> bool; + /// Get a tree from the index + /// + /// # Arguments + /// + /// * `id` - The id of the tree + /// + /// # Returns + /// + /// The [`IndexEntry`] of the tree if it exists otherwise `None` fn get_tree(&self, id: &Id) -> Option { self.get_id(BlobType::Tree, id) } + /// Get a data blob from the index + /// + /// # Arguments + /// + /// * `id` - The id of the data blob + /// + /// # Returns + /// + /// The [`IndexEntry`] of the data blob if it exists otherwise `None` fn get_data(&self, id: &Id) -> Option { self.get_id(BlobType::Data, id) } + /// Check if the index contains the given tree + /// + /// # Arguments + /// + /// * `id` - The id of the tree + /// + /// # Returns + /// + /// `true` if the index contains the tree otherwise `false` fn has_tree(&self, id: &Id) -> bool { self.has(BlobType::Tree, id) } + /// Check if the index contains the given data blob + /// + /// # Arguments + /// 
+ /// * `id` - The id of the data blob + /// + /// # Returns + /// + /// `true` if the index contains the data blob otherwise `false` fn has_data(&self, id: &Id) -> bool { self.has(BlobType::Data, id) } } +/// A trait for backends with an index pub trait IndexedBackend: ReadIndex + Clone + Sync + Send + 'static { + /// The backend type type Backend: DecryptReadBackend; + /// Get a reference to the backend fn be(&self) -> &Self::Backend; + /// Get a blob from the backend + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Errors + /// + /// If the blob could not be found in the backend + /// + /// # Returns + /// + /// The data of the blob fn blob_from_backend(&self, tpe: BlobType, id: &Id) -> RusticResult; } +/// A backend with an index +/// +/// # Type Parameters +/// +/// * `BE` - The backend type #[derive(Clone, Debug)] pub struct IndexBackend { + /// The backend to read from. be: BE, + /// The atomic reference counted, sharable index. 
index: Arc, } impl ReadIndex for IndexBackend { + /// Get an [`IndexEntry`] from the index + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Returns + /// + /// The [`IndexEntry`] - If it exists otherwise `None` fn get_id(&self, tpe: BlobType, id: &Id) -> Option { self.index.get_id(tpe, id) } + /// Get the total size of all blobs of the given type + /// + /// # Arguments + /// + /// * `tpe` - The type of the blobs fn total_size(&self, tpe: BlobType) -> u64 { self.index.total_size(tpe) } + + /// Check if the index contains the given blob + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Returns + /// + /// `true` if the index contains the blob otherwise `false` fn has(&self, tpe: BlobType, id: &Id) -> bool { self.index.has(tpe, id) } } impl IndexBackend { + /// Create a new [`IndexBackend`] from an [`Index`] + /// + /// # Type Parameters + /// + /// * `BE` - The backend type + /// + /// # Arguments + /// + /// * `be` - The backend to read from + /// * `index` - The index to use pub fn new_from_index(be: &BE, index: Index) -> Self { Self { be: be.clone(), @@ -120,6 +259,21 @@ impl IndexBackend { } } + /// Create a new [`IndexBackend`] from an [`IndexCollector`] + /// + /// # Type Parameters + /// + /// * `BE` - The backend type + /// + /// # Arguments + /// + /// * `be` - The backend to read from + /// * `p` - The progress tracker + /// * `collector` - The [`IndexCollector`] to use + /// + /// # Errors + /// + /// If the index could not be read fn new_from_collector( be: &BE, p: &impl Progress, @@ -135,14 +289,39 @@ impl IndexBackend { Ok(Self::new_from_index(be, collector.into_index())) } + /// Create a new [`IndexBackend`] + /// + /// # Type Parameters + /// + /// * `BE` - The backend type + /// + /// # Arguments + /// + /// * `be` - The backend to read from + /// * `p` - The progress tracker pub fn new(be: &BE, p: &impl Progress) -> 
RusticResult { Self::new_from_collector(be, p, IndexCollector::new(IndexType::Full)) } + /// Create a new [`IndexBackend`] with only full trees + /// + /// # Type Parameters + /// + /// * `BE` - The backend type + /// + /// # Arguments + /// + /// * `be` - The backend to read from + /// * `p` - The progress tracker + /// + /// # Errors + /// + /// If the index could not be read pub fn only_full_trees(be: &BE, p: &impl Progress) -> RusticResult { - Self::new_from_collector(be, p, IndexCollector::new(IndexType::FullTrees)) + Self::new_from_collector(be, p, IndexCollector::new(IndexType::DataIds)) } + /// Convert the Arc to an Index pub fn into_index(self) -> Index { match Arc::try_unwrap(self.index) { Ok(index) => index, @@ -159,10 +338,21 @@ impl IndexBackend { impl IndexedBackend for IndexBackend { type Backend = BE; + /// Get a reference to the backend fn be(&self) -> &Self::Backend { &self.be } + /// Get a blob from the backend + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Errors + /// + /// * [`IndexErrorKind::BlobInIndexNotFound`] - If the blob could not be found in the index fn blob_from_backend(&self, tpe: BlobType, id: &Id) -> RusticResult { self.get_id(tpe, id).map_or_else( || Err(IndexErrorKind::BlobInIndexNotFound.into()), diff --git a/crates/rustic_core/src/index/binarysorted.rs b/crates/rustic_core/src/index/binarysorted.rs index 470eea08b..531b2bc08 100644 --- a/crates/rustic_core/src/index/binarysorted.rs +++ b/crates/rustic_core/src/index/binarysorted.rs @@ -8,22 +8,33 @@ use crate::{ repofile::indexfile::{IndexBlob, IndexPack}, }; +/// A sorted entry in the index. #[derive(Debug, PartialEq, Eq)] pub(crate) struct SortedEntry { + /// The ID of the entry. id: Id, + /// The index of the pack containing the entry. pack_idx: usize, + /// The offset of the entry in the pack. offset: u32, + /// The length of the entry in the pack. length: u32, + /// The uncompressed length of the entry. 
uncompressed_length: Option, } +/// `IndexType` determines which information is stored in the index. #[derive(Debug, Clone, Copy)] pub enum IndexType { + /// Index everything. Full, - FullTrees, + /// Index only Ids for data blobs (+ full information for tree blobs) + DataIds, + /// Index only trees. OnlyTrees, } +// TODO: add documentation! #[derive(Debug)] pub(crate) enum EntriesVariants { None, @@ -72,7 +83,7 @@ impl IndexCollector { collector.0[BlobType::Tree].entries = EntriesVariants::FullEntries(Vec::new()); collector.0[BlobType::Data].entries = match tpe { IndexType::OnlyTrees => EntriesVariants::None, - IndexType::FullTrees => EntriesVariants::Ids(Vec::new()), + IndexType::DataIds => EntriesVariants::Ids(Vec::new()), IndexType::Full => EntriesVariants::FullEntries(Vec::new()), }; @@ -318,13 +329,22 @@ mod tests { collector.into_index() } + /// Parses a hex string into an ID. + /// + /// # Arguments + /// + /// * `s` - The hex string to parse. + /// + /// # Panics + /// + /// If the string is not a valid hexadecimal string. 
fn parse(s: &str) -> Id { Id::from_hex(s).unwrap() } #[test] fn all_index_types() { - for it in [IndexType::OnlyTrees, IndexType::FullTrees, IndexType::Full] { + for it in [IndexType::OnlyTrees, IndexType::DataIds, IndexType::Full] { let index = index(it); let id = parse("0000000000000000000000000000000000000000000000000000000000000000"); @@ -375,7 +395,7 @@ mod tests { #[test] fn full_trees() { - let index = index(IndexType::FullTrees); + let index = index(IndexType::DataIds); let id = parse("fac5e908151e565267570108127b96e6bae22bcdda1d3d867f63ed1555fc8aef"); assert!(index.has(BlobType::Data, &id)); diff --git a/crates/rustic_core/src/index/indexer.rs b/crates/rustic_core/src/index/indexer.rs index 513824252..93bc8ebe7 100644 --- a/crates/rustic_core/src/index/indexer.rs +++ b/crates/rustic_core/src/index/indexer.rs @@ -6,32 +6,50 @@ use std::{ use crate::{ backend::decrypt::DecryptWriteBackend, - error::IndexErrorKind, + error::{IndexErrorKind, RusticResult}, id::Id, repofile::indexfile::{IndexFile, IndexPack}, - RusticResult, }; + pub(super) mod constants { use std::time::Duration; + + /// The maximum number of blobs to index before saving the index. pub(super) const MAX_COUNT: usize = 50_000; + /// The maximum age of an index before saving the index. pub(super) const MAX_AGE: Duration = Duration::from_secs(300); } pub(crate) type SharedIndexer = Arc>>; +/// The `Indexer` is responsible for indexing blobs. #[derive(Debug)] pub struct Indexer where BE: DecryptWriteBackend, { + /// The backend to write to. be: BE, + /// The index file. file: IndexFile, + /// The number of blobs indexed. count: usize, + /// The time the indexer was created. created: SystemTime, + /// The set of indexed blob ids. indexed: Option>, } impl Indexer { + /// Creates a new `Indexer`. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. 
pub fn new(be: BE) -> Self { Self { be, @@ -42,6 +60,15 @@ impl Indexer { } } + /// Creates a new `Indexer` without an index. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. + /// + /// # Arguments + /// + /// * `be` - The backend to write to. pub fn new_unindexed(be: BE) -> Self { Self { be, @@ -52,20 +79,36 @@ impl Indexer { } } + /// Resets the indexer. pub fn reset(&mut self) { self.file = IndexFile::default(); self.count = 0; self.created = SystemTime::now(); } + /// Returns a `SharedIndexer` to use in multiple threads. + /// + /// # Type Parameters + /// + /// * `BE` - The backend type. pub fn into_shared(self) -> SharedIndexer { Arc::new(RwLock::new(self)) } + /// Finalizes the `Indexer`. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. pub fn finalize(&self) -> RusticResult<()> { self.save() } + /// Save file if length of packs and `packs_to_delete` is greater than `0`. + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. pub fn save(&self) -> RusticResult<()> { if (self.file.packs.len() + self.file.packs_to_delete.len()) > 0 { _ = self.be.save_file(&self.file)?; @@ -73,14 +116,45 @@ impl Indexer { Ok(()) } + /// Adds a pack to the `Indexer`. + /// + /// # Arguments + /// + /// * `pack` - The pack to add. + /// + /// # Errors + /// + /// * [`IndexErrorKind::CouldNotGetElapsedTimeFromSystemTime`] - If the elapsed time could not be retrieved from the system time. + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. pub fn add(&mut self, pack: IndexPack) -> RusticResult<()> { self.add_with(pack, false) } + /// Adds a pack to the `Indexer` and removes it from the backend. + /// + /// # Arguments + /// + /// * `pack` - The pack to add. 
+ /// + /// # Errors + /// + /// * [`IndexErrorKind::CouldNotGetElapsedTimeFromSystemTime`] - If the elapsed time could not be retrieved from the system time. + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. pub fn add_remove(&mut self, pack: IndexPack) -> RusticResult<()> { self.add_with(pack, true) } + /// Adds a pack to the `Indexer`. + /// + /// # Arguments + /// + /// * `pack` - The pack to add. + /// * `delete` - Whether to delete the pack from the backend. + /// + /// # Errors + /// + /// * [`IndexErrorKind::CouldNotGetElapsedTimeFromSystemTime`] - If the elapsed time could not be retrieved from the system time. + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the index file could not be serialized. pub fn add_with(&mut self, pack: IndexPack, delete: bool) -> RusticResult<()> { self.count += pack.blobs.len(); @@ -106,6 +180,11 @@ impl Indexer { Ok(()) } + /// Returns whether the given id is indexed. + /// + /// # Arguments + /// + /// * `id` - The id to check. pub fn has(&self, id: &Id) -> bool { self.indexed .as_ref() diff --git a/crates/rustic_core/src/lib.rs b/crates/rustic_core/src/lib.rs index 761dc9028..037da4e67 100644 --- a/crates/rustic_core/src/lib.rs +++ b/crates/rustic_core/src/lib.rs @@ -1,5 +1,5 @@ /*! -A library for deduplicated and encrypted backups, inspired by [`restic`](https://restic.net/). +A library for deduplicated and encrypted backups, using repositories as specified in the [`restic repository design`](https://github.com/restic/restic/blob/master/doc/design.rst). 
# Overview @@ -7,9 +7,51 @@ This section gives a brief overview of the primary types in this crate: TODO -# Examples +# Example - initialize a repository, backup to it and get snapshots -TODO +``` + use rustic_core::{BackupOptions, ConfigOptions, KeyOptions, PathList, Repository, RepositoryOptions, SnapshotOptions}; + + // Initialize the repository in a temporary dir + let repo_dir = tempfile::tempdir().unwrap(); + let repo_opts = RepositoryOptions::default() + .repository(repo_dir.path().to_str().unwrap()) + .password("test"); + let key_opts = KeyOptions::default(); + let config_opts = ConfigOptions::default(); + let _repo = Repository::new(&repo_opts).unwrap().init(&key_opts, &config_opts).unwrap(); + + // We could have used _repo directly, but open the repository again to show how to open it... + let repo = Repository::new(&repo_opts).unwrap().open().unwrap(); + + // Get all snapshots from the repository + let snaps = repo.get_all_snapshots().unwrap(); + // Should be zero, as the repository has just been initialized + assert_eq!(snaps.len(), 0); + + // Turn repository state to indexed (for backup): + let repo = repo.to_indexed_ids().unwrap(); + + // Pre-define the snapshot-to-backup + let snap = SnapshotOptions::default() + .add_tags("tag1,tag2").unwrap() + .to_snapshot().unwrap(); + + // Specify backup options and source + let backup_opts = BackupOptions::default(); + let source = PathList::from_string("src").unwrap().sanitize().unwrap(); + + // run the backup and return the snapshot pointing to the backup'ed data. + let snap = repo.backup(&backup_opts, source, snap).unwrap(); + // assert_eq!(&snap.paths, ["src"]); + + // Get all snapshots from the repository + let snaps = repo.get_all_snapshots().unwrap(); + // Should now be 1, we just created a snapshot + assert_eq!(snaps.len(), 1); + + assert_eq!(snaps[0], snap); +``` # Lower level APIs @@ -33,19 +75,19 @@ This crate exposes a few features for controlling dependency usage. 
#![allow(dead_code)] #![forbid(unsafe_code)] #![warn( - // unreachable_pub, // frequently check - // TODO: Activate and create better docs - // missing_docs, - rust_2018_idioms, - trivial_casts, - unused_lifetimes, - unused_qualifications, + // TODO: frequently check + // unreachable_pub, // TODO: Activate if you're feeling like fixing stuff // clippy::pedantic, // clippy::correctness, // clippy::suspicious, // clippy::complexity, // clippy::perf, + missing_docs, + rust_2018_idioms, + trivial_casts, + unused_lifetimes, + unused_qualifications, clippy::nursery, bad_style, dead_code, @@ -96,51 +138,37 @@ pub(crate) mod error; pub(crate) mod id; pub(crate) mod index; pub(crate) mod progress; -pub(crate) mod repofile; +/// Structs which are saved in JSON or binary format in the repository +pub mod repofile; pub(crate) mod repository; -pub(crate) use crate::crypto::aespoly1305::Key; // rustic_core Public API pub use crate::{ backend::{ - decrypt::{DecryptReadBackend, DecryptWriteBackend}, + decrypt::{compression_level_range, max_compression_level}, ignore::{LocalSource, LocalSourceFilterOptions, LocalSourceSaveOptions}, local::LocalDestination, - node::{latest_node, Node, NodeType}, - stdin::StdinSource, - FileType, ReadBackend, ReadSourceEntry, WriteBackend, ALL_FILE_TYPES, - }, - blob::{ - packer::Packer, - tree::{NodeStreamer, Tree, TreeStreamerOnce, TreeStreamerOptions}, - BlobType, BlobTypeMap, Initialize, Sum, + node::last_modified_node, + ReadSourceEntry, }, + blob::tree::TreeStreamerOptions as LsOptions, commands::{ - backup::{BackupOpts, ParentOpts}, - check::CheckOpts, - config::ConfigOpts, + backup::{BackupOptions, ParentOptions}, + check::CheckOptions, + config::ConfigOptions, copy::CopySnapshot, forget::{ForgetGroup, ForgetGroups, ForgetSnapshot, KeepOptions}, - key::KeyOpts, - prune::{PruneOpts, PrunePlan, PruneStats}, + key::KeyOptions, + prune::{PruneOptions, PrunePlan, PruneStats}, repair::{index::RepairIndexOptions, snapshots::RepairSnapshotsOptions}, 
repoinfo::{BlobInfo, IndexInfos, PackInfo, RepoFileInfo, RepoFileInfos}, - restore::{FileDirStats, RestoreInfos, RestoreOpts, RestoreStats}, + restore::{FileDirStats, RestoreOptions, RestorePlan, RestoreStats}, }, - crypto::hasher::hash, error::{RusticError, RusticResult}, - id::Id, - index::{indexer::Indexer, IndexBackend, IndexedBackend, ReadIndex}, + id::{HexId, Id}, progress::{NoProgress, NoProgressBars, Progress, ProgressBars}, - repofile::{ - configfile::ConfigFile, - indexfile::{IndexBlob, IndexFile, IndexPack}, - keyfile::KeyFile, - packfile::{HeaderEntry, PackHeader, PackHeaderLength, PackHeaderRef}, - snapshotfile::{ - DeleteOption, PathList, SnapshotFile, SnapshotGroup, SnapshotGroupCriterion, - SnapshotOptions, StringList, - }, + repofile::snapshotfile::{ + PathList, SnapshotGroup, SnapshotGroupCriterion, SnapshotOptions, StringList, }, - repository::{IndexedFull, Open, OpenStatus, Repository, RepositoryOptions}, + repository::{IndexedFull, OpenStatus, Repository, RepositoryOptions}, }; diff --git a/crates/rustic_core/src/progress.rs b/crates/rustic_core/src/progress.rs index 2ba552420..ec6ae2091 100644 --- a/crates/rustic_core/src/progress.rs +++ b/crates/rustic_core/src/progress.rs @@ -3,36 +3,73 @@ use std::borrow::Cow; use log::info; /// Trait to report progress information for any rustic action which supports that. +/// /// Implement this trait when you want to display this progress to your users. 
pub trait Progress: Send + Sync + Clone { /// Check if progress is hidden fn is_hidden(&self) -> bool; + /// Set total length for this progress + /// + /// # Arguments + /// + /// * `len` - The total length of this progress fn set_length(&self, len: u64); + /// Set title for this progress + /// + /// # Arguments + /// + /// * `title` - The title of this progress fn set_title(&self, title: &'static str); + /// Advance progress by given increment + /// + /// # Arguments + /// + /// * `inc` - The increment to advance this progress fn inc(&self, inc: u64); + /// Finish the progress fn finish(&self); } /// Trait to start progress information report progress information for any rustic action which supports that. +/// /// Implement this trait when you want to display this progress to your users. pub trait ProgressBars { + /// The actual type which is able to show the progress type P: Progress; + /// Start a new progress, which is hidden fn progress_hidden(&self) -> Self::P; + /// Start a new progress spinner. Note that this progress doesn't get a length and is not advanced, only finished. + /// + /// # Arguments + /// + /// * `prefix` - The prefix of the progress fn progress_spinner(&self, prefix: impl Into>) -> Self::P; + /// Start a new progress which counts something + /// + /// # Arguments + /// + /// * `prefix` - The prefix of the progress fn progress_counter(&self, prefix: impl Into>) -> Self::P; + /// Start a new progress which counts bytes + /// + /// # Arguments + /// + /// * `prefix` - The prefix of the progress fn progress_bytes(&self, prefix: impl Into>) -> Self::P; } +/// A dummy struct which shows no progress but only logs titles and end of a progress. #[derive(Clone, Copy, Debug)] pub struct NoProgress; + impl Progress for NoProgress { fn is_hidden(&self) -> bool { true @@ -47,8 +84,10 @@ impl Progress for NoProgress { } } +/// Don't show progress bars, only log rudimentary progress information. 
#[derive(Clone, Copy, Debug)] pub struct NoProgressBars; + impl ProgressBars for NoProgressBars { type P = NoProgress; fn progress_spinner(&self, prefix: impl Into>) -> Self::P { diff --git a/crates/rustic_core/src/repofile.rs b/crates/rustic_core/src/repofile.rs index 93dec30cc..7b7107946 100644 --- a/crates/rustic_core/src/repofile.rs +++ b/crates/rustic_core/src/repofile.rs @@ -1,13 +1,30 @@ use serde::{de::DeserializeOwned, Serialize}; -use crate::FileType; - pub(crate) mod configfile; pub(crate) mod indexfile; pub(crate) mod keyfile; pub(crate) mod packfile; pub(crate) mod snapshotfile; +/// Marker trait for repository files which are stored as encrypted JSON pub trait RepoFile: Serialize + DeserializeOwned + Sized + Send + Sync + 'static { + /// The [`FileType`] associated with the repository file const TYPE: FileType; } + +// Part of public API + +pub use { + crate::{ + backend::{ + node::{Node, NodeType}, + FileType, ALL_FILE_TYPES, + }, + blob::{tree::Tree, BlobType, ALL_BLOB_TYPES}, + }, + configfile::ConfigFile, + indexfile::{IndexBlob, IndexFile, IndexPack}, + keyfile::KeyFile, + packfile::{HeaderEntry, PackHeader, PackHeaderLength, PackHeaderRef}, + snapshotfile::{DeleteOption, PathList, SnapshotFile, SnapshotSummary, StringList}, +}; diff --git a/crates/rustic_core/src/repofile/configfile.rs b/crates/rustic_core/src/repofile/configfile.rs index 1b997a385..6dfb90701 100644 --- a/crates/rustic_core/src/repofile/configfile.rs +++ b/crates/rustic_core/src/repofile/configfile.rs @@ -9,42 +9,106 @@ pub(super) mod constants { pub(super) const KB: u32 = 1024; pub(super) const MB: u32 = 1024 * KB; - // default pack size + + /// Default Tree size pub(super) const DEFAULT_TREE_SIZE: u32 = 4 * MB; + + /// Default Data size pub(super) const DEFAULT_DATA_SIZE: u32 = 32 * MB; - // the default factor used for repo-size dependent pack size. 
- // 32 * sqrt(reposize in bytes) = 1 MB * sqrt(reposize in GB) + + /// the default factor used for repo-size dependent pack size. + /// 32 * sqrt(reposize in bytes) = 1 MB * sqrt(reposize in GB) pub(super) const DEFAULT_GROW_FACTOR: u32 = 32; + + /// The default maximum targeted pack size. pub(super) const DEFAULT_SIZE_LIMIT: u32 = u32::MAX; + + /// The default minimum percentage of targeted pack size. + pub(super) const DEFAULT_MIN_PERCENTAGE: u32 = 30; } #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq)] +/// The config file describes all repository-wide information. +/// +/// It is usually saved in the repository as `config` pub struct ConfigFile { + /// Repository version. Currently 1 and 2 are supported pub version: u32, + + /// The [`Id`] identifying the repsitors pub id: Id, + + /// The chunker polynomial used to chunk data pub chunker_polynomial: String, + + /// Marker if this is a hot repository. If not set, this is no hot repository + /// + /// # Note + /// + /// When using hot/cold repositories, this is only set within the hot part of the repository. pub is_hot: Option, - /// compression level + + /// Compression level + /// + /// # Note /// - /// Note: that `Some(0)` means no compression + /// `Some(0)` means no compression. If not set, use the default compression: + /// * for repository version 1, use no compression (as not supported) + /// * for repository version 2, use the zstd default compression pub compression: Option, + + /// Size of tree packs. This will be enhanced by the `treepack_growfactor` depending on the repository size + /// + /// If not set, defaults to 4 MiB pub treepack_size: Option, + + /// Grow factor to increase size of tree packs depending on the repository size + /// + /// If not set, defaults to `32` pub treepack_growfactor: Option, + + /// Maximum targeted tree pack size. 
pub treepack_size_limit: Option, + + /// Size of data packs. This will be enhanced by the `datapack_growfactor` depending on the repository size + /// + /// If not set, defaults to `32 MiB` pub datapack_size: Option, + + /// Grow factor to increase size of data packs depending on the repository size + /// + /// If not set, defaults to `32` pub datapack_growfactor: Option, + + /// maximum targeted data pack size. pub datapack_size_limit: Option, + + /// Tolerate pack sizes which are larger than given percentage of targeted pack size + /// + /// If not set, defaults to `30` pub min_packsize_tolerate_percent: Option, + + /// Tolerate pack sizes which are smaller than given percentage of targeted pack size + /// + /// If not set or set to `0` this is unlimited. pub max_packsize_tolerate_percent: Option, } impl RepoFile for ConfigFile { + /// The file type of the config file const TYPE: FileType = FileType::Config; } impl ConfigFile { #[must_use] + /// Creates a new `ConfigFile`. + /// + /// # Arguments + /// + /// * `version` - The version of the repository + /// * `id` - The id of the repository + /// * `poly` - The chunker polynomial pub fn new(version: u32, id: Id, poly: u64) -> Self { Self { version, @@ -54,11 +118,21 @@ impl ConfigFile { } } + /// Get the chunker polynomial + /// + /// # Errors + /// + /// * [`ConfigFileErrorKind::ParsingFailedForPolynomial`] - If the polynomial could not be parsed pub fn poly(&self) -> RusticResult { Ok(u64::from_str_radix(&self.chunker_polynomial, 16) .map_err(ConfigFileErrorKind::ParsingFailedForPolynomial)?) 
} + /// Get the compression level + /// + /// # Errors + /// + /// * [`ConfigFileErrorKind::ConfigVersionNotSupported`] - If the version is not supported pub fn zstd(&self) -> RusticResult> { match (self.version, self.compression) { (1, _) | (2, Some(0)) => Ok(None), @@ -68,6 +142,15 @@ impl ConfigFile { } } + /// Get pack size parameter + /// + /// # Arguments + /// + /// * `blob` - The blob type to get the pack size parameters for + /// + /// # Returns + /// + /// A tuple containing the pack size, the grow factor and the size limit #[must_use] pub fn packsize(&self, blob: BlobType) -> (u32, u32, u32) { match blob { @@ -88,10 +171,16 @@ impl ConfigFile { } } + /// Get pack size toleration limits + /// + /// # Returns + /// + /// #[must_use] pub fn packsize_ok_percents(&self) -> (u32, u32) { ( - self.min_packsize_tolerate_percent.unwrap_or(30), + self.min_packsize_tolerate_percent + .unwrap_or(constants::DEFAULT_MIN_PERCENTAGE), match self.max_packsize_tolerate_percent { None | Some(0) => u32::MAX, Some(percent) => percent, diff --git a/crates/rustic_core/src/repofile/indexfile.rs b/crates/rustic_core/src/repofile/indexfile.rs index be86b3758..fe933390f 100644 --- a/crates/rustic_core/src/repofile/indexfile.rs +++ b/crates/rustic_core/src/repofile/indexfile.rs @@ -9,21 +9,34 @@ use crate::{ repofile::RepoFile, }; +/// Index files describe index information about multiple `pack` files. 
+/// +/// They are usually stored in the repository under `/index/` #[derive(Serialize, Deserialize, Debug, Default)] pub struct IndexFile { #[serde(skip_serializing_if = "Option::is_none")] + /// which other index files are superseded by this (not actively used) pub supersedes: Option>, + /// Index information about used packs pub packs: Vec, #[serde(default, skip_serializing_if = "Vec::is_empty")] + /// Index information about unused packs which are already marked for deletion pub packs_to_delete: Vec, } impl RepoFile for IndexFile { + /// The [`FileType`] associated with the [`IndexFile`] const TYPE: FileType = FileType::Index; } impl IndexFile { - pub fn add(&mut self, p: IndexPack, delete: bool) { + /// Add a new pack to the index file + /// + /// # Arguments + /// + /// * `p` - The pack to add + /// * `delete` - If the pack should be marked for deletion + pub(crate) fn add(&mut self, p: IndexPack, delete: bool) { if delete { self.packs_to_delete.push(p); } else { @@ -33,17 +46,31 @@ impl IndexFile { } #[derive(Serialize, Deserialize, Default, Debug, Clone)] +/// Index information about a `pack` pub struct IndexPack { + /// pack Id pub id: Id, + /// Index information about contained blobs pub blobs: Vec, #[serde(skip_serializing_if = "Option::is_none")] + /// The pack creation time or time when the pack was marked for deletion pub time: Option>, #[serde(skip_serializing_if = "Option::is_none")] + /// The pack size pub size: Option, } impl IndexPack { - pub fn add( + /// Add a new blob to the pack + /// + /// # Arguments + /// + /// * `id` - The blob id + /// * `tpe` - The blob type + /// * `offset` - The blob offset within the pack + /// * `length` - The blob length within the pack + /// * `uncompressed_length` - The blob uncompressed length within the pack + pub(crate) fn add( &mut self, id: Id, tpe: BlobType, @@ -60,15 +87,18 @@ impl IndexPack { }); } - // calculate the pack size from the contained blobs + /// Calculate the pack size from the contained blobs 
#[must_use] - pub fn pack_size(&self) -> u32 { + pub(crate) fn pack_size(&self) -> u32 { self.size .unwrap_or_else(|| PackHeaderRef::from_index_pack(self).pack_size()) } - /// returns the blob type of the pack. Note that only packs with - /// identical blob types are allowed + /// Returns the blob type of the pack. + /// + /// # Note + /// + /// Only packs with identical blob types are allowed. #[must_use] pub fn blob_type(&self) -> BlobType { // TODO: This is a hack to support packs without blobs (e.g. when deleting unreferenced files) @@ -81,23 +111,47 @@ impl IndexPack { } #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq, Copy)] +/// Index information about a `blob` pub struct IndexBlob { + /// Blob Id pub id: Id, #[serde(rename = "type")] + /// Type of the blob pub tpe: BlobType, + /// Offset of the blob within the `pack` file pub offset: u32, + /// Length of the blob as stored within the `pack` file pub length: u32, + /// Data length of the blob. This is only set if the blob is compressed. 
#[serde(skip_serializing_if = "Option::is_none")] pub uncompressed_length: Option, } impl PartialOrd for IndexBlob { + /// Compare two blobs by their offset + /// + /// # Arguments + /// + /// * `other` - The other blob to compare to + /// + /// # Returns + /// + /// The ordering of the two blobs fn partial_cmp(&self, other: &Self) -> Option { self.offset.partial_cmp(&other.offset) } } impl Ord for IndexBlob { + /// Compare two blobs by their offset + /// + /// # Arguments + /// + /// * `other` - The other blob to compare to + /// + /// # Returns + /// + /// The ordering of the two blobs fn cmp(&self, other: &Self) -> Ordering { self.offset.cmp(&other.offset) } diff --git a/crates/rustic_core/src/repofile/keyfile.rs b/crates/rustic_core/src/repofile/keyfile.rs index 580d48b60..283e177ac 100644 --- a/crates/rustic_core/src/repofile/keyfile.rs +++ b/crates/rustic_core/src/repofile/keyfile.rs @@ -7,37 +7,70 @@ use serde_with::{base64::Base64, serde_as}; use crate::{ backend::{FileType, ReadBackend}, crypto::{aespoly1305::Key, CryptoKey}, - error::KeyFileErrorKind, + error::{KeyFileErrorKind, RusticResult}, id::Id, - RusticResult, }; pub(super) mod constants { + /// Returns the number of bits of the given type. pub(super) const fn num_bits() -> usize { std::mem::size_of::() * 8 } } +/// Key files describe information about repository access keys. 
+/// +/// They are usually stored in the repository under `/keys/` #[serde_as] #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Serialize, Deserialize, Debug)] pub struct KeyFile { + /// Hostname where the key was created hostname: Option, + + /// User which created the key username: Option, + + /// Creation time of the key created: Option>, + + /// The used key derivation function (currently only `scrypt`) kdf: String, + + /// Parameter N for `scrypt` #[serde(rename = "N")] n: u32, + + /// Parameter r for `scrypt` r: u32, + + /// Parameter p for `scrypt` p: u32, + + /// The key data encrypted by `scrypt` #[serde_as(as = "Base64")] data: Vec, + + /// The salt used with `scrypt` #[serde_as(as = "Base64")] salt: Vec, } impl KeyFile { /// Generate a Key using the key derivation function from [`KeyFile`] and a given password + /// + /// # Arguments + /// + /// * `passwd` - The password to use for the key derivation function + /// + /// # Errors + /// + /// * [`KeyFileErrorKind::InvalidSCryptParameters`] - If the parameters of the key derivation function are invalid + /// * [`KeyFileErrorKind::OutputLengthInvalid`] - If the output length of the key derivation function is invalid + /// + /// # Returns + /// + /// The generated key pub fn kdf_key(&self, passwd: &impl AsRef<[u8]>) -> RusticResult { let params = Params::new(log_2(self.n)?, self.r, self.p, Params::RECOMMENDED_LEN) .map_err(KeyFileErrorKind::InvalidSCryptParameters)?; @@ -51,6 +84,18 @@ impl KeyFile { /// Extract a key from the data of the [`KeyFile`] using the given key. 
/// The key usually should be the key generated by [`kdf_key()`](Self::kdf_key) + /// + /// # Arguments + /// + /// * `key` - The key to use for decryption + /// + /// # Errors + /// + /// * [`KeyFileErrorKind::DeserializingFromSliceFailed`] - If the data could not be deserialized + /// + /// # Returns + /// + /// The extracted key pub fn key_from_data(&self, key: &Key) -> RusticResult { let dec_data = key.decrypt_data(&self.data)?; Ok(serde_json::from_slice::(&dec_data) @@ -60,11 +105,40 @@ impl KeyFile { /// Extract a key from the data of the [`KeyFile`] using the key /// from the derivation function in combination with the given password. + /// + /// # Arguments + /// + /// * `passwd` - The password to use for the key derivation function + /// + /// # Errors + /// + /// * [`KeyFileErrorKind::InvalidSCryptParameters`] - If the parameters of the key derivation function are invalid + /// + /// # Returns + /// + /// The extracted key pub fn key_from_password(&self, passwd: &impl AsRef<[u8]>) -> RusticResult { self.key_from_data(&self.kdf_key(passwd)?) } /// Generate a new [`KeyFile`] from a given key and password. 
+ /// + /// # Arguments + /// + /// * `key` - The key to use for encryption + /// * `passwd` - The password to use for the key derivation function + /// * `hostname` - The hostname to use for the [`KeyFile`] + /// * `username` - The username to use for the [`KeyFile`] + /// * `with_created` - Whether to set the creation time of the [`KeyFile`] to the current time + /// + /// # Errors + /// + /// * [`KeyFileErrorKind::OutputLengthInvalid`] - If the output length of the key derivation function is invalid + /// * [`KeyFileErrorKind::CouldNotSerializeAsJsonByteVector`] - If the [`KeyFile`] could not be serialized + /// + /// # Returns + /// + /// The generated [`KeyFile`] pub fn generate( key: Key, passwd: &impl AsRef<[u8]>, @@ -101,6 +175,19 @@ impl KeyFile { } /// Get a [`KeyFile`] from the backend + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `id` - The id of the [`KeyFile`] + /// + /// # Errors + /// + /// * [`KeyFileErrorKind::ReadingFromBackendFailed`] - If the [`KeyFile`] could not be read from the backend + /// + /// # Returns + /// + /// The [`KeyFile`] read from the backend fn from_backend(be: &B, id: &Id) -> RusticResult { let data = be.read_full(FileType::Key, id)?; Ok( @@ -110,6 +197,19 @@ impl KeyFile { } } +/// Calculate the logarithm to base 2 of the given number +/// +/// # Arguments +/// +/// * `x` - The number to calculate the logarithm to base 2 of +/// +/// # Errors +/// +/// * [`KeyFileErrorKind::ConversionFromU32ToU8Failed`] - If the conversion from `u32` to `u8` failed +/// +/// # Returns +/// +/// The logarithm to base 2 of the given number fn log_2(x: u32) -> RusticResult { assert!(x > 0); Ok(u8::try_from(constants::num_bits::()) @@ -118,24 +218,44 @@ fn log_2(x: u32) -> RusticResult { - 1) } +/// The mac of a [`Key`] +/// +/// This is used to verify the integrity of the key #[serde_as] #[derive(Serialize, Deserialize, Debug)] pub(crate) struct Mac { + /// The key used for the mac #[serde_as(as = "Base64")] k: Vec, + 
+ /// The random value used for the mac #[serde_as(as = "Base64")] r: Vec, } +/// The master key of a [`Key`] +/// +/// This is used to encrypt the key #[serde_as] #[derive(Serialize, Deserialize, Debug)] pub(crate) struct MasterKey { + /// The mac of the key mac: Mac, + /// The encrypted key #[serde_as(as = "Base64")] encrypt: Vec, } impl MasterKey { + /// Create a [`MasterKey`] from a [`Key`] + /// + /// # Arguments + /// + /// * `key` - The key to create the [`MasterKey`] from + /// + /// # Returns + /// + /// The created [`MasterKey`] fn from_key(key: Key) -> Self { let (encrypt, k, r) = key.to_keys(); Self { @@ -144,11 +264,23 @@ impl MasterKey { } } + /// Get the [`Key`] from the [`MasterKey`] fn key(&self) -> Key { Key::from_keys(&self.encrypt, &self.mac.k, &self.mac.r) } } +/// Get a [`KeyFile`] from the backend +/// +/// # Arguments +/// +/// * `be` - The backend to use +/// * `id` - The id of the [`KeyFile`] +/// * `passwd` - The password to use +/// +/// # Errors +/// +/// * [`KeyFileErrorKind::ReadingFromBackendFailed`] - If the [`KeyFile`] could not be read from the backend pub(crate) fn key_from_backend( be: &B, id: &Id, @@ -160,6 +292,20 @@ pub(crate) fn key_from_backend( /// Find a [`KeyFile`] in the backend that fits to the given password and return the contained key. /// If a key hint is given, only this key is tested. /// This is recommended for a large number of keys. 
+/// +/// # Arguments +/// +/// * `be` - The backend to use +/// * `passwd` - The password to use +/// * `hint` - The key hint to use +/// +/// # Errors +/// +/// * [`KeyFileErrorKind::NoSuitableKeyFound`] - If no suitable key was found +/// +/// # Returns +/// +/// The found key pub(crate) fn find_key_in_backend( be: &B, passwd: &impl AsRef<[u8]>, diff --git a/crates/rustic_core/src/repofile/packfile.rs b/crates/rustic_core/src/repofile/packfile.rs index 00a91dc3e..e4cb5454c 100644 --- a/crates/rustic_core/src/repofile/packfile.rs +++ b/crates/rustic_core/src/repofile/packfile.rs @@ -14,27 +14,44 @@ use crate::{ pub(super) mod constants { // 32 equals the size of the crypto overhead // TODO: use from crypto mod + /// The overhead of compression and encryption pub(super) const COMP_OVERHEAD: u32 = 32; + /// The length of the length field within the pack header pub(super) const LENGTH_LEN: u32 = 4; } +/// The length field within the pack header (which is the total length of the pack header) #[derive(BinWrite, BinRead, Debug, Clone, Copy)] #[brw(little)] -pub struct PackHeaderLength(u32); +pub struct PackHeaderLength(pub u32); impl PackHeaderLength { + /// Create a new [`PackHeaderLength`] from a [`u32`] + /// + /// # Arguments + /// + /// * `len` - The length of the pack header #[must_use] - pub const fn from_u32(len: u32) -> Self { + pub(crate) const fn from_u32(len: u32) -> Self { Self(len) } + /// Convert this pack header length into a [`u32`] #[must_use] - pub const fn to_u32(&self) -> u32 { + pub(crate) const fn to_u32(self) -> u32 { self.0 } /// Read pack header length from binary representation - pub fn from_binary(data: &[u8]) -> RusticResult { + /// + /// # Arguments + /// + /// * `data` - The binary representation of the pack header length + /// + /// # Errors + /// + /// * [`PackFileErrorKind::ReadingBinaryRepresentationFailed`] - If reading the binary representation failed + pub(crate) fn from_binary(data: &[u8]) -> RusticResult { let mut reader = 
Cursor::new(data); Ok( Self::read(&mut reader) @@ -42,8 +59,12 @@ impl PackHeaderLength { ) } - /// generate the binary representation of the pack header length - pub fn to_binary(&self) -> RusticResult> { + /// Generate the binary representation of the pack header length + /// + /// # Errors + /// + /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If writing the binary representation failed + pub(crate) fn to_binary(self) -> RusticResult> { let mut writer = Cursor::new(Vec::with_capacity(4)); self.write(&mut writer) .map_err(PackFileErrorKind::WritingBinaryRepresentationFailed)?; @@ -51,26 +72,63 @@ impl PackHeaderLength { } } +/// An entry in the pack header #[derive(BinRead, BinWrite, Debug, Clone, Copy)] #[brw(little)] pub enum HeaderEntry { + /// Entry for an uncompressed data blob #[brw(magic(0u8))] - Data { len: u32, id: Id }, + Data { + /// Lengths within a packfile + len: u32, + /// Id of data blob + id: Id, + }, + /// Entry for an uncompressed tree blob #[brw(magic(1u8))] - Tree { len: u32, id: Id }, + Tree { + /// Lengths within a packfile + len: u32, + /// Id of tree blob + id: Id, + }, + /// Entry for a compressed data blob #[brw(magic(2u8))] - CompData { len: u32, len_data: u32, id: Id }, + CompData { + /// Lengths within a packfile + len: u32, + /// Raw blob length without compression/encryption + len_data: u32, + /// Id of compressed data blob + id: Id, + }, + /// Entry for a compressed tree blob #[brw(magic(3u8))] - CompTree { len: u32, len_data: u32, id: Id }, + CompTree { + /// Lengths within a packfile + len: u32, + /// Raw blob length withou compression/encryption + len_data: u32, + /// Id of compressed tree blob + id: Id, + }, } impl HeaderEntry { + /// The length of an uncompressed header entry const ENTRY_LEN: u32 = 37; - pub const ENTRY_LEN_COMPRESSED: u32 = 41; + /// The length of a compressed header entry + pub(crate) const ENTRY_LEN_COMPRESSED: u32 = 41; + + /// Read a [`HeaderEntry`] from an [`IndexBlob`] + /// + /// # 
Arguments + /// + /// * `blob` - The [`IndexBlob`] to read from const fn from_blob(blob: &IndexBlob) -> Self { match (blob.uncompressed_length, blob.tpe) { (None, BlobType::Data) => Self::Data { @@ -94,7 +152,7 @@ impl HeaderEntry { } } - // the length of this header entry + /// The length of this header entry const fn length(&self) -> u32 { match &self { Self::Data { .. } | Self::Tree { .. } => Self::ENTRY_LEN, @@ -102,6 +160,11 @@ impl HeaderEntry { } } + /// Convert this header entry into a [`IndexBlob`] + /// + /// # Arguments + /// + /// * `offset` - The offset to read from const fn into_blob(self, offset: u32) -> IndexBlob { match self { Self::Data { len, id } => IndexBlob { @@ -136,12 +199,21 @@ impl HeaderEntry { } } +/// Header of the pack file #[derive(Debug, Clone)] -pub struct PackHeader(Vec); +pub struct PackHeader(pub Vec); impl PackHeader { - /// Read the binary representation of the pack header - pub fn from_binary(pack: &[u8]) -> RusticResult { + /// Create a new [`PackHeader`] from a [`IndexPack`] + /// + /// # Arguments + /// + /// * `pack` - The binary representation of the pack header + /// + /// # Errors + /// + /// * [`PackFileErrorKind::ReadingBinaryRepresentationFailed`] - If reading the binary representation failed + pub(crate) fn from_binary(pack: &[u8]) -> RusticResult { let mut reader = Cursor::new(pack); let mut offset = 0; let mut blobs = Vec::new(); @@ -160,7 +232,21 @@ impl PackHeader { } /// Read the pack header directly from a packfile using the backend - pub fn from_file( + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `id` - The id of the packfile + /// * `size_hint` - The size hint for the pack header + /// * `pack_size` - The size of the packfile + /// + /// # Errors + /// + /// * [`PackFileErrorKind::ReadingBinaryRepresentationFailed`] - If reading the binary representation failed + /// * [`PackFileErrorKind::HeaderLengthTooLarge`] - If the header length is too large + /// * 
[`PackFileErrorKind::HeaderLengthDoesNotMatchHeaderContents`] - If the header length does not match the header contents + /// * [`PackFileErrorKind::HeaderPackSizeComputedDoesNotMatchRealPackFile`] - If the pack size computed from the header does not match the real pack file size + pub(crate) fn from_file( be: &impl DecryptReadBackend, id: Id, size_hint: Option, @@ -222,52 +308,63 @@ impl PackHeader { Ok(header) } - // destructor for [`PackHeader`] cannot be evaluated at compile time + /// Convert this [`PackHeader`] into a [`Vec`] of [`IndexBlob`]s + // Clippy lint: Destructor for [`PackHeader`] cannot be evaluated at compile time #[allow(clippy::missing_const_for_fn)] #[must_use] - pub fn into_blobs(self) -> Vec { + pub(crate) fn into_blobs(self) -> Vec { self.0 } - // calculate the pack header size from the contained blobs + /// Calculate the pack header size from the contained blobs fn size(&self) -> u32 { PackHeaderRef(&self.0).size() } - // calculate the pack size from the contained blobs + /// Calculate the pack size from the contained blobs fn pack_size(&self) -> u32 { PackHeaderRef(&self.0).pack_size() } } +/// As [`PackHeader`], but utilizing a reference instead #[derive(Debug, Clone)] -pub struct PackHeaderRef<'a>(&'a [IndexBlob]); +pub struct PackHeaderRef<'a>(pub &'a [IndexBlob]); impl<'a> PackHeaderRef<'a> { + /// Create a new [`PackHeaderRef`] from a [`IndexPack`] + /// + /// # Arguments + /// + /// * `pack` - The [`IndexPack`] to create the [`PackHeaderRef`] from #[must_use] - pub fn from_index_pack(pack: &'a IndexPack) -> Self { + pub(crate) fn from_index_pack(pack: &'a IndexPack) -> Self { Self(&pack.blobs) } - // calculate the pack header size from the contained blobs + /// Calculate the pack header size from the contained blobs #[must_use] - pub fn size(&self) -> u32 { + pub(crate) fn size(&self) -> u32 { self.0.iter().fold(constants::COMP_OVERHEAD, |acc, blob| { acc + HeaderEntry::from_blob(blob).length() }) } - // calculate the pack size from 
the contained blobs + /// Calculate the pack size from the contained blobs #[must_use] - pub fn pack_size(&self) -> u32 { + pub(crate) fn pack_size(&self) -> u32 { self.0.iter().fold( constants::COMP_OVERHEAD + constants::LENGTH_LEN, |acc, blob| acc + blob.length + HeaderEntry::from_blob(blob).length(), ) } - /// generate the binary representation of the pack header - pub fn to_binary(&self) -> RusticResult> { + /// Generate the binary representation of the pack header + /// + /// # Errors + /// + /// * [`PackFileErrorKind::WritingBinaryRepresentationFailed`] - If writing the binary representation failed + pub(crate) fn to_binary(&self) -> RusticResult> { let mut writer = Cursor::new(Vec::with_capacity(self.pack_size() as usize)); // collect header entries for blob in self.0 { diff --git a/crates/rustic_core/src/repofile/snapshotfile.rs b/crates/rustic_core/src/repofile/snapshotfile.rs index d673572cc..54746cc2c 100644 --- a/crates/rustic_core/src/repofile/snapshotfile.rs +++ b/crates/rustic_core/src/repofile/snapshotfile.rs @@ -1,106 +1,207 @@ use std::{ cmp::Ordering, fmt::{self, Display}, - path::PathBuf, + path::{Path, PathBuf}, str::FromStr, }; use chrono::{DateTime, Duration, Local}; use derivative::Derivative; +use derive_setters::Setters; use dunce::canonicalize; use gethostname::gethostname; use itertools::Itertools; use log::info; use path_dedot::ParseDot; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, DeserializeFromStr, DisplayFromStr}; +use serde_with::{serde_as, DisplayFromStr}; use shell_words::split; use crate::{ backend::{decrypt::DecryptReadBackend, FileType}, error::SnapshotFileErrorKind, + error::{RusticError, RusticResult}, id::Id, + progress::Progress, repofile::RepoFile, - Progress, RusticError, RusticResult, }; +/// Options for creating a new [`SnapshotFile`] structure for a new backup snapshot. +/// +/// This struct derives [`serde::Deserialize`] allowing to use it in config files. 
+/// +/// # Features +/// +/// * With the feature `merge` enabled, this also derives [`merge::Merge`] to allow merging [`SnapshotOptions`] from multiple sources. +/// * With the feature `clap` enabled, this also derives [`clap::Parser`] allowing it to be used as CLI options. +/// +/// # Note +/// +/// The preferred way is to use [`SnapshotFile::from_options`] to create a SnapshotFile for a new backup. #[serde_as] #[cfg_attr(feature = "merge", derive(merge::Merge))] #[cfg_attr(feature = "clap", derive(clap::Parser))] -#[derive(Deserialize, Clone, Default, Debug)] +#[derive(Deserialize, Serialize, Clone, Default, Debug, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] +#[setters(into)] +#[non_exhaustive] pub struct SnapshotOptions { /// Label snapshot with given label #[cfg_attr(feature = "clap", clap(long, value_name = "LABEL"))] - label: Option, + pub label: Option, /// Tags to add to snapshot (can be specified multiple times) #[cfg_attr(feature = "clap", clap(long, value_name = "TAG[,TAG,..]"))] #[serde_as(as = "Vec")] #[cfg_attr(feature = "merge", merge(strategy = merge::vec::overwrite_empty))] - tag: Vec, + pub tag: Vec, /// Add description to snapshot #[cfg_attr(feature = "clap", clap(long, value_name = "DESCRIPTION"))] - description: Option, + pub description: Option, /// Add description to snapshot from file #[cfg_attr( feature = "clap", clap(long, value_name = "FILE", conflicts_with = "description") )] - description_from: Option, + pub description_from: Option, + + /// Set the backup time manually + #[cfg_attr(feature = "clap", clap(long))] + pub time: Option>, /// Mark snapshot as uneraseable #[cfg_attr(feature = "clap", clap(long, conflicts_with = "delete_after"))] #[cfg_attr(feature = "merge", merge(strategy = merge::bool::overwrite_false))] - delete_never: bool, + pub delete_never: bool, /// Mark snapshot to be deleted after given duration (e.g. 
10d) #[cfg_attr(feature = "clap", clap(long, value_name = "DURATION"))] #[serde_as(as = "Option")] - delete_after: Option, + pub delete_after: Option, /// Set the host name manually #[cfg_attr(feature = "clap", clap(long, value_name = "NAME"))] - host: Option, + pub host: Option, + + /// Set the backup command manually + #[cfg_attr(feature = "clap", clap(long))] + pub command: Option, } +impl SnapshotOptions { + /// Add tags to this [`SnapshotOptions`] + /// + /// # Arguments + /// + /// * `tag` - The tag to add + pub fn add_tags(mut self, tag: &str) -> RusticResult { + self.tag.push(StringList::from_str(tag)?); + Ok(self) + } + + /// Create a new [`SnapshotFile`] using this `SnapshotOption`s + pub fn to_snapshot(&self) -> RusticResult { + SnapshotFile::from_options(self) + } +} + +/// Summary information about a snapshot. +/// /// This is an extended version of the summaryOutput structure of restic in /// restic/internal/ui/backup$/json.go #[derive(Serialize, Deserialize, Debug, Clone, Derivative)] #[derivative(Default)] +#[non_exhaustive] pub struct SnapshotSummary { + /// New files compared to the last (i.e. parent) snapshot pub files_new: u64, + + /// Changed files compared to the last (i.e. parent) snapshot pub files_changed: u64, + + /// Unchanged files compared to the last (i.e. parent) snapshot pub files_unmodified: u64, + + /// Total processed files + pub total_files_processed: u64, + + /// Total size of all processed files + pub total_bytes_processed: u64, + + /// New directories compared to the last (i.e. parent) snapshot pub dirs_new: u64, + + /// Changed directories compared to the last (i.e. parent) snapshot pub dirs_changed: u64, + + /// Unchanged directories compared to the last (i.e. 
parent) snapshot pub dirs_unmodified: u64, + + /// Total processed directories + pub total_dirs_processed: u64, + + /// Total number of data blobs added by this snapshot + pub total_dirsize_processed: u64, + + /// Total size of all processed dirs pub data_blobs: u64, + + /// Total number of tree blobs added by this snapshot pub tree_blobs: u64, + + /// Total uncompressed bytes added by this snapshot pub data_added: u64, + + /// Total bytes added to the repository by this snapshot pub data_added_packed: u64, + + /// Total uncompressed bytes (new/changed files) added by this snapshot pub data_added_files: u64, + + /// Total bytes for new/changed files added to the repository by this snapshot pub data_added_files_packed: u64, + + /// Total uncompressed bytes (new/changed directories) added by this snapshot pub data_added_trees: u64, + + /// Total bytes (new/changed directories) added to the repository by this snapshot pub data_added_trees_packed: u64, - pub total_files_processed: u64, - pub total_dirs_processed: u64, - pub total_bytes_processed: u64, - pub total_dirsize_processed: u64, - pub total_duration: f64, // in seconds + /// The command used to make this backup pub command: String, + + /// Start time of the backup. + /// + /// # Note + /// + /// This may differ from the snapshot `time`. #[derivative(Default(value = "Local::now()"))] pub backup_start: DateTime, + + /// The time that the backup has been finished. #[derivative(Default(value = "Local::now()"))] pub backup_end: DateTime, - pub backup_duration: f64, // in seconds + + /// Total duration of the backup in seconds, i.e. the time between `backup_start` and `backup_end` + pub backup_duration: f64, + + /// Total duration that the rustic command ran in seconds + pub total_duration: f64, } impl SnapshotSummary { - pub fn finalize(&mut self, snap_time: DateTime) -> RusticResult<()> { + /// Create a new [`SnapshotSummary`]. 
+ /// + /// # Arguments + /// + /// * `snap_time` - The time of the snapshot + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::OutOfRange`] - If the time is not in the range of `Local::now()` + pub(crate) fn finalize(&mut self, snap_time: DateTime) -> RusticResult<()> { let end_time = Local::now(); self.backup_duration = (end_time - self.backup_start) .to_std() @@ -115,16 +216,21 @@ impl SnapshotSummary { } } +/// Options for deleting snapshots. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Derivative, Copy)] #[derivative(Default)] pub enum DeleteOption { + /// No delete option set. #[derivative(Default)] NotSet, + /// This snapshot should be never deleted (remove-protection). Never, + /// Remove this snapshot after the given timestamp, but prevent removing it before. After(DateTime), } impl DeleteOption { + /// Returns whether the delete option is set to `NotSet`. const fn is_not_set(&self) -> bool { matches!(self, Self::NotSet) } @@ -133,51 +239,100 @@ impl DeleteOption { #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Debug, Clone, Serialize, Deserialize, Derivative)] #[derivative(Default)] +/// A [`SnapshotFile`] is the repository representation of the snapshot metadata saved in a repository. +/// +/// It is usually saved in the repository under `snapshot/` +/// +/// # Note +/// +/// [`SnapshotFile`] implements [`Eq`], [`PartialEq`], [`Ord`], [`PartialOrd`] by comparing only the `time` field. +/// If you need another ordering, you have to implement that yourself. pub struct SnapshotFile { #[derivative(Default(value = "Local::now()"))] + /// Timestamp of this snapshot pub time: DateTime, + + /// Program identifier and its version that have been used to create this snapshot. 
#[derivative(Default( value = "\"rustic \".to_string() + option_env!(\"PROJECT_VERSION\").unwrap_or(env!(\"CARGO_PKG_VERSION\"))" ))] #[serde(default, skip_serializing_if = "String::is_empty")] pub program_version: String, + + /// The Id of the parent snapshot that this snapshot has been based on pub parent: Option, + + /// The tree blob id where the contents of this snapshot are stored pub tree: Id, + + /// Label for the snapshot #[serde(default, skip_serializing_if = "String::is_empty")] pub label: String, + + /// The list of paths contained in this snapshot pub paths: StringList, + + /// The hostname of the device on which the snapshot has been created #[serde(default)] pub hostname: String, + + /// The username that started the backup run #[serde(default)] pub username: String, + + /// The uid of the username that started the backup run #[serde(default)] pub uid: u32, + + /// The gid of the username that started the backup run #[serde(default)] pub gid: u32, + + /// A list of tags for this snapshot #[serde(default)] pub tags: StringList, + + /// The original Id of this snapshot. This is stored when the snapshot is modified. pub original: Option, + + /// Options for deletion of the snapshot #[serde(default, skip_serializing_if = "DeleteOption::is_not_set")] pub delete: DeleteOption, + /// Summary information about the backup run pub summary: Option, + + /// A description of what is contained in this snapshot pub description: Option, + /// The snapshot Id (not stored within the JSON) #[serde(default, skip_serializing_if = "Id::is_null")] pub id: Id, } impl RepoFile for SnapshotFile { + /// The file type of a [`SnapshotFile`] is always [`FileType::Snapshot`] const TYPE: FileType = FileType::Snapshot; } impl SnapshotFile { - pub fn new_from_options( - opts: &SnapshotOptions, - time: DateTime, - command: String, - ) -> RusticResult { - let hostname = if let Some(ref host) = opts.host { + /// Create a [`SnapshotFile`] from [`SnapshotOptions`]. 
+ /// + /// # Arguments + /// + /// * `opts` - The [`SnapshotOptions`] to use + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::NonUnicodeHostname`] - If the hostname is not valid unicode + /// * [`SnapshotFileErrorKind::OutOfRange`] - If the delete time is not in the range of `Local::now()` + /// * [`SnapshotFileErrorKind::ReadingDescriptionFailed`] - If the description file could not be read + /// + /// # Note + /// + /// This is the preferred way to create a new [`SnapshotFile`] to be used within [`crate::Repository::backup`]. + pub fn from_options(opts: &SnapshotOptions) -> RusticResult { + let hostname = if let Some(host) = &opts.host { host.clone() } else { let hostname = gethostname(); @@ -187,6 +342,8 @@ impl SnapshotFile { .to_string() }; + let time = opts.time.unwrap_or_else(Local::now); + let delete = match (opts.delete_never, opts.delete_after) { (true, _) => DeleteOption::Never, (_, Some(d)) => DeleteOption::After( @@ -195,6 +352,16 @@ impl SnapshotFile { (false, None) => DeleteOption::NotSet, }; + let command: String = opts.command.as_ref().map_or_else( + || { + std::env::args_os() + .map(|s| s.to_string_lossy().to_string()) + .collect::>() + .join(" ") + }, + |command| command.clone(), + ); + let mut snap = Self { time, hostname, @@ -221,6 +388,11 @@ impl SnapshotFile { Ok(snap) } + /// Create a [`SnapshotFile`] from a given [`Id`] and [`RepoFile`]. 
+ /// + /// # Arguments + /// + /// * `tuple` - A tuple of the [`Id`] and the [`RepoFile`] to use fn set_id(tuple: (Id, Self)) -> Self { let (id, mut snap) = tuple; snap.id = id; @@ -229,11 +401,30 @@ impl SnapshotFile { } /// Get a [`SnapshotFile`] from the backend + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `id` - The id of the snapshot fn from_backend(be: &B, id: &Id) -> RusticResult { Ok(Self::set_id((*id, be.get_file(id)?))) } - pub fn from_str( + /// Get a [`SnapshotFile`] from the backend by (part of the) Id + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `string` - The (part of the) id of the snapshot + /// * `predicate` - A predicate to filter the snapshots + /// * `p` - A progress bar to use + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. 
+ pub(crate) fn from_str( be: &B, string: &str, predicate: impl FnMut(&Self) -> bool + Send + Sync, @@ -246,7 +437,17 @@ impl SnapshotFile { } /// Get the latest [`SnapshotFile`] from the backend - pub fn latest( + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `predicate` - A predicate to filter the snapshots + /// * `p` - A progress bar to use + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::NoSnapshotsFound`] - If no snapshots are found + pub(crate) fn latest( be: &B, predicate: impl FnMut(&Self) -> bool + Send + Sync, p: &impl Progress, @@ -274,16 +475,38 @@ impl SnapshotFile { } /// Get a [`SnapshotFile`] from the backend by (part of the) id - pub fn from_id(be: &B, id: &str) -> RusticResult { + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `id` - The (part of the) id of the snapshot + /// + /// # Errors + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. + pub(crate) fn from_id(be: &B, id: &str) -> RusticResult { info!("getting snapshot..."); let id = be.find_id(FileType::Snapshot, id)?; Self::from_backend(be, &id) } - /// Get a Vector of [`SnapshotFile`] from the backend by list of (parts of the) ids - pub fn from_ids( + /// Get a list of [`SnapshotFile`]s from the backend by supplying a list of/parts of their Ids + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `ids` - The list of (parts of the) ids of the snapshots + /// * `p` - A progress bar to use + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. 
+ pub(crate) fn from_ids>( be: &B, - ids: &[String], + ids: &[T], p: &impl Progress, ) -> RusticResult> { let ids = be.find_ids(FileType::Snapshot, ids)?; @@ -293,6 +516,16 @@ impl SnapshotFile { .try_collect() } + /// Compare two [`SnapshotFile`]s by criteria from [`SnapshotGroupCriterion`]. + /// + /// # Arguments + /// + /// * `crit` - The criteria to use for comparison + /// * `other` - The other [`SnapshotFile`] to compare to + /// + /// # Returns + /// + /// The ordering of the two [`SnapshotFile`]s fn cmp_group(&self, crit: SnapshotGroupCriterion, other: &Self) -> Ordering { if crit.hostname { self.hostname.cmp(&other.hostname) @@ -322,6 +555,11 @@ impl SnapshotFile { }) } + /// Check if the [`SnapshotFile`] is in the given [`SnapshotGroup`]. + /// + /// # Arguments + /// + /// * `group` - The [`SnapshotGroup`] to check #[must_use] pub fn has_group(&self, group: &SnapshotGroup) -> bool { group @@ -335,7 +573,14 @@ impl SnapshotFile { /// Get [`SnapshotFile`]s which match the filter grouped by the group criterion /// from the backend - pub fn group_from_backend( + /// + /// # Arguments + /// + /// * `be` - The backend to use + /// * `filter` - A filter to filter the snapshots + /// * `crit` - The criteria to use for grouping + /// * `p` - A progress bar to use + pub(crate) fn group_from_backend( be: &B, filter: F, crit: SnapshotGroupCriterion, @@ -351,7 +596,7 @@ impl SnapshotFile { let mut result = Vec::new(); for (group, snaps) in &snaps .into_iter() - .group_by(|sn| SnapshotGroup::from_sn(sn, crit)) + .group_by(|sn| SnapshotGroup::from_snapshot(sn, crit)) { result.push((group, snaps.collect())); } @@ -359,7 +604,12 @@ impl SnapshotFile { Ok(result) } - pub fn all_from_backend(be: &B, filter: F, p: &impl Progress) -> RusticResult> + // TODO: add documentation! 
+ pub(crate) fn all_from_backend( + be: &B, + filter: F, + p: &impl Progress, + ) -> RusticResult> where B: DecryptReadBackend, F: FnMut(&Self) -> bool, @@ -371,7 +621,15 @@ impl SnapshotFile { .try_collect() } - /// Add tag lists to snapshot. return whether snapshot was changed + /// Add tag lists to snapshot. + /// + /// # Arguments + /// + /// * `tag_lists` - The tag lists to add + /// + /// # Returns + /// + /// Returns whether snapshot was changed. pub fn add_tags(&mut self, tag_lists: Vec) -> bool { let old_tags = self.tags.clone(); self.tags.add_all(tag_lists); @@ -380,7 +638,15 @@ impl SnapshotFile { old_tags != self.tags } - /// Set tag lists to snapshot. return whether snapshot was changed + /// Set tag lists to snapshot. + /// + /// # Arguments + /// + /// * `tag_lists` - The tag lists to set + /// + /// # Returns + /// + /// Returns whether snapshot was changed. pub fn set_tags(&mut self, tag_lists: Vec) -> bool { let old_tags = std::mem::take(&mut self.tags); self.tags.add_all(tag_lists); @@ -389,7 +655,15 @@ impl SnapshotFile { old_tags != self.tags } - /// Remove tag lists from snapshot. return whether snapshot was changed + /// Remove tag lists from snapshot. + /// + /// # Arguments + /// + /// * `tag_lists` - The tag lists to remove + /// + /// # Returns + /// + /// Returns whether snapshot was changed. 
pub fn remove_tags(&mut self, tag_lists: &[StringList]) -> bool { let old_tags = self.tags.clone(); self.tags.remove_all(tag_lists); @@ -398,12 +672,20 @@ impl SnapshotFile { } /// Returns whether a snapshot must be deleted now + /// + /// # Arguments + /// + /// * `now` - The current time #[must_use] pub fn must_delete(&self, now: DateTime) -> bool { matches!(self.delete,DeleteOption::After(time) if time < now) } /// Returns whether a snapshot must be kept now + /// + /// # Arguments + /// + /// * `now` - The current time #[must_use] pub fn must_keep(&self, now: DateTime) -> bool { match self.delete { @@ -413,6 +695,19 @@ impl SnapshotFile { } } + /// Modifies the snapshot setting/adding/removing tag(s) and modifying [`DeleteOption`]s. + /// + /// # Arguments + /// + /// * `set` - The tags to set + /// * `add` - The tags to add + /// * `remove` - The tags to remove + /// * `delete` - The delete option to set + /// + /// # Returns + /// + /// `None` if the snapshot was not changed and + /// `Some(snap)` with a copy of the changed snapshot if it was changed. pub fn modify_sn( &mut self, set: Vec, @@ -438,9 +733,13 @@ impl SnapshotFile { changed.then_some(self.clone()) } - // clear ids which are not saved by the copy command (and not compared when checking if snapshots already exist in the copy target) + /// Clear ids which are not saved by the copy command (and not compared when checking if snapshots already exist in the copy target) + /// + /// # Arguments + /// + /// * `sn` - The snapshot to clear the ids from #[must_use] - pub fn clear_ids(mut sn: Self) -> Self { + pub(crate) fn clear_ids(mut sn: Self) -> Self { sn.id = Id::default(); sn.parent = None; sn @@ -452,6 +751,7 @@ impl PartialEq for SnapshotFile { self.time.eq(&other.time) } } + impl Eq for SnapshotFile {} impl PartialOrd for SnapshotFile { @@ -465,12 +765,24 @@ impl Ord for SnapshotFile { } } +/// [`SnapshotGroupCriterion`] determines how to group snapshots. 
+/// +/// `Default` grouping is by hostname, label and paths. #[allow(clippy::struct_excessive_bools)] -#[derive(DeserializeFromStr, Clone, Debug, Copy)] +#[derive(Clone, Debug, Copy, Setters)] +#[setters(into)] +#[non_exhaustive] pub struct SnapshotGroupCriterion { + /// Whether to group by hostnames pub hostname: bool, + + /// Whether to group by labels pub label: bool, + + /// Whether to group by paths pub paths: bool, + + /// Whether to group by tags pub tags: bool, } @@ -503,13 +815,42 @@ impl FromStr for SnapshotGroupCriterion { } } +impl Display for SnapshotGroupCriterion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut display = Vec::new(); + if self.hostname { + display.push("host"); + } + if self.label { + display.push("label"); + } + if self.paths { + display.push("paths"); + } + if self.tags { + display.push("tags"); + } + write!(f, "{}", display.join(","))?; + Ok(()) + } +} + #[serde_with::apply(Option => #[serde(default, skip_serializing_if = "Option::is_none")])] #[derive(Serialize, Default, Debug, PartialEq, Eq)] +#[non_exhaustive] +/// [`SnapshotGroup`] specifies the group after a grouping using [`SnapshotGroupCriterion`]. pub struct SnapshotGroup { - hostname: Option, - label: Option, - paths: Option, - tags: Option, + /// Group hostname, if grouped by hostname + pub hostname: Option, + + /// Group label, if grouped by label + pub label: Option, + + /// Group paths, if grouped by paths + pub paths: Option, + + /// Group tags, if grouped by tags + pub tags: Option, } impl Display for SnapshotGroup { @@ -535,8 +876,13 @@ impl Display for SnapshotGroup { } impl SnapshotGroup { - #[must_use] - pub fn from_sn(sn: &SnapshotFile, crit: SnapshotGroupCriterion) -> Self { + /// Extracts the suitable [`SnapshotGroup`] from a [`SnapshotFile`] using a given [`SnapshotGroupCriterion`]. 
+ /// + /// # Arguments + /// + /// * `sn` - The [`SnapshotFile`] to extract the [`SnapshotGroup`] from + /// * `crit` - The [`SnapshotGroupCriterion`] to use + pub fn from_snapshot(sn: &SnapshotFile, crit: SnapshotGroupCriterion) -> Self { Self { hostname: crit.hostname.then(|| sn.hostname.clone()), label: crit.label.then(|| sn.label.clone()), @@ -545,14 +891,16 @@ impl SnapshotGroup { } } + /// Returns whether this is an empty group, i.e. no grouping information is contained. #[must_use] pub fn is_empty(&self) -> bool { self == &Self::default() } } +/// `StringList` is a rustic-internal list of Strings. It is used within [`SnapshotFile`] #[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] -pub struct StringList(Vec); +pub struct StringList(pub(crate) Vec); impl FromStr for StringList { type Err = RusticError; @@ -571,68 +919,118 @@ impl Display for StringList { } impl StringList { - fn contains(&self, s: &String) -> bool { - self.0.contains(s) + /// Returns whether a [`StringList`] contains a given String. + /// + /// # Arguments + /// + /// * `s` - The String to check + pub fn contains(&self, s: &str) -> bool { + self.0.iter().any(|m| m == s) } - fn contains_all(&self, sl: &Self) -> bool { + /// Returns whether a [`StringList`] contains all Strings of another [`StringList`]. + /// + /// # Arguments + /// + /// * `sl` - The [`StringList`] to check + pub fn contains_all(&self, sl: &Self) -> bool { sl.0.iter().all(|s| self.contains(s)) } + /// Returns whether a [`StringList`] matches a list of [`StringList`]s, + /// i.e. whether it contains all Strings of one the given [`StringList`]s. + /// + /// # Arguments + /// + /// * `sls` - The list of [`StringList`]s to check #[must_use] pub fn matches(&self, sls: &[Self]) -> bool { sls.is_empty() || sls.iter().any(|sl| self.contains_all(sl)) } - fn add(&mut self, s: String) { + /// Add a String to a [`StringList`]. 
+ /// + /// # Arguments + /// + /// * `s` - The String to add + pub fn add(&mut self, s: String) { if !self.contains(&s) { self.0.push(s); } } - fn add_list(&mut self, sl: Self) { + /// Add all Strings from another [`StringList`] to this [`StringList`]. + /// + /// # Arguments + /// + /// * `sl` - The [`StringList`] to add + pub fn add_list(&mut self, sl: Self) { for s in sl.0 { self.add(s); } } - fn add_all(&mut self, string_lists: Vec) { + /// Add all Strings from all given [`StringList`]s to this [`StringList`]. + /// + /// # Arguments + /// + /// * `string_lists` - The [`StringList`]s to add + pub fn add_all(&mut self, string_lists: Vec) { for sl in string_lists { self.add_list(sl); } } - pub fn set_paths(&mut self, paths: &[PathBuf]) -> RusticResult<()> { + /// Adds the given Paths as Strings to this [`StringList`]. + /// + /// # Arguments + /// + /// * `paths` - The Paths to add + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::NonUnicodePath`] - If a path is not valid unicode + pub(crate) fn set_paths>(&mut self, paths: &[T]) -> RusticResult<()> { self.0 = paths .iter() .map(|p| { - Ok(p.to_str() - .ok_or_else(|| SnapshotFileErrorKind::NonUnicodePath(p.into()))? + Ok(p.as_ref() + .to_str() + .ok_or_else(|| SnapshotFileErrorKind::NonUnicodePath(p.as_ref().to_path_buf()))? .to_string()) }) .collect::>>()?; Ok(()) } - fn remove_all(&mut self, string_lists: &[Self]) { + /// Remove all Strings from all given [`StringList`]s from this [`StringList`]. 
+ /// + /// # Arguments + /// + /// * `string_lists` - The [`StringList`]s to remove + pub fn remove_all(&mut self, string_lists: &[Self]) { self.0 .retain(|s| !string_lists.iter().any(|sl| sl.contains(s))); } - fn sort(&mut self) { + /// Sort the Strings in the [`StringList`] + pub fn sort(&mut self) { self.0.sort_unstable(); } + /// Format this [`StringList`] using newlines #[must_use] pub fn formatln(&self) -> String { self.0.join("\n") } + /// Turn this [`StringList`] into an Iterator pub fn iter(&self) -> std::slice::Iter<'_, String> { self.0.iter() } } +/// `PathList` is a rustic-internal list of `PathBuf`s. It is used in the [`crate::Repository::backup`] command. #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] pub struct PathList(Vec); @@ -649,47 +1047,63 @@ impl Display for PathList { } impl PathList { - pub fn from_strings(source: I, sanitize: bool) -> RusticResult + /// Create a `PathList` from `String`s. + /// + /// # Arguments + /// + /// * `source` - The `String`s to use + pub fn from_strings(source: I) -> Self where I: IntoIterator, I::Item: AsRef, { - let mut paths = Self( + Self( source .into_iter() .map(|source| PathBuf::from(source.as_ref())) .collect(), - ); + ) + } - if sanitize { - paths.sanitize()?; - } - paths.merge_paths(); - Ok(paths) + /// Create a `PathList` by parsing a Strings containing paths separated by whitspaces. + /// + /// # Arguments + /// + /// * `sources` - The String to parse + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::FromNomError`] - If the parsing failed + pub fn from_string(sources: &str) -> RusticResult { + let sources = split(sources).map_err(SnapshotFileErrorKind::FromSplitError)?; + Ok(Self::from_strings(sources)) } + /// Number of paths in the `PathList`. #[must_use] pub fn len(&self) -> usize { self.0.len() } + /// Returns whether the `PathList` is empty. 
#[must_use] pub fn is_empty(&self) -> bool { self.0.len() == 0 } - pub fn from_string(sources: &str, sanitize: bool) -> RusticResult { - let sources = split(sources).map_err(SnapshotFileErrorKind::FromSplitError)?; - Self::from_strings(sources, sanitize) - } - + /// Clone the internal `Vec`. #[must_use] - pub fn paths(&self) -> Vec { + pub(crate) fn paths(&self) -> Vec { self.0.clone() } - // sanitize paths: parse dots and absolutize if needed - fn sanitize(&mut self) -> RusticResult<()> { + /// Sanitize paths: Parse dots, absolutize if needed and merge paths. + /// + /// # Errors + /// + /// * [`SnapshotFileErrorKind::RemovingDotsFromPathFailed`] - If removing dots from path failed + /// * [`SnapshotFileErrorKind::CanonicalizingPathFailed`] - If canonicalizing path failed + pub fn sanitize(mut self) -> RusticResult { for path in &mut self.0 { *path = path .parse_dot() @@ -702,23 +1116,26 @@ impl PathList { canonicalize(&path).map_err(SnapshotFileErrorKind::CanonicalizingPathFailed)?; } } - Ok(()) + Ok(self.merge()) } - // sort paths and filters out subpaths of already existing paths - fn merge_paths(&mut self) { + /// Sort paths and filters out subpaths of already existing paths. 
+ pub fn merge(self) -> Self { + let mut paths = self.0; // sort paths - self.0.sort_unstable(); + paths.sort_unstable(); let mut root_path = None; // filter out subpaths - self.0.retain(|path| match &root_path { + paths.retain(|path| match &root_path { Some(root_path) if path.starts_with(root_path) => false, _ => { root_path = Some(path.clone()); true } }); + + Self(paths) } } diff --git a/crates/rustic_core/src/repository.rs b/crates/rustic_core/src/repository.rs index 80788f08c..bb611698f 100644 --- a/crates/rustic_core/src/repository.rs +++ b/crates/rustic_core/src/repository.rs @@ -20,39 +20,48 @@ use crate::{ choose::ChooseBackend, decrypt::{DecryptBackend, DecryptFullBackend, DecryptReadBackend, DecryptWriteBackend}, hotcold::HotColdBackend, + local::LocalDestination, + node::Node, FileType, ReadBackend, }, + blob::{ + tree::{NodeStreamer, TreeStreamerOptions as LsOptions}, + BlobType, + }, commands::{ self, - backup::BackupOpts, - check::CheckOpts, - config::ConfigOpts, + backup::BackupOptions, + check::CheckOptions, + config::ConfigOptions, copy::CopySnapshot, forget::{ForgetGroups, KeepOptions}, - key::KeyOpts, + key::KeyOptions, + prune::{PruneOptions, PrunePlan}, repair::{index::RepairIndexOptions, snapshots::RepairSnapshotsOptions}, repoinfo::{IndexInfos, RepoFileInfos}, - restore::{RestoreInfos, RestoreOpts}, + restore::{RestoreOptions, RestorePlan}, }, crypto::aespoly1305::Key, + error::RusticResult, error::{KeyFileErrorKind, RepositoryErrorKind, RusticErrorKind}, - index::IndexEntry, + id::Id, + index::{IndexBackend, IndexEntry, IndexedBackend, ReadIndex}, + progress::{NoProgressBars, ProgressBars}, repofile::{ - configfile::ConfigFile, keyfile::find_key_in_backend, snapshotfile::SnapshotSummary, - RepoFile, + keyfile::find_key_in_backend, + snapshotfile::{SnapshotGroup, SnapshotGroupCriterion}, + ConfigFile, PathList, RepoFile, SnapshotFile, SnapshotSummary, Tree, }, - BlobType, Id, IndexBackend, IndexedBackend, LocalDestination, NoProgressBars, 
Node, - NodeStreamer, PathList, ProgressBars, PruneOpts, PrunePlan, ReadIndex, RusticResult, - SnapshotFile, SnapshotGroup, SnapshotGroupCriterion, Tree, TreeStreamerOptions, }; mod warm_up; use warm_up::{warm_up, warm_up_wait}; +/// Options for using and opening a [`Repository`] #[serde_as] #[cfg_attr(feature = "clap", derive(clap::Parser))] #[cfg_attr(feature = "merge", derive(merge::Merge))] -#[derive(Clone, Default, Debug, serde::Deserialize, Setters)] +#[derive(Clone, Default, Debug, serde::Deserialize, serde::Serialize, Setters)] #[serde(default, rename_all = "kebab-case", deny_unknown_fields)] #[setters(into, strip_option)] pub struct RepositoryOptions { @@ -70,9 +79,13 @@ pub struct RepositoryOptions { )] pub repo_hot: Option, - /// Password of the repository - WARNING: Using --password can reveal the password in the process list! + /// Password of the repository + /// + /// # Warning + /// + /// Using --password can reveal the password in the process list! #[cfg_attr(feature = "clap", clap(long, global = true, env = "RUSTIC_PASSWORD"))] - // TODO: use `secrecy` library + // TODO: Security related: use `secrecy` library (#663) pub password: Option, /// File to read the password from @@ -131,17 +144,54 @@ pub struct RepositoryOptions { #[serde_as(as = "Option")] pub warm_up_wait: Option, + /// Other options for this repository #[cfg_attr(feature = "clap", clap(skip))] #[cfg_attr(feature = "merge", merge(strategy = overwrite))] pub options: HashMap, } -// TODO: Unused function -#[allow(dead_code)] +/// Overwrite the left value with the right value +/// +/// This is used for merging [`RepositoryOptions`] and [`ConfigOptions`] +/// +/// # Arguments +/// +/// * `left` - The left value +/// * `right` - The right value +#[cfg(feature = "merge")] pub(crate) fn overwrite(left: &mut T, right: T) { *left = right; } +impl RepositoryOptions { + /// Create a [`Repository`] using the given repository options + /// + /// # Errors + /// + /// * 
[`RepositoryErrorKind::NoRepositoryGiven`] - If no repository is given + /// * [`RepositoryErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` + /// * [`BackendErrorKind::BackendNotSupported`] - If the backend is not supported. + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// * [`RestErrorKind::UrlParsingFailed`] - If the url could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. + /// + /// # Returns + /// + /// The repository without progress bars + pub fn to_repository(&self) -> RusticResult> { + Repository::new(self) + } +} + +/// Read a password from a reader +/// +/// # Arguments +/// +/// * `file` - The reader to read the password from +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed pub fn read_password_from_reader(file: &mut impl BufRead) -> RusticResult { let mut password = String::new(); _ = file @@ -162,22 +212,76 @@ pub fn read_password_from_reader(file: &mut impl BufRead) -> RusticResult { + /// The name of the repository pub name: String, - pub be: HotColdBackend, - pub be_hot: Option, + + /// The HotColdBackend to use for this repository + pub(crate) be: HotColdBackend, + + /// The Backende to use for hot files + pub(crate) be_hot: Option, + + /// The options used for this repository opts: RepositoryOptions, + + /// The progress bar to use pub(crate) pb: P, + + /// The status status: S, } impl Repository { + /// Create a new repository from the given [`RepositoryOptions`] (without progress bars) + /// + /// # Arguments + /// + /// * `opts` - The options to use for the repository + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::NoRepositoryGiven`] - If no repository is given + /// * [`RepositoryErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` + /// * [`BackendErrorKind::BackendNotSupported`] - If the backend is not 
supported. + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// * [`RestErrorKind::UrlParsingFailed`] - If the url could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. pub fn new(opts: &RepositoryOptions) -> RusticResult { Self::new_with_progress(opts, NoProgressBars {}) } } impl

Repository { + /// Create a new repository from the given [`RepositoryOptions`] with given progress bars + /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bar + /// + /// # Arguments + /// + /// * `opts` - The options to use for the repository + /// * `pb` - The progress bars to use + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::NoRepositoryGiven`] - If no repository is given + /// * [`RepositoryErrorKind::NoIDSpecified`] - If the warm-up command does not contain `%id` + /// * [`BackendErrorKind::BackendNotSupported`] - If the backend is not supported. + /// * [`LocalErrorKind::DirectoryCreationFailed`] - If the directory could not be created. + /// * [`RestErrorKind::UrlParsingFailed`] - If the url could not be parsed. + /// * [`RestErrorKind::BuildingClientFailed`] - If the client could not be built. pub fn new_with_progress(opts: &RepositoryOptions, pb: P) -> RusticResult { let be = match &opts.repository { Some(repo) => ChooseBackend::from_url(repo)?, @@ -217,7 +321,21 @@ impl

Repository { }) } } + impl Repository { + /// Evaluates the password given by the repository options + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed + /// * [`RepositoryErrorKind::PasswordCommandParsingFailed`] - If parsing the password command failed + /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// + /// # Returns + /// + /// The password or `None` if no password is given pub fn password(&self) -> RusticResult> { match ( &self.opts.password, @@ -263,6 +381,16 @@ impl Repository { } } + /// Returns the Id of the config file + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// + /// # Returns + /// + /// The id of the config file or `None` if no config file is found pub fn config_id(&self) -> RusticResult> { let config_ids = self .be @@ -276,6 +404,28 @@ impl Repository { } } + /// Open the repository. 
+ /// + /// This gets the decryption key and reads the config file + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::NoPasswordGiven`] - If no password is given + /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RepositoryErrorKind::PasswordCommandParsingFailed`] - If parsing the password command failed + /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed + /// * [`RepositoryErrorKind::NoRepositoryConfigFound`] - If no repository config file is found + /// * [`RepositoryErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't match + /// * [`RepositoryErrorKind::IncorrectPassword`] - If the password is incorrect + /// * [`RepositoryErrorKind::NoSuitableKeyFound`] - If no suitable key is found + /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file + /// + /// # Returns + /// + /// The open repository pub fn open(self) -> RusticResult> { let password = self .password()? @@ -283,6 +433,22 @@ impl Repository { self.open_with_password(&password) } + /// Open the repository with a given password. 
+ /// + /// This gets the decryption key and reads the config file + /// + /// # Arguments + /// + /// * `password` - The password to use + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::NoRepositoryConfigFound`] - If no repository config file is found + /// * [`RepositoryErrorKind::KeysDontMatchForRepositories`] - If the keys of the hot and cold backend don't match + /// * [`RepositoryErrorKind::IncorrectPassword`] - If the password is incorrect + /// * [`RepositoryErrorKind::NoSuitableKeyFound`] - If no suitable key is found + /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file pub fn open_with_password(self, password: &str) -> RusticResult> { let config_id = self .config_id()? @@ -314,10 +480,31 @@ impl Repository { self.open_raw(key, config) } + /// Initialize a new repository with given options using the password defined in `RepositoryOptions` + /// + /// This returns an open repository which can be directly used. 
+ /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bar + /// + /// # Arguments + /// + /// * `key_opts` - The options to use for the key + /// * `config_opts` - The options to use for the config + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::NoPasswordGiven`] - If no password is given + /// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed + /// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed + /// * [`RepositoryErrorKind::PasswordCommandParsingFailed`] - If parsing the password command failed + /// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed + /// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed pub fn init( self, - key_opts: &KeyOpts, - config_opts: &ConfigOpts, + key_opts: &KeyOptions, + config_opts: &ConfigOptions, ) -> RusticResult> { let password = self .password()? @@ -325,11 +512,30 @@ impl Repository { self.init_with_password(&password, key_opts, config_opts) } + /// Initialize a new repository with given password and options. + /// + /// This returns an open repository which can be directly used. 
+ /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bar + /// + /// # Arguments + /// + /// * `pass` - The password to use + /// * `key_opts` - The options to use for the key + /// * `config_opts` - The options to use for the config + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::ConfigFileExists`] - If a config file already exists + /// * [`RepositoryErrorKind::ListingRepositoryConfigFileFailed`] - If listing the repository config file failed + /// * [`RepositoryErrorKind::MoreThanOneRepositoryConfig`] - If there is more than one repository config file pub fn init_with_password( self, pass: &str, - key_opts: &KeyOpts, - config_opts: &ConfigOpts, + key_opts: &KeyOptions, + config_opts: &ConfigOptions, ) -> RusticResult> { if self.config_id()?.is_some() { return Err(RepositoryErrorKind::ConfigFileExists.into()); @@ -338,17 +544,45 @@ impl Repository { self.open_raw(key, config) } + /// Initialize a new repository with given password and a ready [`ConfigFile`]. + /// + /// This returns an open repository which can be directly used. + /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bar + /// + /// # Arguments + /// + /// * `password` - The password to use + /// * `key_opts` - The options to use for the key + /// * `config` - The config file to use pub fn init_with_config( self, - pass: &str, - key_opts: &KeyOpts, + password: &str, + key_opts: &KeyOptions, config: ConfigFile, ) -> RusticResult> { - let key = commands::init::init_with_config(&self, pass, key_opts, &config)?; + let key = commands::init::init_with_config(&self, password, key_opts, &config)?; info!("repository {} successfully created.", config.id); self.open_raw(key, config) } + /// Open the repository with given [`Key`] and [`ConfigFile`]. 
+ /// + /// # Type Parameters + /// + /// * `P` - The type of the progress bar + /// + /// # Arguments + /// + /// * `key` - The key to use + /// * `config` - The config file to use + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::HotRepositoryFlagMissing`] - If the config file has `is_hot` set to `true` but the repository is not hot + /// * [`RepositoryErrorKind::IsNotHotRepository`] - If the config file has `is_hot` set to `false` but the repository is hot fn open_raw(self, key: Key, config: ConfigFile) -> RusticResult> { match (config.is_hot == Some(true), self.be_hot.is_some()) { (true, false) => return Err(RepositoryErrorKind::HotRepositoryFlagMissing.into()), @@ -384,88 +618,209 @@ impl Repository { }) } + /// List all file [`Id`]s of the given [`FileType`] which are present in the repository + /// + /// # Arguments + /// + /// * `tpe` - The type of the files to list pub fn list(&self, tpe: FileType) -> RusticResult> { Ok(self.be.list(tpe)?.into_iter()) } } impl Repository { + /// Collect information about repository files + /// + /// # Errors + /// + /// If files could not be listed. pub fn infos_files(&self) -> RusticResult { commands::repoinfo::collect_file_infos(self) } + + /// Warm up the given pack files without waiting. + /// + /// # Arguments + /// + /// * `packs` - The pack files to warm up + /// + /// * [`RepositoryErrorKind::FromNomError`] - If the command could not be parsed. + /// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. pub fn warm_up(&self, packs: impl ExactSizeIterator) -> RusticResult<()> { warm_up(self, packs) } + /// Warm up the given pack files and wait the configured waiting time. + /// + /// # Arguments + /// + /// * `packs` - The pack files to warm up + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::FromNomError`] - If the command could not be parsed. + /// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. 
pub fn warm_up_wait(&self, packs: impl ExactSizeIterator) -> RusticResult<()> { warm_up_wait(self, packs) } } +/// A repository which is open, i.e. the password has been checked and the decryption key is available. pub trait Open { + /// The [`DecryptBackend`] used by this repository type DBE: DecryptFullBackend; + + /// Get the decryption key fn key(&self) -> &Key; + + /// Get the cache fn cache(&self) -> Option<&Cache>; + + /// Get the [`DecryptBackend`] fn dbe(&self) -> &Self::DBE; + + /// Get the [`ConfigFile`] fn config(&self) -> &ConfigFile; } impl Open for Repository { + /// The [`DecryptBackend`] used by this repository type DBE = S::DBE; + + /// Get the decryption key fn key(&self) -> &Key { self.status.key() } + + /// Get the cache fn cache(&self) -> Option<&Cache> { self.status.cache() } + + /// Get the [`DecryptBackend`] fn dbe(&self) -> &Self::DBE { self.status.dbe() } + + /// Get the [`ConfigFile`] fn config(&self) -> &ConfigFile { self.status.config() } } #[derive(Debug)] +/// Open Status: This repository is open, i.e. the password has been checked and the decryption key is available. 
pub struct OpenStatus { + /// The decryption key key: Key, + /// The cache cache: Option, + /// The [`DecryptBackend`] dbe: DecryptBackend>, Key>, + /// The [`ConfigFile`] config: ConfigFile, } impl Open for OpenStatus { + /// The [`DecryptBackend`] used by this repository type DBE = DecryptBackend>, Key>; + /// Get the decryption key fn key(&self) -> &Key { &self.key } + + /// Get the cache fn cache(&self) -> Option<&Cache> { self.cache.as_ref() } + + /// Get the [`DecryptBackend`] fn dbe(&self) -> &Self::DBE { &self.dbe } + + /// Get the [`ConfigFile`] fn config(&self) -> &ConfigFile { &self.config } } impl Repository { + /// Get the content of the decrypted repository file given by id and [`FileType`] + /// + /// # Arguments + /// + /// * `tpe` - The type of the file to get + /// * `id` - The id of the file to get + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. pub fn cat_file(&self, tpe: FileType, id: &str) -> RusticResult { commands::cat::cat_file(self, tpe, id) } - pub fn add_key(&self, pass: &str, opts: &KeyOpts) -> RusticResult { + /// Add a new key to the repository + /// + /// # Arguments + /// + /// * `pass` - The password to use for the new key + /// * `opts` - The options to use for the new key + /// + /// # Errors + /// + /// * [`CommandErrorKind::FromJsonError`] - If the key could not be serialized. 
+ pub fn add_key(&self, pass: &str, opts: &KeyOptions) -> RusticResult { opts.add_key(self, pass) } - pub fn apply_config(&self, opts: &ConfigOpts) -> RusticResult { + /// Update the repository config by applying the given [`ConfigOptions`] + /// + /// # Arguments + /// + /// * `opts` - The options to apply + /// + /// # Errors + /// + /// * [`CommandErrorKind::VersionNotSupported`] - If the version is not supported + /// * [`CommandErrorKind::CannotDowngrade`] - If the version is lower than the current version + /// * [`CommandErrorKind::NoCompressionV1Repo`] - If compression is set for a v1 repo + /// * [`CommandErrorKind::CompressionLevelNotSupported`] - If the compression level is not supported + /// * [`CommandErrorKind::SizeTooLarge`] - If the size is too large + /// * [`CommandErrorKind::MinPackSizeTolerateWrong`] - If the min packsize tolerance percent is wrong + /// * [`CommandErrorKind::MaxPackSizeTolerateWrong`] - If the max packsize tolerance percent is wrong + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. + pub fn apply_config(&self, opts: &ConfigOptions) -> RusticResult { commands::config::apply_config(self, opts) } + + /// Get the repository configuration + pub fn config(&self) -> &ConfigFile { + self.status.config() + } + + // TODO: add documentation! + pub(crate) fn dbe(&self) -> &S::DBE { + self.status.dbe() + } } impl Repository { + /// Get grouped snapshots. + /// + /// # Arguments + /// + /// * `ids` - The ids of the snapshots to group. If empty, all snapshots are grouped. + /// * `group_by` - The criterion to group by + /// * `filter` - The filter to use + /// + /// # Returns + /// + /// If `ids` are given, this will try to resolve the ids (or `latest` with respect to the given filter) and return a single group + /// If `ids` is empty, return and group all snapshots respecting the filter. 
pub fn get_snapshot_group( &self, ids: &[String], @@ -475,6 +830,23 @@ impl Repository { commands::snapshots::get_snapshot_group(self, ids, group_by, filter) } + /// Get a single snapshot + /// + /// # Arguments + /// + /// * `id` - The id of the snapshot to get + /// * `filter` - The filter to use + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. + /// + /// # Returns + /// + /// If `id` is (part of) an `Id`, return this snapshot. + /// If `id` is "latest", return the latest snapshot respecting the giving filter. pub fn get_snapshot_from_str( &self, id: &str, @@ -485,15 +857,31 @@ impl Repository { Ok(snap) } - pub fn get_snapshots(&self, ids: &[String]) -> RusticResult> { + /// Get the given snapshots. + /// + /// # Arguments + /// + /// * `ids` - The ids of the snapshots to get + /// + /// # Notes + /// + /// `ids` may contain part of snapshots id which will be resolved. + /// However, "latest" is not supported in this function. 
+ pub fn get_snapshots>(&self, ids: &[T]) -> RusticResult> { let p = self.pb.progress_counter("getting snapshots..."); SnapshotFile::from_ids(self.dbe(), ids, &p) } + /// Get all snapshots from the repository pub fn get_all_snapshots(&self) -> RusticResult> { self.get_matching_snapshots(|_| true) } + /// Get all snapshots from the repository respecting the given `filter` + /// + /// # Arguments + /// + /// * `filter` - The filter to use pub fn get_matching_snapshots( &self, filter: impl FnMut(&SnapshotFile) -> bool, @@ -502,6 +890,17 @@ impl Repository { SnapshotFile::all_from_backend(self.dbe(), filter, &p) } + /// Get snapshots to forget depending on the given [`KeepOptions`] + /// + /// # Arguments + /// + /// * `keep` - The keep options to use + /// * `group_by` - The criterion to group by + /// * `filter` - The filter to use + /// + /// # Returns + /// + /// pub fn get_forget_snapshots( &self, keep: &KeepOptions, @@ -511,6 +910,16 @@ impl Repository { commands::forget::get_forget_snapshots(self, keep, group_by, filter) } + /// Get snapshots which are not already present and should be present. + /// + /// # Arguments + /// + /// * `filter` - The filter to use + /// * `snaps` - The snapshots to check + /// + /// # Note + /// + /// This method should be called on the *destination repository* pub fn relevant_copy_snapshots( &self, filter: impl FnMut(&SnapshotFile) -> bool, @@ -521,6 +930,15 @@ impl Repository { // TODO: Maybe only offer a method to remove &[Snapshotfile] and check if they must be kept. // See e.g. the merge command of the CLI + /// Remove the given snapshots from the repository + /// + /// # Arguments + /// + /// * `ids` - The ids of the snapshots to remove + /// + /// # Panics + /// + /// If the files could not be deleted. 
pub fn delete_snapshots(&self, ids: &[Id]) -> RusticResult<()> { let p = self.pb.progress_counter("removing snapshots..."); self.dbe() @@ -528,6 +946,15 @@ impl Repository { Ok(()) } + /// Save the given snapshots to the repository. + /// + /// # Arguments + /// + /// * `snaps` - The snapshots to save + /// + /// # Errors + /// + /// * [`CryptBackendErrorKind::SerializingToJsonByteVectorFailed`] - If the file could not be serialized to json. pub fn save_snapshots(&self, mut snaps: Vec) -> RusticResult<()> { for snap in &mut snaps { snap.id = Id::default(); @@ -537,14 +964,29 @@ impl Repository { Ok(()) } - pub fn check(&self, opts: CheckOpts) -> RusticResult<()> { + /// Check the repository for errors or inconsistencies + /// + /// # Arguments + /// + /// * `opts` - The options to use + pub fn check(&self, opts: CheckOptions) -> RusticResult<()> { opts.run(self) } - pub fn prune_plan(&self, opts: &PruneOpts) -> RusticResult { + /// Get the plan about what should be pruned and/or repacked. + /// + /// # Arguments + /// + /// * `opts` - The options to use + pub fn prune_plan(&self, opts: &PruneOptions) -> RusticResult { opts.get_plan(self) } + /// Turn the repository into the `IndexedFull` state by reading and storing the index + /// + /// # Note + /// + /// This saves the full index in memory which can be quite memory-consuming! pub fn to_indexed(self) -> RusticResult>> { let index = IndexBackend::new(self.dbe(), &self.pb.progress_counter(""))?; let status = IndexedStatus { @@ -562,6 +1004,10 @@ impl Repository { }) } + /// Turn the repository into the `IndexedIds` state by reading and storing a size-optimized index + /// + /// This saves only the `Id`s for data blobs. Therefore, not all operations are possible on the repository. + /// However, operations which add data are fully functional. 
pub fn to_indexed_ids(self) -> RusticResult>> { let index = IndexBackend::only_full_trees(self.dbe(), &self.pb.progress_counter(""))?; let status = IndexedStatus { @@ -579,10 +1025,14 @@ impl Repository { }) } + /// Get statistical information from the index + /// + /// This method reads all index files, even if an index is already available in memory. pub fn infos_index(&self) -> RusticResult { commands::repoinfo::collect_index_infos(self) } + /// Read all files of a given [`RepoFile`] pub fn stream_files( &self, ) -> RusticResult>> { @@ -592,17 +1042,31 @@ impl Repository { .into_iter()) } + /// Repair the index + /// + /// This compares the index with existing pack files and reads packfile headers to ensure the index + /// correctly represents the pack files. + /// + /// # Arguments + /// + /// * `opts` - The options to use + /// * `dry_run` - If true, only print what would be done pub fn repair_index(&self, opts: &RepairIndexOptions, dry_run: bool) -> RusticResult<()> { opts.repair(self, dry_run) } } +/// A repository which is indexed such that all tree blobs are contained in the index. pub trait IndexedTree: Open { type I: IndexedBackend; fn index(&self) -> &Self::I; } +/// A repository which is indexed such that all tree blobs are contained in the index +/// and additionally the `Id`s of data blobs are also contained in the index. pub trait IndexedIds: IndexedTree {} + +/// A repository which is indexed such that all blob information is fully contained in the index. 
pub trait IndexedFull: IndexedIds {} impl IndexedTree for Repository { @@ -612,15 +1076,25 @@ impl IndexedTree for Repository { } } +/// The indexed status of a repository +/// +/// # Type Parameters +/// +/// * `T` - The type of index +/// * `S` - The type of the open status #[derive(Debug)] pub struct IndexedStatus { + /// The open status open: S, + /// The index backend index: IndexBackend, + /// The marker for the type of index marker: std::marker::PhantomData, } #[derive(Debug, Clone, Copy)] pub struct IdIndex {} + #[derive(Debug, Clone, Copy)] pub struct FullIndex {} @@ -654,6 +1128,16 @@ impl Open for IndexedStatus { } impl Repository { + /// Get the [`IndexEntry`] of the given blob + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob + /// + /// # Errors + /// + /// * [`RepositoryErrorKind::IdNotFound`] - If the id is not found in the index pub fn get_index_entry(&self, tpe: BlobType, id: &Id) -> RusticResult { let ie = self .index() @@ -664,6 +1148,20 @@ impl Repository { } impl Repository { + /// Get a [`Node`] from a "SNAP\[:PATH\]" syntax + /// + /// This parses for a snapshot (using the filter when "latest" is used) and then traverses into the path to get the node. + /// + /// # Arguments + /// + /// * `snap_path` - The path to the snapshot + /// * `filter` - The filter to use + /// + /// # Errors + /// + /// * [`IdErrorKind::HexError`] - If the string is not a valid hexadecimal string + /// * [`BackendErrorKind::NoSuitableIdFound`] - If no id could be found. + /// * [`BackendErrorKind::IdNotUnique`] - If the id is not unique. pub fn node_from_snapshot_path( &self, snap_path: &str, @@ -677,6 +1175,14 @@ impl Repository { Tree::node_from_path(self.index(), snap.tree, Path::new(path)) } + /// Get a [`Node`] from a [`SnapshotFile`] and a `path` + /// + /// This traverses into the path to get the node. 
+ /// + /// # Arguments + /// + /// * `snap` - The snapshot to use + /// * `path` - The path to the node pub fn node_from_snapshot_and_path( &self, snap: &SnapshotFile, @@ -685,6 +1191,14 @@ impl Repository { Tree::node_from_path(self.index(), snap.tree, Path::new(path)) } + /// Reads a raw tree from a "SNAP\[:PATH\]" syntax + /// + /// This parses a snapshot (using the filter when "latest" is used) and then traverses into the path to get the tree. + /// + /// # Arguments + /// + /// * `snap` - The snapshot to use + /// * `sn_filter` - The filter to use pub fn cat_tree( &self, snap: &str, @@ -693,25 +1207,61 @@ impl Repository { commands::cat::cat_tree(self, snap, sn_filter) } + /// List the contents of a given [`Node`] + /// + /// # Arguments + /// + /// * `node` - The node to list + /// * `ls_opts` - The options to use + /// + /// # Returns + /// + /// If `node` is a tree node, this will list the content of that tree. + /// If `node` is a file node, this will only return one element. + /// + /// # Note + /// + /// The `PathBuf` returned will be relative to the given `node`. pub fn ls( &self, node: &Node, - streamer_opts: &TreeStreamerOptions, - recursive: bool, + ls_opts: &LsOptions, ) -> RusticResult> + Clone> { - NodeStreamer::new_with_glob(self.index().clone(), node, streamer_opts, recursive) + NodeStreamer::new_with_glob(self.index().clone(), node, ls_opts) } + /// Restore a given [`RestorePlan`] to a local destination + /// + /// # Arguments + /// + /// * `restore_infos` - The restore plan to use + /// * `opts` - The options to use + /// * `node_streamer` - The node streamer to use + /// * `dest` - The destination to use pub fn restore( &self, - restore_infos: RestoreInfos, - opts: &RestoreOpts, + restore_infos: RestorePlan, + opts: &RestoreOptions, node_streamer: impl Iterator>, dest: &LocalDestination, ) -> RusticResult<()> { opts.restore(restore_infos, self, node_streamer, dest) } + /// Merge the given trees. 
+ /// + /// This method creates needed tree blobs within the repository. + /// Merge conflicts (identical filenames which do not match) will be resolved using the ordering given by `cmp`. + /// + /// # Arguments + /// + /// * `trees` - The trees to merge + /// * `cmp` - The comparison function to use for merge conflicts + /// * `summary` - The summary to use + /// + /// # Returns + /// + /// This method returns the blob [`Id`] of the merged tree. pub fn merge_trees( &self, trees: &[Id], @@ -721,6 +1271,20 @@ impl Repository { commands::merge::merge_trees(self, trees, cmp, summary) } + /// Merge the given snapshots. + /// + /// This method will create needed tree blobs within the repository. + /// Merge conflicts (identical filenames which do not match) will be resolved using the ordering given by `cmp`. + /// + /// # Arguments + /// + /// * `snaps` - The snapshots to merge + /// * `cmp` - The comparison function to use for merge conflicts + /// * `snap` - The snapshot to save + /// + /// # Returns + /// + /// This method returns the modified and already saved [`SnapshotFile`]. pub fn merge_snapshots( &self, snaps: &[SnapshotFile], @@ -732,44 +1296,94 @@ impl Repository { } impl Repository { + /// Run a backup of `source` using the given options. + /// + /// You have to give a preflled [`SnapshotFile`] which is modified and saved. + /// + /// # Arguments + /// + /// * `opts` - The options to use + /// * `source` - The source to backup + /// * `snap` - The snapshot to modify and save + /// + /// # Returns + /// + /// The saved snapshot. 
pub fn backup( &self, - opts: &BackupOpts, + opts: &BackupOptions, source: PathList, snap: SnapshotFile, - dry_run: bool, ) -> RusticResult { - commands::backup::backup(self, opts, source, snap, dry_run) + commands::backup::backup(self, opts, source, snap) } } impl Repository { + /// Read a raw blob + /// + /// # Arguments + /// + /// * `tpe` - The type of the blob + /// * `id` - The id of the blob pub fn cat_blob(&self, tpe: BlobType, id: &str) -> RusticResult { commands::cat::cat_blob(self, tpe, id) } + /// Dump a [`Node`] using the given writer. + /// + /// # Arguments + /// + /// * `node` - The node to dump + /// * `w` - The writer to use + /// + /// # Note + /// + /// Currently, only regular file nodes are supported. pub fn dump(&self, node: &Node, w: &mut impl Write) -> RusticResult<()> { commands::dump::dump(self, node, w) } /// Prepare the restore. + /// /// If `dry_run` is set to false, it will also: /// - remove existing files from the destination, if `opts.delete` is set to true /// - create all dirs for the restore + /// + /// # Arguments + /// + /// * `opts` - The options to use + /// * `node_streamer` - The node streamer to use + /// * `dest` - The destination to use + /// * `dry_run` - If true, only print what would be done pub fn prepare_restore( &self, - opts: &RestoreOpts, + opts: &RestoreOptions, node_streamer: impl Iterator>, dest: &LocalDestination, dry_run: bool, - ) -> RusticResult { + ) -> RusticResult { opts.collect_and_prepare(self, node_streamer, dest, dry_run) } /// Copy the given `snapshots` to `repo_dest`. - /// Note: This command copies snapshots even if they already exist. For already existing snapshots, a + /// + /// # Type Parameters + /// + /// * `Q` - The type of the progress bar + /// * `R` - The type of the index. + /// + /// # Arguments + /// + /// * `repo_dest` - The destination repository + /// * `snapshots` - The snapshots to copy + /// + /// # Note + /// + /// This command copies snapshots even if they already exist. 
For already existing snapshots, a /// copy will be created in the destination repository. - /// To omit already existing snapshots, use `relevante_copy_snapshots` and filter out the non-relevant ones. + /// + /// To omit already existing snapshots, use `relevant_copy_snapshots` and filter out the non-relevant ones. pub fn copy<'a, Q: ProgressBars, R: IndexedIds>( &self, repo_dest: &Repository, @@ -778,6 +1392,19 @@ impl Repository { commands::copy::copy(self, repo_dest, snapshots) } + /// Repair snapshots. + /// + /// This traverses all trees of all snapshots and repairs defect trees. + /// + /// # Arguments + /// + /// * `opts` - The options to use + /// * `snapshots` - The snapshots to repair + /// * `dry_run` - If true, only print what would be done + /// + /// # Warning + /// + /// If you remove the original snapshots, you may loose data! pub fn repair_snapshots( &self, opts: &RepairSnapshotsOptions, diff --git a/crates/rustic_core/src/repository/warm_up.rs b/crates/rustic_core/src/repository/warm_up.rs index 17de1b307..a044aa5c5 100644 --- a/crates/rustic_core/src/repository/warm_up.rs +++ b/crates/rustic_core/src/repository/warm_up.rs @@ -6,14 +6,29 @@ use rayon::ThreadPoolBuilder; use shell_words::split; use crate::{ - error::RepositoryErrorKind, FileType, Id, Progress, ProgressBars, ReadBackend, Repository, - RusticResult, + backend::{FileType, ReadBackend}, + error::{RepositoryErrorKind, RusticResult}, + id::Id, + progress::{Progress, ProgressBars}, + repository::Repository, }; pub(super) mod constants { + /// The maximum number of reader threads to use for warm-up. pub(super) const MAX_READER_THREADS_NUM: usize = 20; } +/// Warm up the repository and wait. +/// +/// # Arguments +/// +/// * `repo` - The repository to warm up. +/// * `packs` - The packs to warm up. +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::FromNomError`] - If the command could not be parsed. 
+/// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. pub(crate) fn warm_up_wait( repo: &Repository, packs: impl ExactSizeIterator, @@ -27,6 +42,17 @@ pub(crate) fn warm_up_wait( Ok(()) } +/// Warm up the repository. +/// +/// # Arguments +/// +/// * `repo` - The repository to warm up. +/// * `packs` - The packs to warm up. +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::FromNomError`] - If the command could not be parsed. +/// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. pub(crate) fn warm_up( repo: &Repository, packs: impl ExactSizeIterator, @@ -39,6 +65,17 @@ pub(crate) fn warm_up( Ok(()) } +/// Warm up the repository using a command. +/// +/// # Arguments +/// +/// * `packs` - The packs to warm up. +/// * `command` - The command to execute. +/// * `pb` - The progress bar to use. +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::FromNomError`] - If the command could not be parsed. fn warm_up_command( packs: impl ExactSizeIterator, command: &str, @@ -59,6 +96,16 @@ fn warm_up_command( Ok(()) } +/// Warm up the repository using access. +/// +/// # Arguments +/// +/// * `repo` - The repository to warm up. +/// * `packs` - The packs to warm up. +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::FromThreadPoolbilderError`] - If the thread pool could not be created. 
fn warm_up_access( repo: &Repository, packs: impl ExactSizeIterator, diff --git a/src/commands.rs b/src/commands.rs index 8b75e4db9..35feb7691 100644 --- a/src/commands.rs +++ b/src/commands.rs @@ -171,6 +171,19 @@ impl Configurable for EntryPoint { } } +/// Open the repository with the given config +/// +/// # Arguments +/// +/// * `config` - The config file +/// +/// # Errors +/// +/// * [`RepositoryErrorKind::ReadingPasswordFromReaderFailed`] - If reading the password failed +/// * [`RepositoryErrorKind::OpeningPasswordFileFailed`] - If opening the password file failed +/// * [`RepositoryErrorKind::PasswordCommandParsingFailed`] - If parsing the password command failed +/// * [`RepositoryErrorKind::ReadingPasswordFromCommandFailed`] - If reading the password from the command failed +/// * [`RepositoryErrorKind::FromSplitError`] - If splitting the password command failed fn open_repository(config: &Arc) -> Result> { let po = config.global.progress_options; let repo = Repository::new_with_progress(&config.repository, po)?; diff --git a/src/commands/backup.rs b/src/commands/backup.rs index a514a1d8e..40348857b 100644 --- a/src/commands/backup.rs +++ b/src/commands/backup.rs @@ -10,17 +10,15 @@ use crate::{ {status_err, Application, RUSTIC_APP}, }; use abscissa_core::{Command, Runnable, Shutdown}; -use anyhow::{bail, Result}; +use anyhow::{bail, Context, Result}; use log::{debug, info, warn}; -use chrono::Local; - use merge::Merge; use serde::Deserialize; use rustic_core::{ - BackupOpts, LocalSourceFilterOptions, LocalSourceSaveOptions, ParentOpts, PathList, - SnapshotFile, SnapshotOptions, + BackupOptions, LocalSourceFilterOptions, LocalSourceSaveOptions, ParentOptions, PathList, + SnapshotOptions, }; /// `backup` subcommand @@ -60,7 +58,7 @@ pub struct BackupCmd { #[clap(flatten, next_help_heading = "Options for parent processing")] #[serde(flatten)] - parent_opts: ParentOpts, + parent_opts: ParentOptions, #[clap(flatten, next_help_heading = "Exclude options")] 
#[serde(flatten)] @@ -98,13 +96,6 @@ impl Runnable for BackupCmd { impl BackupCmd { fn inner_run(&self) -> Result<()> { - let time = Local::now(); - - let command: String = std::env::args_os() - .map(|s| s.to_string_lossy().to_string()) - .collect::>() - .join(" "); - let config = RUSTIC_APP.config(); let repo = open_repository(&config)?.to_indexed_ids()?; @@ -123,13 +114,18 @@ impl BackupCmd { let config_sources: Vec<_> = config_opts .iter() - .filter_map(|opt| match PathList::from_string(&opt.source, true) { + .map(|opt| -> Result<_> { + Ok(PathList::from_string(&opt.source)? + .sanitize() + .with_context(|| { + format!("error sanitizing source=\"{}\" in config file", opt.source) + })? + .merge()) + }) + .filter_map(|p| match p { Ok(paths) => Some(paths), Err(err) => { - warn!( - "error sanitizing source=\"{}\" in config file: {err}", - opt.source - ); + warn!("{err}"); None } }) @@ -137,7 +133,7 @@ impl BackupCmd { let sources = match (self.cli_sources.is_empty(), config_opts.is_empty()) { (false, _) => { - let item = PathList::from_strings(&self.cli_sources, true)?; + let item = PathList::from_strings(&self.cli_sources).sanitize()?; vec![item] } (true, false) => { @@ -174,15 +170,14 @@ impl BackupCmd { // merge "backup" section from config file, if given opts.merge(config.backup.clone()); - let snap = SnapshotFile::new_from_options(&opts.snap_opts, time, command.clone())?; - let backup_opts = BackupOpts { - stdin_filename: opts.stdin_filename, - as_path: opts.as_path, - parent_opts: opts.parent_opts, - ignore_save_opts: opts.ignore_save_opts, - ignore_filter_opts: opts.ignore_filter_opts, - }; - let snap = repo.backup(&backup_opts, source.clone(), snap, config.global.dry_run)?; + let backup_opts = BackupOptions::default() + .stdin_filename(opts.stdin_filename) + .as_path(opts.as_path) + .parent_opts(opts.parent_opts) + .ignore_save_opts(opts.ignore_save_opts) + .ignore_filter_opts(opts.ignore_filter_opts) + .dry_run(config.global.dry_run); + let snap = 
repo.backup(&backup_opts, source.clone(), opts.snap_opts.to_snapshot()?)?; if opts.json { let mut stdout = std::io::stdout(); diff --git a/src/commands/cat.rs b/src/commands/cat.rs index 63c457996..35cc208b5 100644 --- a/src/commands/cat.rs +++ b/src/commands/cat.rs @@ -8,7 +8,7 @@ use abscissa_core::{Command, Runnable, Shutdown}; use anyhow::Result; -use rustic_core::{BlobType, FileType}; +use rustic_core::repofile::{BlobType, FileType}; /// `cat` subcommand #[derive(clap::Parser, Command, Debug)] @@ -64,10 +64,10 @@ impl CatCmd { CatSubCmd::Config => repo.cat_file(FileType::Config, "")?, CatSubCmd::Index(opt) => repo.cat_file(FileType::Index, &opt.id)?, CatSubCmd::Snapshot(opt) => repo.cat_file(FileType::Snapshot, &opt.id)?, - // special treatment for cating blobs: read the index and use it to locate the blob + // special treatment for 'cat'ing blobs: read the index and use it to locate the blob CatSubCmd::TreeBlob(opt) => repo.to_indexed()?.cat_blob(BlobType::Tree, &opt.id)?, CatSubCmd::DataBlob(opt) => repo.to_indexed()?.cat_blob(BlobType::Data, &opt.id)?, - // special treatment for cating a tree within a snapshot + // special treatment for 'cat'ing a tree within a snapshot CatSubCmd::Tree(opt) => repo .to_indexed()? 
.cat_tree(&opt.snap, |sn| config.snapshot_filter.matches(sn))?, diff --git a/src/commands/check.rs b/src/commands/check.rs index 381214804..ac994b2c3 100644 --- a/src/commands/check.rs +++ b/src/commands/check.rs @@ -6,13 +6,13 @@ use crate::{commands::open_repository, status_err, Application, RUSTIC_APP}; use abscissa_core::{Command, Runnable, Shutdown}; use anyhow::Result; -use rustic_core::CheckOpts; +use rustic_core::CheckOptions; /// `check` subcommand #[derive(clap::Parser, Command, Debug)] pub(crate) struct CheckCmd { #[clap(flatten)] - opts: CheckOpts, + opts: CheckOptions, } impl Runnable for CheckCmd { diff --git a/src/commands/config.rs b/src/commands/config.rs index 7d882dd7d..c561d04ad 100644 --- a/src/commands/config.rs +++ b/src/commands/config.rs @@ -8,13 +8,13 @@ use abscissa_core::{Command, Runnable, Shutdown}; use anyhow::Result; -use rustic_core::ConfigOpts; +use rustic_core::ConfigOptions; /// `config` subcommand #[derive(clap::Parser, Command, Debug)] pub(crate) struct ConfigCmd { #[clap(flatten)] - config_opts: ConfigOpts, + config_opts: ConfigOptions, } impl Runnable for ConfigCmd { diff --git a/src/commands/copy.rs b/src/commands/copy.rs index 2864a410b..fc6b1fc86 100644 --- a/src/commands/copy.rs +++ b/src/commands/copy.rs @@ -12,7 +12,7 @@ use log::{error, info}; use merge::Merge; use serde::Deserialize; -use rustic_core::{CopySnapshot, Id, KeyOpts, Open, Repository, RepositoryOptions}; +use rustic_core::{CopySnapshot, Id, KeyOptions, Repository, RepositoryOptions}; /// `copy` subcommand #[derive(clap::Parser, Command, Debug)] @@ -26,7 +26,7 @@ pub(crate) struct CopyCmd { init: bool, #[clap(flatten, next_help_heading = "Key options (when using --init)")] - key_opts: KeyOpts, + key_opts: KeyOptions, } #[derive(Default, Clone, Debug, Deserialize, Merge)] diff --git a/src/commands/diff.rs b/src/commands/diff.rs index 38edeafba..ff1943882 100644 --- a/src/commands/diff.rs +++ b/src/commands/diff.rs @@ -11,9 +11,9 @@ use std::path::{Path, 
PathBuf}; use anyhow::{bail, Context, Result}; use rustic_core::{ - BlobType, IndexedFull, LocalDestination, LocalSource, LocalSourceFilterOptions, - LocalSourceSaveOptions, Node, NodeType, ReadSourceEntry, Repository, RusticResult, - TreeStreamerOptions, + repofile::{BlobType, Node, NodeType}, + IndexedFull, LocalDestination, LocalSource, LocalSourceFilterOptions, LocalSourceSaveOptions, + LsOptions, ReadSourceEntry, Repository, RusticResult, }; /// `diff` subcommand @@ -60,7 +60,7 @@ impl DiffCmd { _ = match (id1, id2) { (Some(id1), Some(id2)) => { // diff between two snapshots - let snaps = repo.get_snapshots(&[id1.to_string(), id2.to_string()])?; + let snaps = repo.get_snapshots(&[id1, id2])?; let snap1 = &snaps[0]; let snap2 = &snaps[1]; @@ -69,8 +69,8 @@ impl DiffCmd { let node2 = repo.node_from_snapshot_and_path(snap2, path2)?; diff( - repo.ls(&node1, &TreeStreamerOptions::default(), true)?, - repo.ls(&node2, &TreeStreamerOptions::default(), true)?, + repo.ls(&node1, &LsOptions::default())?, + repo.ls(&node2, &LsOptions::default())?, self.no_content, |_path, node1, node2| Ok(node1.content == node2.content), self.metadata, @@ -106,7 +106,7 @@ impl DiffCmd { }); diff( - repo.ls(&node1, &TreeStreamerOptions::default(), true)?, + repo.ls(&node1, &LsOptions::default())?, src, self.no_content, |path, node1, _node2| identical_content_local(&local, &repo, path, node1), @@ -141,7 +141,9 @@ fn identical_content_local( path: &Path, node: &Node, ) -> Result { - let Some(mut open_file) = local.get_matching_file(path, node.meta.size) else { return Ok(false) }; + let Some(mut open_file) = local.get_matching_file(path, node.meta.size) else { + return Ok(false); + }; for id in node.content.iter().flatten() { let ie = repo.get_index_entry(BlobType::Data, id)?; diff --git a/src/commands/init.rs b/src/commands/init.rs index b13763be1..d0f331ed2 100644 --- a/src/commands/init.rs +++ b/src/commands/init.rs @@ -9,16 +9,16 @@ use crate::{Application, RUSTIC_APP}; use 
dialoguer::Password; -use rustic_core::{ConfigOpts, KeyOpts, Repository}; +use rustic_core::{ConfigOptions, KeyOptions, Repository}; /// `init` subcommand #[derive(clap::Parser, Command, Debug)] pub(crate) struct InitCmd { #[clap(flatten, next_help_heading = "Key options")] - key_opts: KeyOpts, + key_opts: KeyOptions, #[clap(flatten, next_help_heading = "Config options")] - config_opts: ConfigOpts, + config_opts: ConfigOptions, } impl Runnable for InitCmd { @@ -48,8 +48,8 @@ impl InitCmd { pub(crate) fn init( repo: Repository, - key_opts: &KeyOpts, - config_opts: &ConfigOpts, + key_opts: &KeyOptions, + config_opts: &ConfigOptions, ) -> Result<()> { let pass = repo.password()?.unwrap_or_else(|| { match Password::new() diff --git a/src/commands/key.rs b/src/commands/key.rs index 62ebc53b4..cdd5f7964 100644 --- a/src/commands/key.rs +++ b/src/commands/key.rs @@ -11,7 +11,7 @@ use anyhow::Result; use dialoguer::Password; use log::info; -use rustic_core::{KeyOpts, Repository, RepositoryOptions}; +use rustic_core::{KeyOptions, Repository, RepositoryOptions}; /// `key` subcommand #[derive(clap::Parser, Command, Debug)] @@ -33,7 +33,7 @@ pub(crate) struct AddCmd { pub(crate) new_password_file: Option, #[clap(flatten)] - pub(crate) key_opts: KeyOpts, + pub(crate) key_opts: KeyOptions, } impl Runnable for KeyCmd { diff --git a/src/commands/list.rs b/src/commands/list.rs index 6fb068ee6..ea167fbe9 100644 --- a/src/commands/list.rs +++ b/src/commands/list.rs @@ -8,7 +8,7 @@ use abscissa_core::{Command, Runnable, Shutdown}; use anyhow::{bail, Result}; -use rustic_core::{FileType, IndexFile}; +use rustic_core::repofile::{FileType, IndexFile}; /// `list` subcommand #[derive(clap::Parser, Command, Debug)] diff --git a/src/commands/ls.rs b/src/commands/ls.rs index f847c39fe..4d2048fee 100644 --- a/src/commands/ls.rs +++ b/src/commands/ls.rs @@ -9,7 +9,10 @@ use crate::{commands::open_repository, status_err, Application, RUSTIC_APP}; use abscissa_core::{Command, Runnable, Shutdown}; 
use anyhow::Result; -use rustic_core::{Node, NodeType, TreeStreamerOptions}; +use rustic_core::{ + repofile::{Node, NodeType}, + LsOptions, +}; mod constants { // constants from man page inode(7) @@ -34,10 +37,6 @@ pub(crate) struct LsCmd { #[clap(value_name = "SNAPSHOT[:PATH]")] snap: String, - /// recursively list the dir (default when no PATH is given) - #[clap(long)] - recursive: bool, - /// show summary #[clap(long, short = 's')] summary: bool, @@ -47,7 +46,7 @@ pub(crate) struct LsCmd { long: bool, #[clap(flatten)] - streamer_opts: TreeStreamerOptions, + ls_opts: LsOptions, } impl Runnable for LsCmd { @@ -88,11 +87,12 @@ impl LsCmd { repo.node_from_snapshot_path(&self.snap, |sn| config.snapshot_filter.matches(sn))?; // recursive if standard if we specify a snapshot without dirs. In other cases, use the parameter `recursive` - let recursive = !self.snap.contains(':') || self.recursive; + let mut ls_opts = self.ls_opts.clone(); + ls_opts.recursive = !self.snap.contains(':') || ls_opts.recursive; let mut summary = Summary::default(); - for item in repo.ls(&node, &self.streamer_opts, recursive)? { + for item in repo.ls(&node, &ls_opts)? 
{ let (path, node) = item?; summary.update(&node); if self.long { diff --git a/src/commands/merge.rs b/src/commands/merge.rs index b286f1386..ae7acb367 100644 --- a/src/commands/merge.rs +++ b/src/commands/merge.rs @@ -9,7 +9,7 @@ use log::info; use chrono::Local; -use rustic_core::{Node, SnapshotFile, SnapshotOptions}; +use rustic_core::{last_modified_node, repofile::SnapshotFile, SnapshotOptions}; /// `merge` subcommand #[derive(clap::Parser, Default, Command, Debug)] @@ -41,13 +41,6 @@ impl Runnable for MergeCmd { impl MergeCmd { fn inner_run(&self) -> Result<()> { - let now = Local::now(); - - let command: String = std::env::args_os() - .map(|s| s.to_string_lossy().to_string()) - .collect::>() - .join(" "); - let config = RUSTIC_APP.config(); let repo = open_repository(&config)?.to_indexed_ids()?; @@ -57,11 +50,9 @@ impl MergeCmd { repo.get_snapshots(&self.ids)? }; - let snap = SnapshotFile::new_from_options(&self.snap_opts, now, command)?; - - let cmp = |n1: &Node, n2: &Node| n1.meta.mtime.cmp(&n2.meta.mtime); + let snap = SnapshotFile::from_options(&self.snap_opts)?; - let snap = repo.merge_snapshots(&snapshots, &cmp, snap)?; + let snap = repo.merge_snapshots(&snapshots, &last_modified_node, snap)?; if self.json { let mut stdout = std::io::stdout(); diff --git a/src/commands/prune.rs b/src/commands/prune.rs index 2f3835bc0..d39f58d5d 100644 --- a/src/commands/prune.rs +++ b/src/commands/prune.rs @@ -10,15 +10,14 @@ use log::debug; use anyhow::Result; -use rustic_core::{PruneOpts, PruneStats, Sum}; +use rustic_core::{PruneOptions, PruneStats}; /// `prune` subcommand #[allow(clippy::struct_excessive_bools)] #[derive(clap::Parser, Command, Debug, Clone)] -#[group(id = "prune_opts")] pub(crate) struct PruneCmd { #[clap(flatten)] - pub(crate) opts: PruneOpts, + pub(crate) opts: PruneOptions, } impl Runnable for PruneCmd { @@ -51,8 +50,8 @@ impl PruneCmd { #[allow(clippy::cast_precision_loss)] fn print_stats(stats: &PruneStats) { let pack_stat = &stats.packs; - let 
blob_stat = stats.blobs.sum(); - let size_stat = stats.size.sum(); + let blob_stat = stats.blobs_sum(); + let size_stat = stats.size_sum(); debug!( "used: {:>10} blobs, {:>10}", diff --git a/src/commands/restore.rs b/src/commands/restore.rs index 1dd4c3c0f..703fee41b 100644 --- a/src/commands/restore.rs +++ b/src/commands/restore.rs @@ -10,7 +10,7 @@ use abscissa_core::{Command, Runnable, Shutdown}; use anyhow::Result; use log::info; -use rustic_core::{LocalDestination, RestoreOpts, TreeStreamerOptions}; +use rustic_core::{LocalDestination, LsOptions, RestoreOptions}; use crate::filtering::SnapshotFilter; @@ -27,10 +27,10 @@ pub(crate) struct RestoreCmd { dest: String, #[clap(flatten)] - opts: RestoreOpts, + opts: RestoreOptions, #[clap(flatten)] - streamer_opts: TreeStreamerOptions, + ls_opts: LsOptions, #[clap( flatten, @@ -55,7 +55,11 @@ impl RestoreCmd { let node = repo.node_from_snapshot_path(&self.snap, |sn| config.snapshot_filter.matches(sn))?; - let ls = repo.ls(&node, &self.streamer_opts, true)?; + + // for restore, always recurse into tree + let mut ls_opts = self.ls_opts.clone(); + ls_opts.recursive = true; + let ls = repo.ls(&node, &ls_opts)?; let dest = LocalDestination::new(&self.dest, true, !node.is_dir())?; diff --git a/src/commands/snapshots.rs b/src/commands/snapshots.rs index 0971505ba..f7443a967 100644 --- a/src/commands/snapshots.rs +++ b/src/commands/snapshots.rs @@ -14,7 +14,10 @@ use comfy_table::Cell; use humantime::format_duration; use itertools::Itertools; -use rustic_core::{DeleteOption, SnapshotFile, SnapshotGroupCriterion}; +use rustic_core::{ + repofile::{DeleteOption, SnapshotFile}, + SnapshotGroupCriterion, +}; /// `snapshot` subcommand #[derive(clap::Parser, Command, Debug)] diff --git a/src/commands/tag.rs b/src/commands/tag.rs index 87efb3ec2..b6df56995 100644 --- a/src/commands/tag.rs +++ b/src/commands/tag.rs @@ -8,7 +8,7 @@ use abscissa_core::{Command, Runnable, Shutdown}; use chrono::{Duration, Local}; -use 
rustic_core::{DeleteOption, StringList}; +use rustic_core::{repofile::DeleteOption, StringList}; /// `tag` subcommand diff --git a/src/filtering.rs b/src/filtering.rs index dae15c134..9549f4b38 100644 --- a/src/filtering.rs +++ b/src/filtering.rs @@ -1,7 +1,7 @@ use crate::error::RhaiErrorKinds; use log::warn; -use rustic_core::{SnapshotFile, StringList}; +use rustic_core::{repofile::SnapshotFile, StringList}; use std::{error::Error, str::FromStr}; use rhai::{serde::to_dynamic, Dynamic, Engine, FnPtr, AST}; From cfdde3ab1dac58b0d924eaafe37b1f049ce47e77 Mon Sep 17 00:00:00 2001 From: simonsan <14062932+simonsan@users.noreply.github.com> Date: Thu, 7 Sep 2023 15:23:39 +0200 Subject: [PATCH 2/3] ci: fix artifacts not building, do non-cross builds with `cargo-auditable` and include `config/` directory into package (#843) Signed-off-by: simonsan <14062932+simonsan@users.noreply.github.com> Co-authored-by: aawsome <37850842+aawsome@users.noreply.github.com> --- .github/workflows/release.yaml | 43 +++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 97db2340a..3e52ca940 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -26,7 +26,7 @@ jobs: os-name: windows target: x86_64-pc-windows-msvc architecture: x86_64 - binary-postfix: "" + binary-postfix: ".exe" use-cross: false - os: macos-latest os-name: macos @@ -96,6 +96,11 @@ jobs: sudo apt update sudo apt-get install -y musl-tools fi + - name: install cargo-auditable for non-cross builds + shell: bash + if: ${{ matrix.job.use-cross != true }} + run: | + cargo install cargo-auditable cargo-audit - uses: Swatinem/rust-cache@v2 with: key: ${{ matrix.job.target }} @@ -104,39 +109,59 @@ run: echo "PROJECT_VERSION=$(git describe --tags)" >> $GITHUB_ENV - name: Cargo build uses: actions-rs/cargo@v1 + if: ${{ matrix.job.use-cross == true }} with: command: build use-cross: ${{ 
matrix.job.use-cross }} toolchain: ${{ matrix.rust }} args: --release --target ${{ matrix.job.target }} - + - name: Cargo auditable build + uses: actions-rs/cargo@v1 + if: ${{ matrix.job.use-cross == false }} + with: + command: auditable + use-cross: ${{ matrix.job.use-cross }} + toolchain: ${{ matrix.rust }} + args: build --release --target ${{ matrix.job.target }} - name: Packaging final binary - if: ${{ !contains(github.ref_name, '/') }} shell: bash + id: package-binary run: | + cp -a config target/${{ matrix.job.target }}/release/config cd target/${{ matrix.job.target }}/release ########## create tar.gz ########## - RELEASE_NAME=rustic-${{ github.ref_name }}-${{ matrix.job.target}} - tar czvf $RELEASE_NAME.tar.gz rustic${{ matrix.job.binary-postfix }} + + # accounting for main branch and therefore beta builds + if [[ ${{ github.ref }} = refs/heads/main ]]; then + RELEASE_NAME=rustic-beta-${{ matrix.job.target}}.tar.gz + elif [[ ${{ github.ref }} = refs/tags/* ]]; then + RELEASE_NAME=rustic-${{ github.ref_name }}-${{ matrix.job.target}}.tar.gz + else + RELEASE_NAME=rustic-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.job.target}}.tar.gz + fi + + tar czvf $RELEASE_NAME rustic${{ matrix.job.binary-postfix }} config/ ########## create sha256 ########## if [[ ${{ runner.os }} == 'Windows' ]]; then - certutil -hashfile $RELEASE_NAME.tar.gz sha256 | grep -E [A-Fa-f0-9]{64} > $RELEASE_NAME.sha256 + certutil -hashfile $RELEASE_NAME sha256 | grep -E [A-Fa-f0-9]{64} > $RELEASE_NAME.sha256 else - shasum -a 256 $RELEASE_NAME.tar.gz > $RELEASE_NAME.sha256 + shasum -a 256 $RELEASE_NAME > $RELEASE_NAME.sha256 fi + + echo "release_name=$RELEASE_NAME" >> $GITHUB_OUTPUT - name: Storing binary as artefact uses: actions/upload-artifact@v3 with: name: binary-${{ matrix.job.target}} - path: target/${{ matrix.job.target }}/release/rustic-${{ github.ref_name }}-${{ matrix.job.target}}.tar.gz + path: target/${{ matrix.job.target }}/release/${{ 
steps.package-binary.outputs.release_name }}* - name: Releasing release versions uses: softprops/action-gh-release@v1 if: startsWith(github.ref, 'refs/tags/') with: files: | - target/${{ matrix.job.target }}/release/rustic-*.tar.gz + target/${{ matrix.job.target }}/release/${{ steps.package-binary.outputs.release_name }}* env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 0a4c873db83a126a97cf7e61286162c141ecf5f1 Mon Sep 17 00:00:00 2001 From: Alexander Weiss Date: Thu, 7 Sep 2023 15:28:38 +0200 Subject: [PATCH 3/3] add global config dir to docu --- config/README.md | 8 ++++---- config/full.toml | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/config/README.md b/config/README.md index c2262848e..8c73e3428 100644 --- a/config/README.md +++ b/config/README.md @@ -32,10 +32,10 @@ options. Therefore `commandline arguments` have the highest precedence. ## Profiles Configuration files can be placed in the user's local config directory, e.g. -`~/.config/rustic/`. You can use a different config files, e.g. `myconfig.toml` -and use the `-P` option to specify the profile name, e.g. -`rustic -P myconfig.toml`. Examples for different configuration files can be -found here in the [/config/](/config) directory. +`~/.config/rustic/` or in the global config dir, e.g. `/etc/rustic/`. You can +use different config files, e.g. `myconfig.toml` and use the `-P` option to +specify the profile name, e.g. `rustic -P myconfig`. Examples for different +configuration files can be found here in the [/config/](/config) directory. ## Sections and Attributes diff --git a/config/full.toml b/config/full.toml index a89443b18..bd787ace5 100644 --- a/config/full.toml +++ b/config/full.toml @@ -1,6 +1,7 @@ # Full rustic config file containing all options which are available through the config file.
 # -# This file should be placed in the user's local config dir (~/.config/rustic/) +# This file should be placed in the user's local config dir (~/.config/rustic/) or in the global +# config dir (/etc/rustic/). # If you save it under NAME.toml, use "rustic -P NAME" to access this profile. # # Note that most options can be overwritten by the corresponding command line option.