mirror of
https://github.com/restic/restic.git
synced 2026-02-22 16:56:24 +00:00
Compare commits
202 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b723ca3de5 | ||
|
|
f5084d70d7 | ||
|
|
29b7b17491 | ||
|
|
e14c4b1737 | ||
|
|
745d79fe5f | ||
|
|
fb95426f64 | ||
|
|
4cadc89ad3 | ||
|
|
409909a7f5 | ||
|
|
df500a372d | ||
|
|
a444731dc0 | ||
|
|
a6e8af7e0f | ||
|
|
aa5af8af0e | ||
|
|
4e3353109d | ||
|
|
02c8d38095 | ||
|
|
fd6211653c | ||
|
|
3d4f2dd6b4 | ||
|
|
c1ddc0c18b | ||
|
|
c95f032a9c | ||
|
|
3087776135 | ||
|
|
b6f01ffbe6 | ||
|
|
41fe9318b1 | ||
|
|
8387d18d4d | ||
|
|
929d2b8df3 | ||
|
|
4f0682d730 | ||
|
|
967d1bbf0c | ||
|
|
2f80b37b93 | ||
|
|
4d2aa18273 | ||
|
|
6b1e5d4e18 | ||
|
|
26d1f9f4ba | ||
|
|
6a89c0f0ef | ||
|
|
b87230b93d | ||
|
|
6f2b8d622a | ||
|
|
90440212f2 | ||
|
|
3a5c9aadad | ||
|
|
a78142c1bb | ||
|
|
07045c7e23 | ||
|
|
0a5d42db3f | ||
|
|
67d99b8cfb | ||
|
|
1a0c0dc277 | ||
|
|
e86d9307d0 | ||
|
|
923e681af3 | ||
|
|
37770b1d82 | ||
|
|
02fea4f76a | ||
|
|
e6db3596f1 | ||
|
|
3acc7af310 | ||
|
|
5c4653f427 | ||
|
|
f7317a9287 | ||
|
|
30db8057e4 | ||
|
|
0e897ef7b8 | ||
|
|
b3e727f40d | ||
|
|
17feccd998 | ||
|
|
604b18aa74 | ||
|
|
01c51b3449 | ||
|
|
de8cf5e345 | ||
|
|
cfa2ac69e0 | ||
|
|
1e9eefa066 | ||
|
|
e9af012229 | ||
|
|
8066e93f47 | ||
|
|
e19622e4b1 | ||
|
|
38ea7ed4f6 | ||
|
|
76d1866444 | ||
|
|
8b22fe29cf | ||
|
|
02014be76c | ||
|
|
16eeed2ad5 | ||
|
|
3f94f63967 | ||
|
|
88716794e3 | ||
|
|
3ca424050f | ||
|
|
fea2464d4d | ||
|
|
5bd5db4294 | ||
|
|
4429a66b5f | ||
|
|
8066195e6e | ||
|
|
f7f14cf8c9 | ||
|
|
5096f3b491 | ||
|
|
cf3fc2a5b1 | ||
|
|
920d458a4a | ||
|
|
b016dc2ff0 | ||
|
|
355db0bc29 | ||
|
|
6e2fe73189 | ||
|
|
303a5dab6a | ||
|
|
7dcd2968b6 | ||
|
|
298f490195 | ||
|
|
37cb82b28b | ||
|
|
bce6438d22 | ||
|
|
919dd2ac84 | ||
|
|
870bc5108e | ||
|
|
418296c5c9 | ||
|
|
a6481b3707 | ||
|
|
00b527fb09 | ||
|
|
0ebfc55ee3 | ||
|
|
35b7607802 | ||
|
|
fad9f65c65 | ||
|
|
939f3e972c | ||
|
|
ca8c3b4fd5 | ||
|
|
4f45b14f25 | ||
|
|
389067fb8b | ||
|
|
4b0ca9ddab | ||
|
|
b8c2544dcb | ||
|
|
c7762453cf | ||
|
|
303210aa08 | ||
|
|
c029881379 | ||
|
|
6e89963c21 | ||
|
|
1ac560181b | ||
|
|
18ec27a0da | ||
|
|
b40dea29ad | ||
|
|
0561155963 | ||
|
|
1aafc17212 | ||
|
|
f11789c437 | ||
|
|
8cab0c121d | ||
|
|
5979414bcd | ||
|
|
cc8b690b52 | ||
|
|
a164dc9391 | ||
|
|
9a26be4e5b | ||
|
|
733519d895 | ||
|
|
3d5a0c799b | ||
|
|
c4475ac58f | ||
|
|
c9fd9b5275 | ||
|
|
cadcab5a19 | ||
|
|
5ac9c1157a | ||
|
|
5715517e29 | ||
|
|
ecc2458de8 | ||
|
|
2c6ba5d9ac | ||
|
|
0cc3647e51 | ||
|
|
6b700d02f5 | ||
|
|
2b09a10234 | ||
|
|
1c87d01bad | ||
|
|
78a3ffcfb9 | ||
|
|
4d77c0c21c | ||
|
|
fb064afa34 | ||
|
|
7304738872 | ||
|
|
66efa425bf | ||
|
|
d51e9d1b98 | ||
|
|
e046428c94 | ||
|
|
75906edef5 | ||
|
|
203d775190 | ||
|
|
ecd7ee85e8 | ||
|
|
2022355800 | ||
|
|
36f22a0feb | ||
|
|
f58a44b911 | ||
|
|
fe886a6439 | ||
|
|
be23313072 | ||
|
|
3c112d9cae | ||
|
|
2970e38d92 | ||
|
|
870e7583a1 | ||
|
|
db1c835c37 | ||
|
|
190bed9908 | ||
|
|
85f4c826db | ||
|
|
5da4b0fc7d | ||
|
|
c1058005c3 | ||
|
|
ca73808649 | ||
|
|
f2ea91df38 | ||
|
|
15cc4d74b2 | ||
|
|
bf9a507148 | ||
|
|
65b476ead9 | ||
|
|
aaa1cc2c26 | ||
|
|
95434cff16 | ||
|
|
1b94ae1c00 | ||
|
|
d138b38f28 | ||
|
|
db8f5864fc | ||
|
|
1d8b21cdad | ||
|
|
3865b59716 | ||
|
|
7b8d1dc040 | ||
|
|
d19a29f79e | ||
|
|
449c049ce9 | ||
|
|
9f436d80e1 | ||
|
|
e277a92a2f | ||
|
|
d9e22c2df1 | ||
|
|
4b0fb5af36 | ||
|
|
7519c73987 | ||
|
|
45a48eb4a8 | ||
|
|
a2f30cde4c | ||
|
|
6ebcfe7c18 | ||
|
|
0022926eba | ||
|
|
3e3a0220ec | ||
|
|
c125fb763d | ||
|
|
b9f0f031b6 | ||
|
|
aa7043151a | ||
|
|
ebf22a35f4 | ||
|
|
3f069ac404 | ||
|
|
56e5467096 | ||
|
|
5ee932a124 | ||
|
|
fed25714a4 | ||
|
|
8906d85ab8 | ||
|
|
97aafc1eec | ||
|
|
6a5c9f57c2 | ||
|
|
6cf13483b5 | ||
|
|
f645306a18 | ||
|
|
186e10e0cb | ||
|
|
29a5bd5b30 | ||
|
|
06a01bc016 | ||
|
|
cdc287a7f6 | ||
|
|
deedc38129 | ||
|
|
1107eef215 | ||
|
|
60c7020bcb | ||
|
|
b96ef48562 | ||
|
|
6f69ae1b8d | ||
|
|
c4fbf2c779 | ||
|
|
7c084014fa | ||
|
|
879f6e0c81 | ||
|
|
8a97bb8661 | ||
|
|
5fe6de219d | ||
|
|
c13f79da02 | ||
|
|
db82e6b80c |
3
.github/PULL_REQUEST_TEMPLATE.md
vendored
3
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -22,6 +22,9 @@ Was the change discussed in an issue or in the forum before?
|
||||
|
||||
<!--
|
||||
Link issues and relevant forum posts here.
|
||||
|
||||
If this PR resolves an issue on GitHub, use "closes #1234" so that the issue is
|
||||
closed automatically when this PR is merged.
|
||||
-->
|
||||
|
||||
Checklist
|
||||
|
||||
31
.travis.yml
31
.travis.yml
@@ -3,14 +3,6 @@ sudo: false
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
go: "1.9.x"
|
||||
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0 RESTIC_BUILD_SOLARIS=0
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.cache/go-build
|
||||
- $HOME/gopath/pkg/mod
|
||||
|
||||
- os: linux
|
||||
go: "1.10.x"
|
||||
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
|
||||
@@ -19,9 +11,25 @@ matrix:
|
||||
- $HOME/.cache/go-build
|
||||
- $HOME/gopath/pkg/mod
|
||||
|
||||
# only run fuse and cloud backends tests on Travis for the latest Go on Linux
|
||||
- os: linux
|
||||
go: "1.11.x"
|
||||
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.cache/go-build
|
||||
- $HOME/gopath/pkg/mod
|
||||
|
||||
- os: linux
|
||||
go: "1.12.x"
|
||||
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
|
||||
cache:
|
||||
directories:
|
||||
- $HOME/.cache/go-build
|
||||
- $HOME/gopath/pkg/mod
|
||||
|
||||
# only run fuse and cloud backends tests on Travis for the latest Go on Linux
|
||||
- os: linux
|
||||
go: "1.13.x"
|
||||
sudo: true
|
||||
cache:
|
||||
directories:
|
||||
@@ -29,7 +37,7 @@ matrix:
|
||||
- $HOME/gopath/pkg/mod
|
||||
|
||||
- os: osx
|
||||
go: "1.11.x"
|
||||
go: "1.13.x"
|
||||
env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
|
||||
cache:
|
||||
directories:
|
||||
@@ -56,6 +64,3 @@ install:
|
||||
|
||||
script:
|
||||
- go run run_integration_tests.go
|
||||
|
||||
after_success:
|
||||
- test -r all.cov && bash <(curl -s https://codecov.io/bash) -f all.cov
|
||||
|
||||
245
CHANGELOG.md
245
CHANGELOG.md
@@ -1,3 +1,248 @@
|
||||
Changelog for restic 0.9.6 (2019-11-22)
|
||||
=======================================
|
||||
|
||||
The following sections list the changes in restic 0.9.6 relevant to
|
||||
restic users. The changes are ordered by importance.
|
||||
|
||||
Summary
|
||||
-------
|
||||
|
||||
* Fix #2063: Allow absolute path for filename when backing up from stdin
|
||||
* Fix #2174: Save files with invalid timestamps
|
||||
* Fix #2249: Read fresh metadata for unmodified files
|
||||
* Fix #2301: Add upper bound for t in --read-data-subset=n/t
|
||||
* Fix #2321: Check errors when loading index files
|
||||
* Enh #2306: Allow multiple retries for interactive password input
|
||||
* Enh #2330: Make `--group-by` accept both singular and plural
|
||||
* Enh #2350: Add option to configure S3 region
|
||||
|
||||
Details
|
||||
-------
|
||||
|
||||
* Bugfix #2063: Allow absolute path for filename when backing up from stdin
|
||||
|
||||
When backing up from stdin, handle directory path for `--stdin-filename`. This can be used to
|
||||
specify the full path for the backed-up file.
|
||||
|
||||
https://github.com/restic/restic/issues/2063
|
||||
|
||||
* Bugfix #2174: Save files with invalid timestamps
|
||||
|
||||
When restic reads invalid timestamps (year is before 0000 or after 9999) it refused to read and
|
||||
archive the file. We've changed the behavior and will now save modified timestamps with the
|
||||
year set to either 0000 or 9999, the rest of the timestamp stays the same, so the file will be saved
|
||||
(albeit with a bogus timestamp).
|
||||
|
||||
https://github.com/restic/restic/issues/2174
|
||||
https://github.com/restic/restic/issues/1173
|
||||
|
||||
* Bugfix #2249: Read fresh metadata for unmodified files
|
||||
|
||||
Restic took all metadata for files which were detected as unmodified, not taking into account
|
||||
changed metadata (ownership, mode). This is now corrected.
|
||||
|
||||
https://github.com/restic/restic/issues/2249
|
||||
https://github.com/restic/restic/pull/2252
|
||||
|
||||
* Bugfix #2301: Add upper bound for t in --read-data-subset=n/t
|
||||
|
||||
256 is the effective maximum for t, but restic would allow larger values, leading to strange
|
||||
behavior.
|
||||
|
||||
https://github.com/restic/restic/issues/2301
|
||||
https://github.com/restic/restic/pull/2304
|
||||
|
||||
* Bugfix #2321: Check errors when loading index files
|
||||
|
||||
Restic now checks and handles errors which occur when loading index files, the missing check
|
||||
leads to odd errors (and a stack trace printed to users) later. This was reported in the forum.
|
||||
|
||||
https://github.com/restic/restic/pull/2321
|
||||
https://forum.restic.net/t/check-rebuild-index-prune/1848/13
|
||||
|
||||
* Enhancement #2306: Allow multiple retries for interactive password input
|
||||
|
||||
Restic used to quit if the repository password was typed incorrectly once. Restic will now ask
|
||||
the user again for the repository password if typed incorrectly. The user will now get three
|
||||
tries to input the correct password before restic quits.
|
||||
|
||||
https://github.com/restic/restic/issues/2306
|
||||
|
||||
* Enhancement #2330: Make `--group-by` accept both singular and plural
|
||||
|
||||
One can now use the values `host`/`hosts`, `path`/`paths` and `tag` / `tags` interchangeably
|
||||
in the `--group-by` argument.
|
||||
|
||||
https://github.com/restic/restic/issues/2330
|
||||
|
||||
* Enhancement #2350: Add option to configure S3 region
|
||||
|
||||
We've added a new option for setting the region when accessing an S3-compatible service. For
|
||||
some providers, it is required to set this to a valid value. You can do that either by setting the
|
||||
environment variable `AWS_DEFAULT_REGION` or using the option `s3.region`, e.g. like this:
|
||||
`-o s3.region="us-east-1"`.
|
||||
|
||||
https://github.com/restic/restic/pull/2350
|
||||
|
||||
|
||||
Changelog for restic 0.9.5 (2019-04-23)
|
||||
=======================================
|
||||
|
||||
The following sections list the changes in restic 0.9.5 relevant to
|
||||
restic users. The changes are ordered by importance.
|
||||
|
||||
Summary
|
||||
-------
|
||||
|
||||
* Fix #2135: Return error when no bytes could be read from stdin
|
||||
* Fix #2181: Don't cancel timeout after 30 seconds for self-update
|
||||
* Fix #2203: Fix reading passwords from stdin
|
||||
* Fix #2224: Don't abort the find command when a tree can't be loaded
|
||||
* Enh #1895: Add case insensitive include & exclude options
|
||||
* Enh #1937: Support streaming JSON output for backup
|
||||
* Enh #2155: Add Openstack application credential auth for Swift
|
||||
* Enh #2179: Use ctime when checking for file changes
|
||||
* Enh #2184: Add --json support to forget command
|
||||
* Enh #2037: Add group-by option to snapshots command
|
||||
* Enh #2124: Ability to dump folders to tar via stdout
|
||||
* Enh #2139: Return error if no bytes could be read for `backup --stdin`
|
||||
* Enh #2205: Add --ignore-inode option to backup cmd
|
||||
* Enh #2220: Add config option to set S3 storage class
|
||||
|
||||
Details
|
||||
-------
|
||||
|
||||
* Bugfix #2135: Return error when no bytes could be read from stdin
|
||||
|
||||
We assume that users reading backup data from stdin want to know when no data could be read, so now
|
||||
restic returns an error when `backup --stdin` is called but no bytes could be read. Usually,
|
||||
this means that an earlier command in a pipe has failed. The documentation was amended and now
|
||||
recommends setting the `pipefail` option (`set -o pipefail`).
|
||||
|
||||
https://github.com/restic/restic/pull/2135
|
||||
https://github.com/restic/restic/pull/2139
|
||||
|
||||
* Bugfix #2181: Don't cancel timeout after 30 seconds for self-update
|
||||
|
||||
https://github.com/restic/restic/issues/2181
|
||||
|
||||
* Bugfix #2203: Fix reading passwords from stdin
|
||||
|
||||
Passwords for the `init`, `key add`, and `key passwd` commands can now be read from
|
||||
non-terminal stdin.
|
||||
|
||||
https://github.com/restic/restic/issues/2203
|
||||
|
||||
* Bugfix #2224: Don't abort the find command when a tree can't be loaded
|
||||
|
||||
Change the find command so that missing trees don't result in a crash. Instead, the error is
|
||||
logged to the debug log, and the tree ID is displayed along with the snapshot it belongs to. This
|
||||
makes it possible to recover repositories that are missing trees by forgetting the snapshots
|
||||
they are used in.
|
||||
|
||||
https://github.com/restic/restic/issues/2224
|
||||
|
||||
* Enhancement #1895: Add case insensitive include & exclude options
|
||||
|
||||
The backup and restore commands now have --iexclude and --iinclude flags as case insensitive
|
||||
variants of --exclude and --include.
|
||||
|
||||
https://github.com/restic/restic/issues/1895
|
||||
https://github.com/restic/restic/pull/2032
|
||||
|
||||
* Enhancement #1937: Support streaming JSON output for backup
|
||||
|
||||
We've added support for getting machine-readable status output during backup, just pass the
|
||||
flag `--json` for `restic backup` and restic will output a stream of JSON objects which contain
|
||||
the current progress.
|
||||
|
||||
https://github.com/restic/restic/issues/1937
|
||||
https://github.com/restic/restic/pull/1944
|
||||
|
||||
* Enhancement #2155: Add Openstack application credential auth for Swift
|
||||
|
||||
Since Openstack Queens Identity (auth V3) service supports an application credential auth
|
||||
method. It allows to create a technical account with the limited roles. This commit adds an
|
||||
application credential authentication method for the Swift backend.
|
||||
|
||||
https://github.com/restic/restic/issues/2155
|
||||
|
||||
* Enhancement #2179: Use ctime when checking for file changes
|
||||
|
||||
Previously, restic only checked a file's mtime (along with other non-timestamp metadata) to
|
||||
decide if a file has changed. This could cause restic to not notice that a file has changed (and
|
||||
therefore continue to store the old version, as opposed to the modified version) if something
|
||||
edits the file and then resets the timestamp. Restic now also checks the ctime of files, so any
|
||||
modifications to a file should be noticed, and the modified file will be backed up. The ctime
|
||||
check will be disabled if the --ignore-inode flag was given.
|
||||
|
||||
If this change causes problems for you, please open an issue, and we can look in to adding a
|
||||
seperate flag to disable just the ctime check.
|
||||
|
||||
https://github.com/restic/restic/issues/2179
|
||||
https://github.com/restic/restic/pull/2212
|
||||
|
||||
* Enhancement #2184: Add --json support to forget command
|
||||
|
||||
The forget command now supports the --json argument, outputting the information about what is
|
||||
(or would-be) kept and removed from the repository.
|
||||
|
||||
https://github.com/restic/restic/issues/2184
|
||||
https://github.com/restic/restic/pull/2185
|
||||
|
||||
* Enhancement #2037: Add group-by option to snapshots command
|
||||
|
||||
We have added an option to group the output of the snapshots command, similar to the output of the
|
||||
forget command. The option has been called "--group-by" and accepts any combination of the
|
||||
values "host", "paths" and "tags", separated by commas. Default behavior (not specifying
|
||||
--group-by) has not been changed. We have added support of the grouping to the JSON output.
|
||||
|
||||
https://github.com/restic/restic/issues/2037
|
||||
https://github.com/restic/restic/pull/2087
|
||||
|
||||
* Enhancement #2124: Ability to dump folders to tar via stdout
|
||||
|
||||
We've added the ability to dump whole folders to stdout via the `dump` command. Restic now
|
||||
requires at least Go 1.10 due to a limitation of the standard library for Go <= 1.9.
|
||||
|
||||
https://github.com/restic/restic/issues/2123
|
||||
https://github.com/restic/restic/pull/2124
|
||||
|
||||
* Enhancement #2139: Return error if no bytes could be read for `backup --stdin`
|
||||
|
||||
When restic is used to backup the output of a program, like `mysqldump | restic backup --stdin`,
|
||||
it now returns an error if no bytes could be read at all. This catches the failure case when
|
||||
`mysqldump` failed for some reason and did not output any data to stdout.
|
||||
|
||||
https://github.com/restic/restic/pull/2139
|
||||
|
||||
* Enhancement #2205: Add --ignore-inode option to backup cmd
|
||||
|
||||
This option handles backup of virtual filesystems that do not keep fixed inodes for files, like
|
||||
Fuse-based, pCloud, etc. Ignoring inode changes allows to consider the file as unchanged if
|
||||
last modification date and size are unchanged.
|
||||
|
||||
https://github.com/restic/restic/issues/1631
|
||||
https://github.com/restic/restic/pull/2205
|
||||
https://github.com/restic/restic/pull/2047
|
||||
|
||||
* Enhancement #2220: Add config option to set S3 storage class
|
||||
|
||||
The `s3.storage-class` option can be passed to restic (using `-o`) to specify the storage
|
||||
class to be used for S3 objects created by restic.
|
||||
|
||||
The storage class is passed as-is to S3, so it needs to be understood by the API. On AWS, it can be
|
||||
one of `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING` and
|
||||
`REDUCED_REDUNDANCY`. If unspecified, the default storage class is used (`STANDARD` on
|
||||
AWS).
|
||||
|
||||
You can mix storage classes in the same bucket, and the setting isn't stored in the restic
|
||||
repository, so be sure to specify it with each command that writes to S3.
|
||||
|
||||
https://github.com/restic/restic/issues/706
|
||||
https://github.com/restic/restic/pull/2220
|
||||
|
||||
|
||||
Changelog for restic 0.9.4 (2019-01-06)
|
||||
=======================================
|
||||
|
||||
|
||||
@@ -133,8 +133,7 @@ down to the following steps:
|
||||
|
||||
2. Clone the repository locally and create a new branch. If you are working on
|
||||
the code itself, please set up the development environment as described in
|
||||
the previous section. Especially take care to place your forked repository
|
||||
at the correct path (`src/github.com/restic/restic`) within your `GOPATH`.
|
||||
the previous section.
|
||||
|
||||
3. Then commit your changes as fine grained as possible, as smaller patches,
|
||||
that handle one and only one issue are easier to discuss and merge.
|
||||
@@ -150,11 +149,14 @@ down to the following steps:
|
||||
existing commit, use common sense to decide which is better), they will be
|
||||
automatically added to the pull request.
|
||||
|
||||
7. If your pull request changes anything that users should be aware of (a
|
||||
bugfix, a new feature, ...) please add an entry to the file
|
||||
['CHANGELOG.md'](CHANGELOG.md). It will be used in the announcement of the
|
||||
next stable release. While writing, ask yourself: If I were the user, what
|
||||
would I need to be aware of with this change.
|
||||
7. If your pull request changes anything that users should be aware
|
||||
of (a bugfix, a new feature, ...) please add an entry as a new
|
||||
file in `changelog/unreleased` including the issue number in the
|
||||
filename (e.g. `issue-8756`). Use the template in
|
||||
`changelog/TEMPLATE` for the content. It will be used in the
|
||||
announcement of the next stable release. While writing, ask
|
||||
yourself: If I were the user, what would I need to be aware of
|
||||
with this change.
|
||||
|
||||
8. Once your code looks good and passes all the tests, we'll merge it. Thanks
|
||||
a lot for your contribution!
|
||||
|
||||
@@ -129,8 +129,6 @@ Storage are sponsored by `AppsCode <https://appscode.com>`__!
|
||||
:target: https://goreportcard.com/report/github.com/restic/restic
|
||||
.. |Say Thanks| image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg
|
||||
:target: https://saythanks.io/to/restic
|
||||
.. |TestCoverage| image:: https://codecov.io/gh/restic/restic/branch/master/graph/badge.svg
|
||||
:target: https://codecov.io/gh/restic/restic
|
||||
.. |AppsCode| image:: https://cdn.appscode.com/images/logo/appscode/ac-logo-color.png
|
||||
:target: https://appscode.com
|
||||
.. |Reviewed by Hound| image:: https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg
|
||||
|
||||
@@ -20,8 +20,8 @@ init:
|
||||
|
||||
install:
|
||||
- rmdir c:\go /s /q
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.11.windows-amd64.msi
|
||||
- msiexec /i go1.11.windows-amd64.msi /q
|
||||
- appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-amd64.msi
|
||||
- msiexec /i go1.13.4.windows-amd64.msi /q
|
||||
- go version
|
||||
- go env
|
||||
- appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
|
||||
|
||||
15
build.go
15
build.go
@@ -60,12 +60,12 @@ import (
|
||||
|
||||
// config contains the configuration for the program to build.
|
||||
var config = Config{
|
||||
Name: "restic", // name of the program executable and directory
|
||||
Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar"
|
||||
Main: "./cmd/restic", // package name for the main package
|
||||
DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used
|
||||
Tests: []string{"./..."}, // tests to run
|
||||
MinVersion: GoVersion{Major: 1, Minor: 9, Patch: 0}, // minimum Go version supported
|
||||
Name: "restic", // name of the program executable and directory
|
||||
Namespace: "github.com/restic/restic", // subdir of GOPATH, e.g. "github.com/foo/bar"
|
||||
Main: "./cmd/restic", // package name for the main package
|
||||
DefaultBuildTags: []string{"selfupdate"}, // specify build tags which are always used
|
||||
Tests: []string{"./..."}, // tests to run
|
||||
MinVersion: GoVersion{Major: 1, Minor: 10, Patch: 0}, // minimum Go version supported
|
||||
}
|
||||
|
||||
// Config configures the build.
|
||||
@@ -575,6 +575,9 @@ func main() {
|
||||
|
||||
buildArgs = append(buildArgs, "-mod=vendor")
|
||||
testArgs = append(testArgs, "-mod=vendor")
|
||||
|
||||
goEnv["GO111MODULE"] = "on"
|
||||
buildEnv["GO111MODULE"] = "on"
|
||||
} else {
|
||||
if tempdir == "" {
|
||||
tempdir, err = ioutil.TempDir("", fmt.Sprintf("%v-build-", config.Name))
|
||||
|
||||
7
changelog/0.9.5_2019-04-23/issue-1895
Normal file
7
changelog/0.9.5_2019-04-23/issue-1895
Normal file
@@ -0,0 +1,7 @@
|
||||
Enhancement: Add case insensitive include & exclude options
|
||||
|
||||
The backup and restore commands now have --iexclude and --iinclude flags
|
||||
as case insensitive variants of --exclude and --include.
|
||||
|
||||
https://github.com/restic/restic/issues/1895
|
||||
https://github.com/restic/restic/pull/2032
|
||||
8
changelog/0.9.5_2019-04-23/issue-1937
Normal file
8
changelog/0.9.5_2019-04-23/issue-1937
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Support streaming JSON output for backup
|
||||
|
||||
We've added support for getting machine-readable status output during backup,
|
||||
just pass the flag `--json` for `restic backup` and restic will output a stream
|
||||
of JSON objects which contain the current progress.
|
||||
|
||||
https://github.com/restic/restic/issues/1937
|
||||
https://github.com/restic/restic/pull/1944
|
||||
10
changelog/0.9.5_2019-04-23/issue-2135
Normal file
10
changelog/0.9.5_2019-04-23/issue-2135
Normal file
@@ -0,0 +1,10 @@
|
||||
Bugfix: Return error when no bytes could be read from stdin
|
||||
|
||||
We assume that users reading backup data from stdin want to know when no data
|
||||
could be read, so now restic returns an error when `backup --stdin` is called
|
||||
but no bytes could be read. Usually, this means that an earlier command in a
|
||||
pipe has failed. The documentation was amended and now recommends setting the
|
||||
`pipefail` option (`set -o pipefail`).
|
||||
|
||||
https://github.com/restic/restic/pull/2135
|
||||
https://github.com/restic/restic/pull/2139
|
||||
8
changelog/0.9.5_2019-04-23/issue-2155
Normal file
8
changelog/0.9.5_2019-04-23/issue-2155
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: add Openstack application credential auth for Swift
|
||||
|
||||
Since Openstack Queens Identity (auth V3) service supports an application
|
||||
credential auth method. It allows to create a technical account with the
|
||||
limited roles. This commit adds an application credential authentication
|
||||
method for the Swift backend.
|
||||
|
||||
https://github.com/restic/restic/issues/2155
|
||||
15
changelog/0.9.5_2019-04-23/issue-2179
Normal file
15
changelog/0.9.5_2019-04-23/issue-2179
Normal file
@@ -0,0 +1,15 @@
|
||||
Enhancement: Use ctime when checking for file changes
|
||||
|
||||
Previously, restic only checked a file's mtime (along with other non-timestamp
|
||||
metadata) to decide if a file has changed. This could cause restic to not notice
|
||||
that a file has changed (and therefore continue to store the old version, as
|
||||
opposed to the modified version) if something edits the file and then resets the
|
||||
timestamp. Restic now also checks the ctime of files, so any modifications to a
|
||||
file should be noticed, and the modified file will be backed up. The ctime check
|
||||
will be disabled if the --ignore-inode flag was given.
|
||||
|
||||
If this change causes problems for you, please open an issue, and we can look in
|
||||
to adding a seperate flag to disable just the ctime check.
|
||||
|
||||
https://github.com/restic/restic/issues/2179
|
||||
https://github.com/restic/restic/pull/2212
|
||||
3
changelog/0.9.5_2019-04-23/issue-2181
Normal file
3
changelog/0.9.5_2019-04-23/issue-2181
Normal file
@@ -0,0 +1,3 @@
|
||||
Bugfix: Don't cancel timeout after 30 seconds for self-update
|
||||
|
||||
https://github.com/restic/restic/issues/2181
|
||||
8
changelog/0.9.5_2019-04-23/issue-2184
Normal file
8
changelog/0.9.5_2019-04-23/issue-2184
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Add --json support to forget command
|
||||
|
||||
The forget command now supports the --json argument, outputting the
|
||||
information about what is (or would-be) kept and removed from the
|
||||
repository.
|
||||
|
||||
https://github.com/restic/restic/issues/2184
|
||||
https://github.com/restic/restic/pull/2185
|
||||
6
changelog/0.9.5_2019-04-23/issue-2203
Normal file
6
changelog/0.9.5_2019-04-23/issue-2203
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Fix reading passwords from stdin
|
||||
|
||||
Passwords for the `init`, `key add`, and `key passwd` commands can now be read from
|
||||
non-terminal stdin.
|
||||
|
||||
https://github.com/restic/restic/issues/2203
|
||||
9
changelog/0.9.5_2019-04-23/issue-2224
Normal file
9
changelog/0.9.5_2019-04-23/issue-2224
Normal file
@@ -0,0 +1,9 @@
|
||||
Bugfix: Don't abort the find command when a tree can't be loaded
|
||||
|
||||
Change the find command so that missing trees don't result in a crash.
|
||||
Instead, the error is logged to the debug log, and the tree ID is displayed
|
||||
along with the snapshot it belongs to. This makes it possible to recover
|
||||
repositories that are missing trees by forgetting the snapshots they are used
|
||||
in.
|
||||
|
||||
https://github.com/restic/restic/issues/2224
|
||||
10
changelog/0.9.5_2019-04-23/pull-2087
Normal file
10
changelog/0.9.5_2019-04-23/pull-2087
Normal file
@@ -0,0 +1,10 @@
|
||||
Enhancement: Add group-by option to snapshots command
|
||||
|
||||
We have added an option to group the output of the snapshots command, similar
|
||||
to the output of the forget command. The option has been called "--group-by"
|
||||
and accepts any combination of the values "host", "paths" and "tags", separated
|
||||
by commas. Default behavior (not specifying --group-by) has not been changed.
|
||||
We have added support of the grouping to the JSON output.
|
||||
|
||||
https://github.com/restic/restic/issues/2037
|
||||
https://github.com/restic/restic/pull/2087
|
||||
8
changelog/0.9.5_2019-04-23/pull-2124
Normal file
8
changelog/0.9.5_2019-04-23/pull-2124
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Ability to dump folders to tar via stdout
|
||||
|
||||
We've added the ability to dump whole folders to stdout via the `dump` command.
|
||||
Restic now requires at least Go 1.10 due to a limitation of the standard
|
||||
library for Go <= 1.9.
|
||||
|
||||
https://github.com/restic/restic/pull/2124
|
||||
https://github.com/restic/restic/issues/2123
|
||||
8
changelog/0.9.5_2019-04-23/pull-2139
Normal file
8
changelog/0.9.5_2019-04-23/pull-2139
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Return error if no bytes could be read for `backup --stdin`
|
||||
|
||||
When restic is used to backup the output of a program, like `mysqldump | restic
|
||||
backup --stdin`, it now returns an error if no bytes could be read at all. This
|
||||
catches the failure case when `mysqldump` failed for some reason and did not
|
||||
output any data to stdout.
|
||||
|
||||
https://github.com/restic/restic/pull/2139
|
||||
10
changelog/0.9.5_2019-04-23/pull-2205
Normal file
10
changelog/0.9.5_2019-04-23/pull-2205
Normal file
@@ -0,0 +1,10 @@
|
||||
Enhancement: Add --ignore-inode option to backup cmd
|
||||
|
||||
This option handles backup of virtual filesystems that do not keep fixed
|
||||
inodes for files, like Fuse-based, pCloud, etc. Ignoring inode changes allows
|
||||
to consider the file as unchanged if last modification date and size
|
||||
are unchanged.
|
||||
|
||||
https://github.com/restic/restic/pull/2205
|
||||
https://github.com/restic/restic/pull/2047
|
||||
https://github.com/restic/restic/issues/1631
|
||||
16
changelog/0.9.5_2019-04-23/pull-2220
Normal file
16
changelog/0.9.5_2019-04-23/pull-2220
Normal file
@@ -0,0 +1,16 @@
|
||||
Enhancement: Add config option to set S3 storage class
|
||||
|
||||
The `s3.storage-class` option can be passed to restic (using `-o`) to
|
||||
specify the storage class to be used for S3 objects created by restic.
|
||||
|
||||
The storage class is passed as-is to S3, so it needs to be understood by
|
||||
the API. On AWS, it can be one of `STANDARD`, `STANDARD_IA`,
|
||||
`ONEZONE_IA`, `INTELLIGENT_TIERING` and `REDUCED_REDUNDANCY`. If
|
||||
unspecified, the default storage class is used (`STANDARD` on AWS).
|
||||
|
||||
You can mix storage classes in the same bucket, and the setting isn't
|
||||
stored in the restic repository, so be sure to specify it with each
|
||||
command that writes to S3.
|
||||
|
||||
https://github.com/restic/restic/pull/2220
|
||||
https://github.com/restic/restic/issues/706
|
||||
6
changelog/0.9.6_2019-11-22/issue-2063
Normal file
6
changelog/0.9.6_2019-11-22/issue-2063
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Allow absolute path for filename when backing up from stdin
|
||||
|
||||
When backing up from stdin, handle directory path for `--stdin-filename`.
|
||||
This can be used to specify the full path for the backed-up file.
|
||||
|
||||
https://github.com/restic/restic/issues/2063
|
||||
10
changelog/0.9.6_2019-11-22/issue-2174
Normal file
10
changelog/0.9.6_2019-11-22/issue-2174
Normal file
@@ -0,0 +1,10 @@
|
||||
Bugfix: Save files with invalid timestamps
|
||||
|
||||
When restic reads invalid timestamps (year is before 0000 or after 9999) it
|
||||
refused to read and archive the file. We've changed the behavior and will now
|
||||
save modified timestamps with the year set to either 0000 or 9999, the rest of
|
||||
the timestamp stays the same, so the file will be saved (albeit with a bogus
|
||||
timestamp).
|
||||
|
||||
https://github.com/restic/restic/issues/2174
|
||||
https://github.com/restic/restic/issues/1173
|
||||
6
changelog/0.9.6_2019-11-22/issue-2249
Normal file
6
changelog/0.9.6_2019-11-22/issue-2249
Normal file
@@ -0,0 +1,6 @@
|
||||
Bugfix: Read fresh metadata for unmodified files
|
||||
|
||||
Restic took all metadata for files which were detected as unmodified, not taking into account changed metadata (ownership, mode). This is now corrected.
|
||||
|
||||
https://github.com/restic/restic/issues/2249
|
||||
https://github.com/restic/restic/pull/2252
|
||||
7
changelog/0.9.6_2019-11-22/issue-2301
Normal file
7
changelog/0.9.6_2019-11-22/issue-2301
Normal file
@@ -0,0 +1,7 @@
|
||||
Bugfix: Add upper bound for t in --read-data-subset=n/t
|
||||
|
||||
256 is the effective maximum for t, but restic would allow larger
|
||||
values, leading to strange behavior.
|
||||
|
||||
https://github.com/restic/restic/issues/2301
|
||||
https://github.com/restic/restic/pull/2304
|
||||
7
changelog/0.9.6_2019-11-22/issue-2306
Normal file
7
changelog/0.9.6_2019-11-22/issue-2306
Normal file
@@ -0,0 +1,7 @@
|
||||
Enhancement: Allow multiple retries for interactive password input
|
||||
|
||||
Restic used to quit if the repository password was typed incorrectly once.
|
||||
Restic will now ask the user again for the repository password if typed incorrectly.
|
||||
The user will now get three tries to input the correct password before restic quits.
|
||||
|
||||
https://github.com/restic/restic/issues/2306
|
||||
6
changelog/0.9.6_2019-11-22/issue-2330
Normal file
6
changelog/0.9.6_2019-11-22/issue-2330
Normal file
@@ -0,0 +1,6 @@
|
||||
Enhancement: Make `--group-by` accept both singular and plural
|
||||
|
||||
One can now use the values `host`/`hosts`, `path`/`paths` and
|
||||
`tag` / `tags` interchangeably in the `--group-by` argument.
|
||||
|
||||
https://github.com/restic/restic/issues/2330
|
||||
8
changelog/0.9.6_2019-11-22/pull-2321
Normal file
8
changelog/0.9.6_2019-11-22/pull-2321
Normal file
@@ -0,0 +1,8 @@
|
||||
Bugfix: Check errors when loading index files
|
||||
|
||||
Restic now checks and handles errors which occur when loading index files, the
|
||||
missing check leads to odd errors (and a stack trace printed to users) later.
|
||||
This was reported in the forum.
|
||||
|
||||
https://github.com/restic/restic/pull/2321
|
||||
https://forum.restic.net/t/check-rebuild-index-prune/1848/13
|
||||
8
changelog/0.9.6_2019-11-22/pull-2350
Normal file
8
changelog/0.9.6_2019-11-22/pull-2350
Normal file
@@ -0,0 +1,8 @@
|
||||
Enhancement: Add option to configure S3 region
|
||||
|
||||
We've added a new option for setting the region when accessing an S3-compatible
|
||||
service. For some providers, it is required to set this to a valid value. You
|
||||
can do that either by setting the environment variable `AWS_DEFAULT_REGION` or
|
||||
using the option `s3.region`, e.g. like this: `-o s3.region="us-east-1"`.
|
||||
|
||||
https://github.com/restic/restic/pull/2350
|
||||
131
cmd/restic/acl.go
Normal file
131
cmd/restic/acl.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package main
|
||||
|
||||
// Adapted from https://github.com/maxymania/go-system/blob/master/posix_acl/posix_acl.go
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
aclUserOwner = 0x0001
|
||||
aclUser = 0x0002
|
||||
aclGroupOwner = 0x0004
|
||||
aclGroup = 0x0008
|
||||
aclMask = 0x0010
|
||||
aclOthers = 0x0020
|
||||
)
|
||||
|
||||
type aclSID uint64
|
||||
|
||||
type aclElem struct {
|
||||
Tag uint16
|
||||
Perm uint16
|
||||
ID uint32
|
||||
}
|
||||
|
||||
type acl struct {
|
||||
Version uint32
|
||||
List []aclElement
|
||||
}
|
||||
|
||||
type aclElement struct {
|
||||
aclSID
|
||||
Perm uint16
|
||||
}
|
||||
|
||||
func (a *aclSID) setUID(uid uint32) {
|
||||
*a = aclSID(uid) | (aclUser << 32)
|
||||
}
|
||||
func (a *aclSID) setGID(gid uint32) {
|
||||
*a = aclSID(gid) | (aclGroup << 32)
|
||||
}
|
||||
|
||||
func (a *aclSID) setType(tp int) {
|
||||
*a = aclSID(tp) << 32
|
||||
}
|
||||
|
||||
func (a aclSID) getType() int {
|
||||
return int(a >> 32)
|
||||
}
|
||||
func (a aclSID) getID() uint32 {
|
||||
return uint32(a & 0xffffffff)
|
||||
}
|
||||
func (a aclSID) String() string {
|
||||
switch a >> 32 {
|
||||
case aclUserOwner:
|
||||
return "user::"
|
||||
case aclUser:
|
||||
return fmt.Sprintf("user:%v:", a.getID())
|
||||
case aclGroupOwner:
|
||||
return "group::"
|
||||
case aclGroup:
|
||||
return fmt.Sprintf("group:%v:", a.getID())
|
||||
case aclMask:
|
||||
return "mask::"
|
||||
case aclOthers:
|
||||
return "other::"
|
||||
}
|
||||
return "?:"
|
||||
}
|
||||
|
||||
func (a aclElement) String() string {
|
||||
str := ""
|
||||
if (a.Perm & 4) != 0 {
|
||||
str += "r"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
if (a.Perm & 2) != 0 {
|
||||
str += "w"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
if (a.Perm & 1) != 0 {
|
||||
str += "x"
|
||||
} else {
|
||||
str += "-"
|
||||
}
|
||||
return fmt.Sprintf("%v%v", a.aclSID, str)
|
||||
}
|
||||
|
||||
func (a *acl) decode(xattr []byte) {
|
||||
var elem aclElement
|
||||
ae := new(aclElem)
|
||||
nr := bytes.NewReader(xattr)
|
||||
e := binary.Read(nr, binary.LittleEndian, &a.Version)
|
||||
if e != nil {
|
||||
a.Version = 0
|
||||
return
|
||||
}
|
||||
if len(a.List) > 0 {
|
||||
a.List = a.List[:0]
|
||||
}
|
||||
for binary.Read(nr, binary.LittleEndian, ae) == nil {
|
||||
elem.aclSID = (aclSID(ae.Tag) << 32) | aclSID(ae.ID)
|
||||
elem.Perm = ae.Perm
|
||||
a.List = append(a.List, elem)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *acl) encode() []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
ae := new(aclElem)
|
||||
binary.Write(buf, binary.LittleEndian, &a.Version)
|
||||
for _, elem := range a.List {
|
||||
ae.Tag = uint16(elem.getType())
|
||||
ae.Perm = elem.Perm
|
||||
ae.ID = elem.getID()
|
||||
binary.Write(buf, binary.LittleEndian, ae)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (a *acl) String() string {
|
||||
var finalacl string
|
||||
for _, acl := range a.List {
|
||||
finalacl += acl.String() + "\n"
|
||||
}
|
||||
return finalacl
|
||||
}
|
||||
96
cmd/restic/acl_test.go
Normal file
96
cmd/restic/acl_test.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_acl_decode(t *testing.T) {
|
||||
type args struct {
|
||||
xattr []byte
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "decode string",
|
||||
args: args{
|
||||
xattr: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
|
||||
},
|
||||
want: "user::rw-\nuser:0:rwx\nuser:65534:rwx\ngroup::rwx\nmask::rwx\nother::r--\n",
|
||||
},
|
||||
{
|
||||
name: "decode fail",
|
||||
args: args{
|
||||
xattr: []byte("abctest"),
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
a := &acl{}
|
||||
a.decode(tt.args.xattr)
|
||||
if tt.want != a.String() {
|
||||
t.Errorf("acl.decode() = %v, want: %v", a.String(), tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_acl_encode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want []byte
|
||||
args []aclElement
|
||||
}{
|
||||
{
|
||||
name: "encode values",
|
||||
want: []byte{2, 0, 0, 0, 1, 0, 6, 0, 255, 255, 255, 255, 2, 0, 7, 0, 0, 0, 0, 0, 2, 0, 7, 0, 254, 255, 0, 0, 4, 0, 7, 0, 255, 255, 255, 255, 16, 0, 7, 0, 255, 255, 255, 255, 32, 0, 4, 0, 255, 255, 255, 255},
|
||||
args: []aclElement{
|
||||
{
|
||||
aclSID: 8589934591,
|
||||
Perm: 6,
|
||||
},
|
||||
{
|
||||
aclSID: 8589934592,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 8590000126,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 21474836479,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 73014444031,
|
||||
Perm: 7,
|
||||
},
|
||||
{
|
||||
aclSID: 141733920767,
|
||||
Perm: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "encode fail",
|
||||
want: []byte{2, 0, 0, 0},
|
||||
args: []aclElement{},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
a := &acl{
|
||||
Version: 2,
|
||||
List: tt.args,
|
||||
}
|
||||
if got := a.encode(); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("acl.encode() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5,8 +5,10 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -23,6 +25,7 @@ import (
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/textfile"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
"github.com/restic/restic/internal/ui/jsonstatus"
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
)
|
||||
|
||||
@@ -68,20 +71,22 @@ given as the arguments.
|
||||
|
||||
// BackupOptions bundles all options for the backup command.
|
||||
type BackupOptions struct {
|
||||
Parent string
|
||||
Force bool
|
||||
Excludes []string
|
||||
ExcludeFiles []string
|
||||
ExcludeOtherFS bool
|
||||
ExcludeIfPresent []string
|
||||
ExcludeCaches bool
|
||||
Stdin bool
|
||||
StdinFilename string
|
||||
Tags []string
|
||||
Host string
|
||||
FilesFrom []string
|
||||
TimeStamp string
|
||||
WithAtime bool
|
||||
Parent string
|
||||
Force bool
|
||||
Excludes []string
|
||||
InsensitiveExcludes []string
|
||||
ExcludeFiles []string
|
||||
ExcludeOtherFS bool
|
||||
ExcludeIfPresent []string
|
||||
ExcludeCaches bool
|
||||
Stdin bool
|
||||
StdinFilename string
|
||||
Tags []string
|
||||
Host string
|
||||
FilesFrom []string
|
||||
TimeStamp string
|
||||
WithAtime bool
|
||||
IgnoreInode bool
|
||||
}
|
||||
|
||||
var backupOptions BackupOptions
|
||||
@@ -93,10 +98,11 @@ func init() {
|
||||
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
|
||||
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
|
||||
f.StringArrayVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
|
||||
f.StringArrayVar(&backupOptions.InsensitiveExcludes, "iexclude", nil, "same as `--exclude` but ignores the casing of filenames")
|
||||
f.StringArrayVar(&backupOptions.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
|
||||
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
|
||||
f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
|
||||
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file`)
|
||||
f.BoolVar(&backupOptions.ExcludeCaches, "exclude-caches", false, `excludes cache directories that are marked with a CACHEDIR.TAG file. See http://bford.info/cachedir/spec.html for the Cache Directory Tagging Standard`)
|
||||
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
|
||||
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
|
||||
f.StringArrayVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
|
||||
@@ -108,6 +114,7 @@ func init() {
|
||||
f.StringArrayVar(&backupOptions.FilesFrom, "files-from", nil, "read the files to backup from file (can be combined with file args/can be specified multiple times)")
|
||||
f.StringVar(&backupOptions.TimeStamp, "time", "", "time of the backup (ex. '2012-11-01 22:08:41') (default: now)")
|
||||
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
|
||||
f.BoolVar(&backupOptions.IgnoreInode, "ignore-inode", false, "ignore inode number changes when checking for modified files")
|
||||
}
|
||||
|
||||
// filterExisting returns a slice of all existing items, or an error if no
|
||||
@@ -222,6 +229,10 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository, t
|
||||
opts.Excludes = append(opts.Excludes, excludes...)
|
||||
}
|
||||
|
||||
if len(opts.InsensitiveExcludes) > 0 {
|
||||
fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
|
||||
}
|
||||
|
||||
if len(opts.Excludes) > 0 {
|
||||
fs = append(fs, rejectByPattern(opts.Excludes))
|
||||
}
|
||||
@@ -395,13 +406,43 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
|
||||
var t tomb.Tomb
|
||||
|
||||
term.Print("open repository\n")
|
||||
if gopts.verbosity >= 2 && !gopts.JSON {
|
||||
term.Print("open repository\n")
|
||||
}
|
||||
|
||||
repo, err := OpenRepository(gopts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p := ui.NewBackup(term, gopts.verbosity)
|
||||
type ArchiveProgressReporter interface {
|
||||
CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration)
|
||||
StartFile(filename string)
|
||||
CompleteBlob(filename string, bytes uint64)
|
||||
ScannerError(item string, fi os.FileInfo, err error) error
|
||||
ReportTotal(item string, s archiver.ScanStats)
|
||||
SetMinUpdatePause(d time.Duration)
|
||||
Run(ctx context.Context) error
|
||||
Error(item string, fi os.FileInfo, err error) error
|
||||
Finish(snapshotID restic.ID)
|
||||
|
||||
// ui.StdioWrapper
|
||||
Stdout() io.WriteCloser
|
||||
Stderr() io.WriteCloser
|
||||
|
||||
// ui.Message
|
||||
E(msg string, args ...interface{})
|
||||
P(msg string, args ...interface{})
|
||||
V(msg string, args ...interface{})
|
||||
VV(msg string, args ...interface{})
|
||||
}
|
||||
|
||||
var p ArchiveProgressReporter
|
||||
if gopts.JSON {
|
||||
p = jsonstatus.NewBackup(term, gopts.verbosity)
|
||||
} else {
|
||||
p = ui.NewBackup(term, gopts.verbosity)
|
||||
}
|
||||
|
||||
// use the terminal for stdout/stderr
|
||||
prevStdout, prevStderr := gopts.stdout, gopts.stderr
|
||||
@@ -416,13 +457,15 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
if fps > 60 {
|
||||
fps = 60
|
||||
}
|
||||
p.MinUpdatePause = time.Second / time.Duration(fps)
|
||||
p.SetMinUpdatePause(time.Second / time.Duration(fps))
|
||||
}
|
||||
}
|
||||
|
||||
t.Go(func() error { return p.Run(t.Context(gopts.ctx)) })
|
||||
|
||||
p.V("lock repository")
|
||||
if !gopts.JSON {
|
||||
p.V("lock repository")
|
||||
}
|
||||
lock, err := lockRepo(repo)
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
@@ -441,7 +484,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
return err
|
||||
}
|
||||
|
||||
p.V("load index files")
|
||||
if !gopts.JSON {
|
||||
p.V("load index files")
|
||||
}
|
||||
err = repo.LoadIndex(gopts.ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -452,7 +497,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
return err
|
||||
}
|
||||
|
||||
if parentSnapshotID != nil {
|
||||
if !gopts.JSON && parentSnapshotID != nil {
|
||||
p.V("using parent snapshot %v\n", parentSnapshotID.Str())
|
||||
}
|
||||
|
||||
@@ -476,14 +521,17 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
|
||||
var targetFS fs.FS = fs.Local{}
|
||||
if opts.Stdin {
|
||||
p.V("read data from stdin")
|
||||
if !gopts.JSON {
|
||||
p.V("read data from stdin")
|
||||
}
|
||||
filename := path.Join("/", opts.StdinFilename)
|
||||
targetFS = &fs.Reader{
|
||||
ModTime: timeStamp,
|
||||
Name: opts.StdinFilename,
|
||||
Name: filename,
|
||||
Mode: 0644,
|
||||
ReadCloser: os.Stdin,
|
||||
}
|
||||
targets = []string{opts.StdinFilename}
|
||||
targets = []string{filename}
|
||||
}
|
||||
|
||||
sc := archiver.NewScanner(targetFS)
|
||||
@@ -492,7 +540,9 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
sc.Error = p.ScannerError
|
||||
sc.Result = p.ReportTotal
|
||||
|
||||
p.V("start scan on %v", targets)
|
||||
if !gopts.JSON {
|
||||
p.V("start scan on %v", targets)
|
||||
}
|
||||
t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) })
|
||||
|
||||
arch := archiver.New(repo, targetFS, archiver.Options{})
|
||||
@@ -500,9 +550,10 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
arch.Select = selectFilter
|
||||
arch.WithAtime = opts.WithAtime
|
||||
arch.Error = p.Error
|
||||
arch.CompleteItem = p.CompleteItemFn
|
||||
arch.CompleteItem = p.CompleteItem
|
||||
arch.StartFile = p.StartFile
|
||||
arch.CompleteBlob = p.CompleteBlob
|
||||
arch.IgnoreInode = opts.IgnoreInode
|
||||
|
||||
if parentSnapshotID == nil {
|
||||
parentSnapshotID = &restic.ID{}
|
||||
@@ -519,10 +570,14 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
uploader := archiver.IndexUploader{
|
||||
Repository: repo,
|
||||
Start: func() {
|
||||
p.VV("uploading intermediate index")
|
||||
if !gopts.JSON {
|
||||
p.VV("uploading intermediate index")
|
||||
}
|
||||
},
|
||||
Complete: func(id restic.ID) {
|
||||
p.V("uploaded intermediate index %v", id.Str())
|
||||
if !gopts.JSON {
|
||||
p.V("uploaded intermediate index %v", id.Str())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
@@ -530,14 +585,18 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Termina
|
||||
return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second)
|
||||
})
|
||||
|
||||
p.V("start backup on %v", targets)
|
||||
if !gopts.JSON {
|
||||
p.V("start backup on %v", targets)
|
||||
}
|
||||
_, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts)
|
||||
if err != nil {
|
||||
return errors.Fatalf("unable to save snapshot: %v", err)
|
||||
}
|
||||
|
||||
p.Finish()
|
||||
p.P("snapshot %s saved\n", id.Str())
|
||||
p.Finish(id)
|
||||
if !gopts.JSON {
|
||||
p.P("snapshot %s saved\n", id.Str())
|
||||
}
|
||||
|
||||
// cleanly shutdown all running goroutines
|
||||
t.Kill(nil)
|
||||
|
||||
@@ -74,7 +74,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
fmt.Println(string(buf))
|
||||
return nil
|
||||
case "index":
|
||||
buf, err := repo.LoadAndDecrypt(gopts.ctx, restic.IndexFile, id)
|
||||
buf, err := repo.LoadAndDecrypt(gopts.ctx, nil, restic.IndexFile, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -99,7 +99,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
return nil
|
||||
case "key":
|
||||
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
|
||||
buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
|
||||
buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -150,7 +150,7 @@ func runCat(gopts GlobalOptions, args []string) error {
|
||||
switch tpe {
|
||||
case "pack":
|
||||
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
|
||||
buf, err := backend.LoadAll(gopts.ctx, repo.Backend(), h)
|
||||
buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -67,11 +67,17 @@ func checkFlags(opts CheckOptions) error {
|
||||
if dataSubset[0] == 0 || dataSubset[1] == 0 || dataSubset[0] > dataSubset[1] {
|
||||
return errors.Fatalf("check flag --read-data-subset=n/t values must be positive integers, and n <= t, e.g. --read-data-subset=1/2")
|
||||
}
|
||||
if dataSubset[1] > totalBucketsMax {
|
||||
return errors.Fatalf("check flag --read-data-subset=n/t t must be at most %d", totalBucketsMax)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// See doReadData in runCheck below for why this is 256.
|
||||
const totalBucketsMax = 256
|
||||
|
||||
// stringToIntSlice converts string to []uint, using '/' as element separator
|
||||
func stringToIntSlice(param string) (split []uint, err error) {
|
||||
if param == "" {
|
||||
@@ -257,6 +263,8 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
|
||||
doReadData := func(bucket, totalBuckets uint) {
|
||||
packs := restic.IDSet{}
|
||||
for pack := range chkr.GetPacks() {
|
||||
// If we ever check more than the first byte
|
||||
// of pack, update totalBucketsMax.
|
||||
if (uint(pack[0]) % totalBuckets) == (bucket - 1) {
|
||||
packs.Insert(pack)
|
||||
}
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/walker"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -50,41 +54,18 @@ func init() {
|
||||
|
||||
func splitPath(p string) []string {
|
||||
d, f := path.Split(p)
|
||||
if d == "" || d == "/" {
|
||||
if d == "" {
|
||||
return []string{f}
|
||||
}
|
||||
if d == "/" {
|
||||
return []string{d}
|
||||
}
|
||||
s := splitPath(path.Clean(d))
|
||||
return append(s, f)
|
||||
}
|
||||
|
||||
func dumpNode(ctx context.Context, repo restic.Repository, node *restic.Node) error {
|
||||
var buf []byte
|
||||
for _, id := range node.Content {
|
||||
size, found := repo.LookupBlobSize(id, restic.DataBlob)
|
||||
if !found {
|
||||
return errors.Errorf("id %v not found in repository", id)
|
||||
}
|
||||
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string, pathToPrint string) error {
|
||||
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) < restic.CiphertextLength(int(size)) {
|
||||
buf = restic.NewBlobBuffer(int(size))
|
||||
}
|
||||
|
||||
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
_, err = os.Stdout.Write(buf)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Write")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repository, prefix string, pathComponents []string) error {
|
||||
if tree == nil {
|
||||
return fmt.Errorf("called with a nil tree")
|
||||
}
|
||||
@@ -97,18 +78,21 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.Repositor
|
||||
}
|
||||
item := filepath.Join(prefix, pathComponents[0])
|
||||
for _, node := range tree.Nodes {
|
||||
if node.Name == pathComponents[0] {
|
||||
if node.Name == pathComponents[0] || pathComponents[0] == "/" {
|
||||
switch {
|
||||
case l == 1 && node.Type == "file":
|
||||
return dumpNode(ctx, repo, node)
|
||||
return getNodeData(ctx, os.Stdout, repo, node)
|
||||
case l > 1 && node.Type == "dir":
|
||||
subtree, err := repo.LoadTree(ctx, *node.Subtree)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot load subtree for %q", item)
|
||||
}
|
||||
return printFromTree(ctx, subtree, repo, item, pathComponents[1:])
|
||||
return printFromTree(ctx, subtree, repo, item, pathComponents[1:], pathToPrint)
|
||||
case node.Type == "dir":
|
||||
node.Path = pathToPrint
|
||||
return tarTree(ctx, repo, node, pathToPrint)
|
||||
case l > 1:
|
||||
return fmt.Errorf("%q should be a dir, but s a %q", item, node.Type)
|
||||
return fmt.Errorf("%q should be a dir, but is a %q", item, node.Type)
|
||||
case node.Type != "file":
|
||||
return fmt.Errorf("%q should be a file, but is a %q", item, node.Type)
|
||||
}
|
||||
@@ -129,7 +113,7 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {

    debug.Log("dump file %q from %q", pathToPrint, snapshotIDString)

    splittedPath := splitPath(pathToPrint)
    splittedPath := splitPath(path.Clean(pathToPrint))

    repo, err := OpenRepository(gopts)
    if err != nil {
@@ -173,10 +157,143 @@ func runDump(opts DumpOptions, gopts GlobalOptions, args []string) error {
        Exitf(2, "loading tree for snapshot %q failed: %v", snapshotIDString, err)
    }

    err = printFromTree(ctx, tree, repo, "", splittedPath)
    err = printFromTree(ctx, tree, repo, "", splittedPath, pathToPrint)
    if err != nil {
        Exitf(2, "cannot dump file: %v", err)
    }

    return nil
}

func getNodeData(ctx context.Context, output io.Writer, repo restic.Repository, node *restic.Node) error {
    var buf []byte
    for _, id := range node.Content {

        size, found := repo.LookupBlobSize(id, restic.DataBlob)
        if !found {
            return errors.Errorf("id %v not found in repository", id)
        }

        buf = buf[:cap(buf)]
        if len(buf) < restic.CiphertextLength(int(size)) {
            buf = restic.NewBlobBuffer(int(size))
        }

        n, err := repo.LoadBlob(ctx, restic.DataBlob, id, buf)
        if err != nil {
            return err
        }
        buf = buf[:n]

        _, err = output.Write(buf)
        if err != nil {
            return errors.Wrap(err, "Write")
        }

    }
    return nil
}
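The getNodeData helper above reuses a single buffer across all blob loads so that only the largest blob forces a new allocation. Here is a minimal, self-contained sketch of that reuse pattern; the blob sizes and the loadBlob stand-in are made up for illustration, since the real code gets both from the repository index and repo.LoadBlob.

```go
package main

import "fmt"

// loadBlob stands in for repo.LoadBlob: it pretends to fill buf with
// size bytes and reports how many bytes were written.
func loadBlob(size int, buf []byte) int {
	for i := 0; i < size; i++ {
		buf[i] = byte('a' + i%26)
	}
	return size
}

func main() {
	sizes := []int{16, 64, 8} // hypothetical blob sizes
	var buf []byte

	for _, size := range sizes {
		// grow the buffer only when the next blob does not fit,
		// otherwise keep the allocation from the previous iteration
		buf = buf[:cap(buf)]
		if len(buf) < size {
			buf = make([]byte, size)
		}

		n := loadBlob(size, buf)
		buf = buf[:n]

		fmt.Printf("loaded %d bytes, cap(buf)=%d\n", n, cap(buf))
	}
}
```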

func tarTree(ctx context.Context, repo restic.Repository, rootNode *restic.Node, rootPath string) error {

    if stdoutIsTerminal() {
        return fmt.Errorf("stdout is the terminal, please redirect output")
    }

    tw := tar.NewWriter(os.Stdout)
    defer tw.Close()

    // If we want to dump "/" we'll need to add the name of the first node, too
    // as it would get lost otherwise.
    if rootNode.Path == "/" {
        rootNode.Path = path.Join(rootNode.Path, rootNode.Name)
        rootPath = rootNode.Path
    }

    // we know that rootNode is a folder and walker.Walk will already process
    // the next node, so we have to tar this one first, too
    if err := tarNode(ctx, tw, rootNode, repo); err != nil {
        return err
    }

    err := walker.Walk(ctx, repo, *rootNode.Subtree, nil, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
        if err != nil {
            return false, err
        }
        if node == nil {
            return false, nil
        }

        node.Path = path.Join(rootPath, nodepath)

        if node.Type == "file" || node.Type == "symlink" || node.Type == "dir" {
            err := tarNode(ctx, tw, node, repo)
            if err != err {
                return false, err
            }
        }

        return false, nil
    })

    return err
}
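tarTree writes the root node itself before handing the subtree to walker.Walk, because the walker only visits the children. The sketch below mirrors that shape with the standard library against a local directory instead of a restic repository, so it is only an analogy under that assumption, not the restic implementation; filepath.Walk happens to visit the root too, so the "emit the root by hand" step disappears. tarNode, which the real code calls per node, is defined just below.

```go
package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
	"path/filepath"
)

// writeEntry adds one header (and, for regular files, the contents)
// to the tar stream, roughly what tarNode does for a restic.Node.
func writeEntry(tw *tar.Writer, path string, info os.FileInfo) error {
	// keep the sketch to directories and regular files
	if !info.Mode().IsRegular() && !info.IsDir() {
		return nil
	}

	header, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	header.Name = filepath.ToSlash(path)

	if err := tw.WriteHeader(header); err != nil {
		return err
	}
	if info.IsDir() {
		return nil
	}

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(tw, f)
	return err
}

func main() {
	root := "." // hypothetical directory to dump

	// the real command refuses to write a tar stream to a terminal;
	// redirect stdout when running this sketch
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()

	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		return writeEntry(tw, path, info)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```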

func tarNode(ctx context.Context, tw *tar.Writer, node *restic.Node, repo restic.Repository) error {

    header := &tar.Header{
        Name:       node.Path,
        Size:       int64(node.Size),
        Mode:       int64(node.Mode),
        Uid:        int(node.UID),
        Gid:        int(node.GID),
        ModTime:    node.ModTime,
        AccessTime: node.AccessTime,
        ChangeTime: node.ChangeTime,
        PAXRecords: parseXattrs(node.ExtendedAttributes),
    }

    if node.Type == "symlink" {
        header.Typeflag = tar.TypeSymlink
        header.Linkname = node.LinkTarget
    }

    if node.Type == "dir" {
        header.Typeflag = tar.TypeDir
    }

    err := tw.WriteHeader(header)

    if err != nil {
        return errors.Wrap(err, "TarHeader ")
    }

    return getNodeData(ctx, tw, repo, node)

}
func parseXattrs(xattrs []restic.ExtendedAttribute) map[string]string {
    tmpMap := make(map[string]string)

    for _, attr := range xattrs {
        attrString := string(attr.Value)

        if strings.HasPrefix(attr.Name, "system.posix_acl_") {
            na := acl{}
            na.decode(attr.Value)

            if na.String() != "" {
                if strings.Contains(attr.Name, "system.posix_acl_access") {
                    tmpMap["SCHILY.acl.access"] = na.String()
                } else if strings.Contains(attr.Name, "system.posix_acl_default") {
                    tmpMap["SCHILY.acl.default"] = na.String()
                }
            }

        } else {
            tmpMap["SCHILY.xattr."+attr.Name] = attrString
        }
    }

    return tmpMap
}
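parseXattrs maps extended attributes onto PAX record keys: plain xattrs become SCHILY.xattr.* entries, POSIX ACLs become SCHILY.acl.access / SCHILY.acl.default. A small stand-alone sketch of the non-ACL half, using a local struct in place of restic.ExtendedAttribute and a made-up attribute name:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// xattr is a stand-in for restic.ExtendedAttribute.
type xattr struct {
	Name  string
	Value []byte
}

// paxRecords converts plain extended attributes into PAX records the
// way parseXattrs does in the non-ACL branch.
func paxRecords(attrs []xattr) map[string]string {
	m := make(map[string]string)
	for _, attr := range attrs {
		m["SCHILY.xattr."+attr.Name] = string(attr.Value)
	}
	return m
}

func main() {
	records := paxRecords([]xattr{
		{Name: "user.comment", Value: []byte("hello")}, // hypothetical attribute
	})

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	header := &tar.Header{
		Name:       "file.txt",
		Mode:       0644,
		Size:       0,
		PAXRecords: records, // forces a PAX extended header in the archive
	}
	if err := tw.WriteHeader(header); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	fmt.Println("PAX records:", records)
	fmt.Println("archive size:", buf.Len(), "bytes")
}
```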
@@ -62,7 +62,7 @@ func init() {
|
||||
f.BoolVar(&findOptions.BlobID, "blob", false, "pattern is a blob-ID")
|
||||
f.BoolVar(&findOptions.TreeID, "tree", false, "pattern is a tree-ID")
|
||||
f.BoolVar(&findOptions.PackID, "pack", false, "pattern is a pack-ID")
|
||||
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob)")
|
||||
f.BoolVar(&findOptions.ShowPackID, "show-pack-id", false, "display the pack-ID the blobs belong to (with --blob or --tree)")
|
||||
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
|
||||
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
|
||||
|
||||
@@ -258,9 +258,13 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
|
||||
}
|
||||
|
||||
f.out.newsn = sn
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(_ restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
debug.Log("Error loading tree %v: %v", parentTreeID, err)
|
||||
|
||||
Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
|
||||
|
||||
return false, walker.SkipNode
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
@@ -340,7 +344,11 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
|
||||
f.out.newsn = sn
|
||||
return walker.Walk(ctx, f.repo, *sn.Tree, f.ignoreTrees, func(parentTreeID restic.ID, nodepath string, node *restic.Node, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
debug.Log("Error loading tree %v: %v", parentTreeID, err)
|
||||
|
||||
Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
|
||||
|
||||
return false, walker.SkipNode
|
||||
}
|
||||
|
||||
if node == nil {
|
||||
@@ -442,27 +450,36 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Finder) findBlobsPacks(ctx context.Context) {
|
||||
func (f *Finder) findObjectPack(ctx context.Context, id string, t restic.BlobType) {
|
||||
idx := f.repo.Index()
|
||||
|
||||
rid, err := restic.ParseID(id)
|
||||
if err != nil {
|
||||
Printf("Note: cannot find pack for object '%s', unable to parse ID: %v\n", id, err)
|
||||
return
|
||||
}
|
||||
|
||||
blobs, found := idx.Lookup(rid, t)
|
||||
if !found {
|
||||
Printf("Object %s not found in the index\n", rid.Str())
|
||||
return
|
||||
}
|
||||
|
||||
for _, b := range blobs {
|
||||
if b.ID.Equal(rid) {
|
||||
Printf("Object belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Finder) findObjectsPacks(ctx context.Context) {
|
||||
for i := range f.blobIDs {
|
||||
rid, err := restic.ParseID(i)
|
||||
if err != nil {
|
||||
Printf("Note: cannot find pack for blob '%s', unable to parse ID: %v\n", i, err)
|
||||
continue
|
||||
}
|
||||
f.findObjectPack(ctx, i, restic.DataBlob)
|
||||
}
|
||||
|
||||
blobs, found := idx.Lookup(rid, restic.DataBlob)
|
||||
if !found {
|
||||
Printf("Blob %s not found in the index\n", rid.Str())
|
||||
continue
|
||||
}
|
||||
|
||||
for _, b := range blobs {
|
||||
if b.ID.Equal(rid) {
|
||||
Printf("Blob belongs to pack %s\n ... Pack %s: %s\n", b.PackID, b.PackID.Str(), b.String())
|
||||
break
|
||||
}
|
||||
}
|
||||
for i := range f.treeIDs {
|
||||
f.findObjectPack(ctx, i, restic.TreeBlob)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -557,8 +574,8 @@ func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
|
||||
}
|
||||
f.out.Finish()
|
||||
|
||||
if opts.ShowPackID && f.blobIDs != nil {
|
||||
f.findBlobsPacks(ctx)
|
||||
if opts.ShowPackID && (f.blobIDs != nil || f.treeIDs != nil) {
|
||||
f.findObjectsPacks(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -3,10 +3,8 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"strings"
|
||||
"io"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -90,153 +88,129 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// group by hostname and dirs
|
||||
type key struct {
|
||||
Hostname string
|
||||
Paths []string
|
||||
Tags []string
|
||||
}
|
||||
snapshotGroups := make(map[string]restic.Snapshots)
|
||||
|
||||
var GroupByTag bool
|
||||
var GroupByHost bool
|
||||
var GroupByPath bool
|
||||
var GroupOptionList []string
|
||||
|
||||
GroupOptionList = strings.Split(opts.GroupBy, ",")
|
||||
|
||||
for _, option := range GroupOptionList {
|
||||
switch option {
|
||||
case "host":
|
||||
GroupByHost = true
|
||||
case "paths":
|
||||
GroupByPath = true
|
||||
case "tags":
|
||||
GroupByTag = true
|
||||
case "":
|
||||
default:
|
||||
return errors.Fatal("unknown grouping option: '" + option + "'")
|
||||
}
|
||||
}
|
||||
|
||||
removeSnapshots := 0
|
||||
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
|
||||
var snapshots restic.Snapshots
|
||||
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
|
||||
if len(args) > 0 {
|
||||
// When explicit snapshots args are given, remove them immediately.
|
||||
snapshots = append(snapshots, sn)
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
// When explicit snapshots args are given, remove them immediately.
|
||||
for _, sn := range snapshots {
|
||||
if !opts.DryRun {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
if err = repo.Backend().Remove(gopts.ctx, h); err != nil {
|
||||
return err
|
||||
}
|
||||
Verbosef("removed snapshot %v\n", sn.ID().Str())
|
||||
if !gopts.JSON {
|
||||
Verbosef("removed snapshot %v\n", sn.ID().Str())
|
||||
}
|
||||
removeSnapshots++
|
||||
} else {
|
||||
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
|
||||
if !gopts.JSON {
|
||||
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Determining grouping-keys
|
||||
var tags []string
|
||||
var hostname string
|
||||
var paths []string
|
||||
|
||||
if GroupByTag {
|
||||
tags = sn.Tags
|
||||
sort.StringSlice(tags).Sort()
|
||||
}
|
||||
if GroupByHost {
|
||||
hostname = sn.Hostname
|
||||
}
|
||||
if GroupByPath {
|
||||
paths = sn.Paths
|
||||
}
|
||||
|
||||
sort.StringSlice(sn.Paths).Sort()
|
||||
var k []byte
|
||||
var err error
|
||||
|
||||
k, err = json.Marshal(key{Tags: tags, Hostname: hostname, Paths: paths})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
snapshotGroups, _, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
policy := restic.ExpirePolicy{
|
||||
Last: opts.Last,
|
||||
Hourly: opts.Hourly,
|
||||
Daily: opts.Daily,
|
||||
Weekly: opts.Weekly,
|
||||
Monthly: opts.Monthly,
|
||||
Yearly: opts.Yearly,
|
||||
Within: opts.Within,
|
||||
Tags: opts.KeepTags,
|
||||
}
|
||||
policy := restic.ExpirePolicy{
|
||||
Last: opts.Last,
|
||||
Hourly: opts.Hourly,
|
||||
Daily: opts.Daily,
|
||||
Weekly: opts.Weekly,
|
||||
Monthly: opts.Monthly,
|
||||
Yearly: opts.Yearly,
|
||||
Within: opts.Within,
|
||||
Tags: opts.KeepTags,
|
||||
}
|
||||
|
||||
if policy.Empty() && len(args) == 0 {
|
||||
Verbosef("no policy was specified, no snapshots will be removed\n")
|
||||
}
|
||||
if policy.Empty() && len(args) == 0 {
|
||||
if !gopts.JSON {
|
||||
Verbosef("no policy was specified, no snapshots will be removed\n")
|
||||
}
|
||||
}
|
||||
|
||||
if !policy.Empty() {
|
||||
Verbosef("Applying Policy: %v\n", policy)
|
||||
|
||||
for k, snapshotGroup := range snapshotGroups {
|
||||
var key key
|
||||
if json.Unmarshal([]byte(k), &key) != nil {
|
||||
return err
|
||||
if !policy.Empty() {
|
||||
if !gopts.JSON {
|
||||
Verbosef("Applying Policy: %v\n", policy)
|
||||
}
|
||||
|
||||
// Info
|
||||
Verbosef("snapshots")
|
||||
var infoStrings []string
|
||||
if GroupByTag {
|
||||
infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
|
||||
}
|
||||
if GroupByHost {
|
||||
infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
|
||||
}
|
||||
if GroupByPath {
|
||||
infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
|
||||
}
|
||||
if infoStrings != nil {
|
||||
Verbosef(" for (" + strings.Join(infoStrings, ", ") + ")")
|
||||
}
|
||||
Verbosef(":\n\n")
|
||||
var jsonGroups []*ForgetGroup
|
||||
|
||||
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
||||
|
||||
if len(keep) != 0 && !gopts.Quiet {
|
||||
Printf("keep %d snapshots:\n", len(keep))
|
||||
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
|
||||
if len(remove) != 0 && !gopts.Quiet {
|
||||
Printf("remove %d snapshots:\n", len(remove))
|
||||
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
|
||||
removeSnapshots += len(remove)
|
||||
|
||||
if !opts.DryRun {
|
||||
for _, sn := range remove {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
err = repo.Backend().Remove(gopts.ctx, h)
|
||||
for k, snapshotGroup := range snapshotGroups {
|
||||
if gopts.Verbose >= 1 && !gopts.JSON {
|
||||
err = PrintSnapshotGroupHeader(gopts.stdout, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var key restic.SnapshotGroupKey
|
||||
if json.Unmarshal([]byte(k), &key) != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var fg ForgetGroup
|
||||
fg.Tags = key.Tags
|
||||
fg.Host = key.Hostname
|
||||
fg.Paths = key.Paths
|
||||
|
||||
keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)
|
||||
|
||||
if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||
Printf("keep %d snapshots:\n", len(keep))
|
||||
PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
addJSONSnapshots(&fg.Keep, keep)
|
||||
|
||||
if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
|
||||
Printf("remove %d snapshots:\n", len(remove))
|
||||
PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
|
||||
Printf("\n")
|
||||
}
|
||||
addJSONSnapshots(&fg.Remove, remove)
|
||||
|
||||
fg.Reasons = reasons
|
||||
|
||||
jsonGroups = append(jsonGroups, &fg)
|
||||
|
||||
removeSnapshots += len(remove)
|
||||
|
||||
if !opts.DryRun {
|
||||
for _, sn := range remove {
|
||||
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
|
||||
err = repo.Backend().Remove(gopts.ctx, h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gopts.JSON {
|
||||
err = printJSONForget(gopts.stdout, jsonGroups)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removeSnapshots > 0 && opts.Prune {
|
||||
Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
if !gopts.JSON {
|
||||
Verbosef("%d snapshots have been removed, running prune\n", removeSnapshots)
|
||||
}
|
||||
if !opts.DryRun {
|
||||
return pruneRepository(gopts, repo)
|
||||
}
|
||||
@@ -244,3 +218,28 @@ func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {

    return nil
}

// ForgetGroup helps to print what is forgotten in JSON.
type ForgetGroup struct {
    Tags    []string            `json:"tags"`
    Host    string              `json:"host"`
    Paths   []string            `json:"paths"`
    Keep    []Snapshot          `json:"keep"`
    Remove  []Snapshot          `json:"remove"`
    Reasons []restic.KeepReason `json:"reasons"`
}

func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) {
    for _, sn := range list {
        k := Snapshot{
            Snapshot: sn,
            ID:       sn.ID(),
            ShortID:  sn.ID().Str(),
        }
        *js = append(*js, k)
    }
}

func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error {
    return json.NewEncoder(stdout).Encode(forgets)
}
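To see the JSON shape that printJSONForget emits for `forget --json`, here is a trimmed stand-in: plain structs instead of the embedded *restic.Snapshot and restic.KeepReason types, and made-up snapshot IDs, so it only illustrates the structure, not the real command output.

```go
package main

import (
	"encoding/json"
	"os"
)

// snapshot and forgetGroup are trimmed stand-ins for the Snapshot and
// ForgetGroup types above.
type snapshot struct {
	ID      string `json:"id"`
	ShortID string `json:"short_id"`
}

type forgetGroup struct {
	Tags   []string   `json:"tags"`
	Host   string     `json:"host"`
	Paths  []string   `json:"paths"`
	Keep   []snapshot `json:"keep"`
	Remove []snapshot `json:"remove"`
}

func main() {
	groups := []forgetGroup{{
		Host:  "kasimir",
		Paths: []string{"/home/user/work"},
		// made-up IDs, for illustration only
		Keep:   []snapshot{{ID: "40dc1520...", ShortID: "40dc1520"}},
		Remove: []snapshot{{ID: "79766175...", ShortID: "79766175"}},
	}}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	// one JSON array of groups, same overall shape as printJSONForget
	if err := enc.Encode(groups); err != nil {
		panic(err)
	}
}
```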
@@ -12,7 +12,7 @@ var cmdGenerate = &cobra.Command{
|
||||
Use: "generate [command]",
|
||||
Short: "Generate manual pages and auto-completion files (bash, zsh)",
|
||||
Long: `
|
||||
The "generate" command writes automatically generated files like the man pages
|
||||
The "generate" command writes automatically generated files (like the man pages
|
||||
and the auto-completion files for bash and zsh).
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
|
||||
@@ -149,7 +149,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
|
||||
}
|
||||
|
||||
Printf("Now serving the repository at %s\n", mountpoint)
|
||||
Printf("Don't forget to umount after quitting!\n")
|
||||
Printf("When finished, quit with Ctrl-c or umount the mountpoint.\n")
|
||||
|
||||
debug.Log("serving mount at %v", mountpoint)
|
||||
err = fs.Serve(c, root)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/restic/restic/internal/filter"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/restorer"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -28,13 +29,15 @@ repository.
|
||||
|
||||
// RestoreOptions collects all options for the restore command.
|
||||
type RestoreOptions struct {
|
||||
Exclude []string
|
||||
Include []string
|
||||
Target string
|
||||
Host string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
Verify bool
|
||||
Exclude []string
|
||||
InsensitiveExclude []string
|
||||
Include []string
|
||||
InsensitiveInclude []string
|
||||
Target string
|
||||
Host string
|
||||
Paths []string
|
||||
Tags restic.TagLists
|
||||
Verify bool
|
||||
}
|
||||
|
||||
var restoreOptions RestoreOptions
|
||||
@@ -44,7 +47,9 @@ func init() {
|
||||
|
||||
flags := cmdRestore.Flags()
|
||||
flags.StringArrayVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
|
||||
flags.StringArrayVar(&restoreOptions.InsensitiveExclude, "iexclude", nil, "same as `--exclude` but ignores the casing of filenames")
|
||||
flags.StringArrayVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
|
||||
flags.StringArrayVar(&restoreOptions.InsensitiveInclude, "iinclude", nil, "same as `--include` but ignores the casing of filenames")
|
||||
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
|
||||
|
||||
flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
|
||||
@@ -55,6 +60,16 @@ func init() {
|
||||
|
||||
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
ctx := gopts.ctx
|
||||
hasExcludes := len(opts.Exclude) > 0 || len(opts.InsensitiveExclude) > 0
|
||||
hasIncludes := len(opts.Include) > 0 || len(opts.InsensitiveInclude) > 0
|
||||
|
||||
for i, str := range opts.InsensitiveExclude {
|
||||
opts.InsensitiveExclude[i] = strings.ToLower(str)
|
||||
}
|
||||
|
||||
for i, str := range opts.InsensitiveInclude {
|
||||
opts.InsensitiveInclude[i] = strings.ToLower(str)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(args) == 0:
|
||||
@@ -67,7 +82,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
return errors.Fatal("please specify a directory to restore to (--target)")
|
||||
}
|
||||
|
||||
if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
|
||||
if hasExcludes && hasIncludes {
|
||||
return errors.Fatal("exclude and include patterns are mutually exclusive")
|
||||
}
|
||||
|
||||
@@ -125,11 +140,16 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
Warnf("error for exclude pattern: %v", err)
|
||||
}
|
||||
|
||||
matchedInsensitive, _, err := filter.List(opts.InsensitiveExclude, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
// An exclude filter is basically a 'wildcard but foo',
|
||||
// so even if a childMayMatch, other children of a dir may not,
|
||||
// therefore childMayMatch does not matter, but we should not go down
|
||||
// unless the dir is selected for restore
|
||||
selectedForRestore = !matched
|
||||
selectedForRestore = !matched && !matchedInsensitive
|
||||
childMayBeSelected = selectedForRestore && node.Type == "dir"
|
||||
|
||||
return selectedForRestore, childMayBeSelected
|
||||
@@ -141,15 +161,20 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
|
||||
Warnf("error for include pattern: %v", err)
|
||||
}
|
||||
|
||||
selectedForRestore = matched
|
||||
childMayBeSelected = childMayMatch && node.Type == "dir"
|
||||
matchedInsensitive, childMayMatchInsensitive, err := filter.List(opts.InsensitiveInclude, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
selectedForRestore = matched || matchedInsensitive
|
||||
childMayBeSelected = (childMayMatch || childMayMatchInsensitive) && node.Type == "dir"
|
||||
|
||||
return selectedForRestore, childMayBeSelected
|
||||
}
|
||||
|
||||
if len(opts.Exclude) > 0 {
|
||||
if hasExcludes {
|
||||
res.SelectFilter = selectExcludeFilter
|
||||
} else if len(opts.Include) > 0 {
|
||||
} else if hasIncludes {
|
||||
res.SelectFilter = selectIncludeFilter
|
||||
}
|
||||
|
||||
|
||||
@@ -32,6 +32,7 @@ type SnapshotOptions struct {
|
||||
Paths []string
|
||||
Compact bool
|
||||
Last bool
|
||||
GroupBy string
|
||||
}
|
||||
|
||||
var snapshotOptions SnapshotOptions
|
||||
@@ -45,6 +46,7 @@ func init() {
|
||||
f.StringArrayVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
|
||||
f.BoolVarP(&snapshotOptions.Compact, "compact", "c", false, "use compact format")
|
||||
f.BoolVar(&snapshotOptions.Last, "last", false, "only show the last snapshot for each host and path")
|
||||
f.StringVarP(&snapshotOptions.GroupBy, "group-by", "g", "", "string for grouping snapshots by host,paths,tags")
|
||||
}
|
||||
|
||||
func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
|
||||
@@ -64,25 +66,41 @@ func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) erro
|
||||
ctx, cancel := context.WithCancel(gopts.ctx)
|
||||
defer cancel()
|
||||
|
||||
var list restic.Snapshots
|
||||
var snapshots restic.Snapshots
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
|
||||
list = append(list, sn)
|
||||
snapshots = append(snapshots, sn)
|
||||
}
|
||||
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if opts.Last {
|
||||
list = FilterLastSnapshots(list)
|
||||
for k, list := range snapshotGroups {
|
||||
if opts.Last {
|
||||
list = FilterLastSnapshots(list)
|
||||
}
|
||||
sort.Sort(sort.Reverse(list))
|
||||
snapshotGroups[k] = list
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(list))
|
||||
|
||||
if gopts.JSON {
|
||||
err := printSnapshotsJSON(gopts.stdout, list)
|
||||
err := printSnapshotGroupJSON(gopts.stdout, snapshotGroups, grouped)
|
||||
if err != nil {
|
||||
Warnf("error printing snapshot: %v\n", err)
|
||||
Warnf("error printing snapshots: %v\n", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
|
||||
|
||||
for k, list := range snapshotGroups {
|
||||
if grouped {
|
||||
err := PrintSnapshotGroupHeader(gopts.stdout, k)
|
||||
if err != nil {
|
||||
Warnf("error printing snapshots: %v\n", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
PrintSnapshots(gopts.stdout, list, nil, opts.Compact)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -223,6 +241,42 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
|
||||
tab.Write(stdout)
|
||||
}
|
||||
|
||||
// PrintSnapshotGroupHeader prints which group of the group-by option the
|
||||
// following snapshots belong to.
|
||||
// Prints nothing, if we did not group at all.
|
||||
func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
|
||||
var key restic.SnapshotGroupKey
|
||||
var err error
|
||||
|
||||
err = json.Unmarshal([]byte(groupKeyJSON), &key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if key.Hostname == "" && key.Tags == nil && key.Paths == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Info
|
||||
fmt.Fprintf(stdout, "snapshots")
|
||||
var infoStrings []string
|
||||
if key.Hostname != "" {
|
||||
infoStrings = append(infoStrings, "host ["+key.Hostname+"]")
|
||||
}
|
||||
if key.Tags != nil {
|
||||
infoStrings = append(infoStrings, "tags ["+strings.Join(key.Tags, ", ")+"]")
|
||||
}
|
||||
if key.Paths != nil {
|
||||
infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]")
|
||||
}
|
||||
if infoStrings != nil {
|
||||
fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", "))
|
||||
}
|
||||
fmt.Fprintf(stdout, ":\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshot helps to print Snaphots as JSON with their ID included.
|
||||
type Snapshot struct {
|
||||
*restic.Snapshot
|
||||
@@ -231,19 +285,58 @@ type Snapshot struct {
|
||||
ShortID string `json:"short_id"`
|
||||
}
|
||||
|
||||
// printSnapshotsJSON writes the JSON representation of list to stdout.
|
||||
func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
|
||||
// SnapshotGroup helps to print SnaphotGroups as JSON with their GroupReasons included.
|
||||
type SnapshotGroup struct {
|
||||
GroupKey restic.SnapshotGroupKey `json:"group_key"`
|
||||
Snapshots []Snapshot `json:"snapshots"`
|
||||
}
|
||||
|
||||
// printSnapshotsJSON writes the JSON representation of list to stdout.
|
||||
func printSnapshotGroupJSON(stdout io.Writer, snGroups map[string]restic.Snapshots, grouped bool) error {
|
||||
if grouped {
|
||||
var snapshotGroups []SnapshotGroup
|
||||
|
||||
for k, list := range snGroups {
|
||||
var key restic.SnapshotGroupKey
|
||||
var err error
|
||||
var snapshots []Snapshot
|
||||
|
||||
err = json.Unmarshal([]byte(k), &key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, sn := range list {
|
||||
k := Snapshot{
|
||||
Snapshot: sn,
|
||||
ID: sn.ID(),
|
||||
ShortID: sn.ID().Str(),
|
||||
}
|
||||
snapshots = append(snapshots, k)
|
||||
}
|
||||
|
||||
group := SnapshotGroup{
|
||||
GroupKey: key,
|
||||
Snapshots: snapshots,
|
||||
}
|
||||
snapshotGroups = append(snapshotGroups, group)
|
||||
}
|
||||
|
||||
return json.NewEncoder(stdout).Encode(snapshotGroups)
|
||||
}
|
||||
|
||||
// Old behavior
|
||||
var snapshots []Snapshot
|
||||
|
||||
for _, sn := range list {
|
||||
|
||||
k := Snapshot{
|
||||
Snapshot: sn,
|
||||
ID: sn.ID(),
|
||||
ShortID: sn.ID().Str(),
|
||||
for _, list := range snGroups {
|
||||
for _, sn := range list {
|
||||
k := Snapshot{
|
||||
Snapshot: sn,
|
||||
ID: sn.ID(),
|
||||
ShortID: sn.ID().Str(),
|
||||
}
|
||||
snapshots = append(snapshots, k)
|
||||
}
|
||||
snapshots = append(snapshots, k)
|
||||
}
|
||||
|
||||
return json.NewEncoder(stdout).Encode(snapshots)
|
||||
|
||||
@@ -36,7 +36,8 @@ The modes are:
|
||||
* raw-data: Counts the size of blobs in the repository, regardless of
|
||||
how many files reference them.
|
||||
* blobs-per-file: A combination of files-by-contents and raw-data.
|
||||
* Refer to the online manual for more details about each mode.
|
||||
|
||||
Refer to the online manual for more details about each mode.
|
||||
`,
|
||||
DisableAutoGenTag: true,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
@@ -88,6 +88,18 @@ func rejectByPattern(patterns []string) RejectByNameFunc {
    }
}

// Same as `rejectByPattern` but case insensitive.
func rejectByInsensitivePattern(patterns []string) RejectByNameFunc {
    for index, path := range patterns {
        patterns[index] = strings.ToLower(path)
    }

    rejFunc := rejectByPattern(patterns)
    return func(item string) bool {
        return rejFunc(strings.ToLower(item))
    }
}
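rejectByInsensitivePattern simply lower-cases the patterns once and the candidate path on every call, then delegates to the case-sensitive matcher. A rough sketch of the same wrapping idea, using path.Match as a stand-in for restic's internal filter package (which matches full paths and supports `**`):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

type rejectFunc func(item string) bool

// rejectByPattern is a stand-in for the restic helper of the same name:
// it reports whether the item matches any of the glob patterns.
func rejectByPattern(patterns []string) rejectFunc {
	return func(item string) bool {
		for _, pat := range patterns {
			if ok, _ := path.Match(pat, item); ok {
				return true
			}
		}
		return false
	}
}

// rejectByInsensitivePattern lower-cases the patterns once and the item
// on every call, then reuses the case-sensitive matcher.
func rejectByInsensitivePattern(patterns []string) rejectFunc {
	for i, p := range patterns {
		patterns[i] = strings.ToLower(p)
	}
	rej := rejectByPattern(patterns)
	return func(item string) bool {
		return rej(strings.ToLower(item))
	}
}

func main() {
	reject := rejectByInsensitivePattern([]string{"*.go"})
	for _, name := range []string{"foo.GO", "foo.c"} {
		fmt.Printf("%-8s rejected: %v\n", name, reject(name))
	}
}
```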
|
||||
// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path
|
||||
// should be excluded. The RejectByNameFunc considers a file to be excluded when
|
||||
// it resides in a directory with an exclusion file, that is specified by
|
||||
|
||||
@@ -36,6 +36,33 @@ func TestRejectByPattern(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRejectByInsensitivePattern(t *testing.T) {
|
||||
var tests = []struct {
|
||||
filename string
|
||||
reject bool
|
||||
}{
|
||||
{filename: "/home/user/foo.GO", reject: true},
|
||||
{filename: "/home/user/foo.c", reject: false},
|
||||
{filename: "/home/user/foobar", reject: false},
|
||||
{filename: "/home/user/FOObar/x", reject: true},
|
||||
{filename: "/home/user/README", reject: false},
|
||||
{filename: "/home/user/readme.md", reject: true},
|
||||
}
|
||||
|
||||
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
reject := rejectByInsensitivePattern(patterns)
|
||||
res := reject(tc.filename)
|
||||
if res != tc.reject {
|
||||
t.Fatalf("wrong result for filename %v: want %v, got %v",
|
||||
tc.filename, tc.reject, res)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsExcludedByFile(t *testing.T) {
|
||||
const (
|
||||
tagFilename = "CACHEDIR.TAG"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -33,11 +34,12 @@ import (
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"os/exec"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
var version = "0.9.4"
|
||||
var version = "0.9.6"
|
||||
|
||||
// TimeFormat is the format used for all timestamps printed by restic.
|
||||
const TimeFormat = "2006-01-02 15:04:05"
|
||||
@@ -273,15 +275,10 @@ func resolvePassword(opts GlobalOptions) (string, error) {

// readPassword reads the password from the given reader directly.
func readPassword(in io.Reader) (password string, err error) {
    buf := make([]byte, 1000)
    n, err := io.ReadFull(in, buf)
    buf = buf[:n]
    sc := bufio.NewScanner(in)
    sc.Scan()

    if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
        return "", errors.Wrap(err, "ReadFull")
    }

    return strings.TrimRight(string(buf), "\r\n"), nil
    return sc.Text(), errors.Wrap(err, "Scan")
}
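The rewritten readPassword reads exactly one line with bufio.Scanner instead of a fixed 1000-byte ReadFull. A quick, self-contained demonstration of that behaviour, reading from a strings.Reader in place of stdin:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	in := strings.NewReader("s3cr3t\nrest of stdin is left untouched\n")

	sc := bufio.NewScanner(in)
	sc.Scan() // reads up to the first newline

	// Text() returns the line without the trailing "\n" (or "\r\n"),
	// so no manual TrimRight is needed anymore.
	fmt.Printf("password: %q, err: %v\n", sc.Text(), sc.Err())
}
```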
|
||||
// readPasswordTerminal reads the password from the given reader which must be a
|
||||
@@ -323,7 +320,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
|
||||
}
|
||||
|
||||
if len(password) == 0 {
|
||||
return "", errors.Fatal("an empty password is not a password")
|
||||
return "", errors.New("an empty password is not a password")
|
||||
}
|
||||
|
||||
return password, nil
|
||||
@@ -336,13 +333,15 @@ func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, er
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
pw2, err := ReadPassword(gopts, prompt2)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if stdinIsTerminal() {
|
||||
pw2, err := ReadPassword(gopts, prompt2)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if pw1 != pw2 {
|
||||
return "", errors.Fatal("passwords do not match")
|
||||
if pw1 != pw2 {
|
||||
return "", errors.Fatal("passwords do not match")
|
||||
}
|
||||
}
|
||||
|
||||
return pw1, nil
|
||||
@@ -367,17 +366,35 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {

    s := repository.New(be)

    opts.password, err = ReadPassword(opts, "enter password for repository: ")
    if err != nil {
        return nil, err
    passwordTriesLeft := 1
    if stdinIsTerminal() && opts.password == "" {
        passwordTriesLeft = 3
    }

    err = s.SearchKey(opts.ctx, opts.password, maxKeys, opts.KeyHint)
    for ; passwordTriesLeft > 0; passwordTriesLeft-- {
        opts.password, err = ReadPassword(opts, "enter password for repository: ")
        if err != nil && passwordTriesLeft > 1 {
            opts.password = ""
            fmt.Printf("%s. Try again\n", err)
        }
        if err != nil {
            continue
        }

        err = s.SearchKey(opts.ctx, opts.password, maxKeys, opts.KeyHint)
        if err != nil && passwordTriesLeft > 1 {
            opts.password = ""
            fmt.Printf("%s. Try again\n", err)
        }
    }
    if err != nil {
        return nil, err
        if errors.IsFatal(err) {
            return nil, err
        }
        return nil, errors.Fatalf("%s", err)
    }

    if stdoutIsTerminal() {
    if stdoutIsTerminal() && !opts.JSON {
        id := s.Config().ID
        if len(id) > 8 {
            id = id[:8]
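The new OpenRepository logic prompts up to three times when stdin is a terminal and no password was supplied otherwise. The sketch below reproduces that retry shape with hypothetical readPassword and searchKey stand-ins (the real functions live in restic's internal packages) and is simplified in that it stops as soon as a key matches.

```go
package main

import (
	"errors"
	"fmt"
)

// readPassword and searchKey are hypothetical stand-ins for
// ReadPassword and repository.SearchKey.
func readPassword(attempt int) (string, error) {
	// pretend the user types the right password on the third try
	if attempt < 3 {
		return "wrong", nil
	}
	return "correct", nil
}

func searchKey(password string) error {
	if password != "correct" {
		return errors.New("wrong password or no key found")
	}
	return nil
}

func main() {
	triesLeft := 3 // interactive terminal, no password given
	attempt := 0
	var err error

	for ; triesLeft > 0; triesLeft-- {
		attempt++
		var password string
		password, err = readPassword(attempt)
		if err != nil {
			continue
		}

		err = searchKey(password)
		if err == nil {
			break // simplification: stop once a key matches
		}
		if triesLeft > 1 {
			fmt.Printf("%s. Try again\n", err)
		}
	}

	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("repository opened after", attempt, "attempt(s)")
}
```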
@@ -427,7 +444,7 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
|
||||
}
|
||||
} else {
|
||||
if stdoutIsTerminal() {
|
||||
Verbosef("found %d old cache directories in %v, pass --cleanup-cache to remove them\n",
|
||||
Verbosef("found %d old cache directories in %v, run `restic cache --cleanup` to remove them\n",
|
||||
len(oldCacheDirs), c.Base)
|
||||
}
|
||||
}
|
||||
@@ -468,6 +485,10 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
|
||||
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
}
|
||||
|
||||
if cfg.Region == "" {
|
||||
cfg.Region = os.Getenv("AWS_DEFAULT_REGION")
|
||||
}
|
||||
|
||||
if err := opts.Apply(loc.Scheme, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -219,6 +219,35 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||
rtest.OK(t, runForget(opts, gopts, args))
|
||||
}
|
||||
|
||||
func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
oldJSON := gopts.JSON
|
||||
gopts.stdout = buf
|
||||
gopts.JSON = true
|
||||
defer func() {
|
||||
gopts.stdout = os.Stdout
|
||||
gopts.JSON = oldJSON
|
||||
}()
|
||||
|
||||
opts := ForgetOptions{
|
||||
DryRun: true,
|
||||
Last: 1,
|
||||
}
|
||||
|
||||
rtest.OK(t, runForget(opts, gopts, args))
|
||||
|
||||
var forgets []*ForgetGroup
|
||||
rtest.OK(t, json.Unmarshal(buf.Bytes(), &forgets))
|
||||
|
||||
rtest.Assert(t, len(forgets) == 1,
|
||||
"Expected 1 snapshot group, got %v", len(forgets))
|
||||
rtest.Assert(t, len(forgets[0].Keep) == 1,
|
||||
"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
|
||||
rtest.Assert(t, len(forgets[0].Remove) == 2,
|
||||
"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
|
||||
return
|
||||
}
|
||||
|
||||
func testRunPrune(t testing.TB, gopts GlobalOptions) {
|
||||
rtest.OK(t, runPrune(gopts))
|
||||
}
|
||||
@@ -1051,6 +1080,7 @@ func TestPrune(t *testing.T) {
|
||||
rtest.Assert(t, len(snapshotIDs) == 3,
|
||||
"expected 3 snapshot, got %v", snapshotIDs)
|
||||
|
||||
testRunForgetJSON(t, env.gopts)
|
||||
testRunForget(t, env.gopts, firstSnapshot[0].String())
|
||||
testRunPrune(t, env.gopts)
|
||||
testRunCheck(t, env.gopts)
|
||||
|
||||
@@ -48,10 +48,6 @@ installed from the official repos, e.g. with ``apt-get``:
|
||||
$ apt-get install restic
|
||||
|
||||
|
||||
.. warning:: Please be aware that, at the time of writing, Debian *stable*
|
||||
has ``restic`` version 0.3.3 which is very old. The *testing* and *unstable*
|
||||
branches have recent versions of ``restic``.
|
||||
|
||||
Fedora
|
||||
======
|
||||
|
||||
@@ -78,6 +74,12 @@ If you are using macOS, you can install restic using the
|
||||
|
||||
$ brew install restic
|
||||
|
||||
You may also install it using `MacPorts <https://www.macports.org/>`__:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ sudo port install restic
|
||||
|
||||
Nix & NixOS
|
||||
===========
|
||||
|
||||
@@ -107,6 +109,15 @@ On FreeBSD (11 and probably later versions), you can install restic using ``pkg
|
||||
|
||||
# pkg install restic
|
||||
|
||||
openSUSE
|
||||
========
|
||||
|
||||
On openSUSE (leap 15.0 and greater, and tumbleweed), you can install restic using the ``zypper`` package manager:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# zypper install restic
|
||||
|
||||
RHEL & CentOS
|
||||
=============
|
||||
|
||||
@@ -168,28 +179,28 @@ There's both pre-compiled binaries for different platforms as well as the source
|
||||
code available for download. Just download and run the one matching your system.
|
||||
|
||||
The official binaries can be updated in place using the ``restic self-update``
|
||||
command:
|
||||
command (needs restic 0.9.3 or later):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic version
|
||||
restic 0.9.1 compiled with go1.10.3 on linux/amd64
|
||||
restic 0.9.3 compiled with go1.11.2 on linux/amd64
|
||||
|
||||
$ restic self-update
|
||||
find latest release of restic at GitHub
|
||||
latest version is 0.9.2
|
||||
latest version is 0.9.4
|
||||
download file SHA256SUMS
|
||||
download SHA256SUMS
|
||||
download file SHA256SUMS
|
||||
download SHA256SUMS.asc
|
||||
GPG signature verification succeeded
|
||||
download restic_0.9.2_linux_amd64.bz2
|
||||
downloaded restic_0.9.2_linux_amd64.bz2
|
||||
download restic_0.9.4_linux_amd64.bz2
|
||||
downloaded restic_0.9.4_linux_amd64.bz2
|
||||
saved 12115904 bytes in ./restic
|
||||
successfully updated restic to version 0.9.2
|
||||
successfully updated restic to version 0.9.4
|
||||
|
||||
$ restic version
|
||||
restic 0.9.2 compiled with go1.10.3 on linux/amd64
|
||||
restic 0.9.4 compiled with go1.12.1 on linux/amd64
|
||||
|
||||
The ``self-update`` command uses the GPG signature on the files uploaded to
|
||||
GitHub to verify their authenticity. No external programs are necessary.
|
||||
|
||||
@@ -122,7 +122,17 @@ Last, if you'd like to use an entirely different program to create the
|
||||
SFTP connection, you can specify the command to be run with the option
|
||||
``-o sftp.command="foobar"``.
|
||||
|
||||
.. note:: Please be aware that sftp servers close connections when no data is
|
||||
received by the client. This can happen when restic is processing huge
|
||||
amounts of unchanged data. To avoid this issue add the following lines
|
||||
to the client’s .ssh/config file:
|
||||
|
||||
::
|
||||
|
||||
ServerAliveInterval 60
|
||||
ServerAliveCountMax 240
|
||||
|
||||
|
||||
REST Server
|
||||
***********
|
||||
|
||||
@@ -187,10 +197,11 @@ default location:
|
||||
Please note that knowledge of your password is required to access the repository.
|
||||
Losing your password means that your data is irrecoverably lost.
|
||||
|
||||
It is not possible at the moment to have restic create a new bucket in a
|
||||
different location, so you need to create it using a different program.
|
||||
Afterwards, the S3 server (``s3.amazonaws.com``) will redirect restic to
|
||||
the correct endpoint.
|
||||
If needed, you can manually specify the region to use by either setting the
|
||||
environment variable ``AWS_DEFAULT_REGION`` or calling restic with an option
|
||||
parameter like ``-o s3.region="us-east-1"``. If the region is not specified,
|
||||
the default region is used. Afterwards, the S3 server (at least for AWS,
|
||||
``s3.amazonaws.com``) will redirect restic to the correct endpoint.
|
||||
|
||||
Until version 0.8.0, restic used a default prefix of ``restic``, so the files
|
||||
in the bucket were placed in a directory named ``restic``. If you want to
|
||||
@@ -268,6 +279,18 @@ the naming convention of those variables follows the official Python Swift clien
|
||||
$ export OS_PROJECT_NAME=<MY_PROJECT_NAME>
|
||||
$ export OS_PROJECT_DOMAIN_NAME=<MY_PROJECT_DOMAIN_NAME>
|
||||
|
||||
# For keystone v3 application credential authentication (application credential id)
|
||||
$ export OS_AUTH_URL=<MY_AUTH_URL>
|
||||
$ export OS_APPLICATION_CREDENTIAL_ID=<MY_APPLICATION_CREDENTIAL_ID>
|
||||
$ export OS_APPLICATION_CREDENTIAL_SECRET=<MY_APPLICATION_CREDENTIAL_SECRET>
|
||||
|
||||
# For keystone v3 application credential authentication (application credential name)
|
||||
$ export OS_AUTH_URL=<MY_AUTH_URL>
|
||||
$ export OS_USERNAME=<MY_USERNAME>
|
||||
$ export OS_USER_DOMAIN_NAME=<MY_DOMAIN_NAME>
|
||||
$ export OS_APPLICATION_CREDENTIAL_NAME=<MY_APPLICATION_CREDENTIAL_NAME>
|
||||
$ export OS_APPLICATION_CREDENTIAL_SECRET=<MY_APPLICATION_CREDENTIAL_SECRET>
|
||||
|
||||
# For authentication based on tokens
|
||||
$ export OS_STORAGE_URL=<MY_STORAGE_URL>
|
||||
$ export OS_AUTH_TOKEN=<MY_AUTH_TOKEN>
|
||||
@@ -302,14 +325,14 @@ Backblaze B2
|
||||
|
||||
Restic can backup data to any Backblaze B2 bucket. You need to first setup the
|
||||
following environment variables with the credentials you can find in the
|
||||
dashboard in on the "Buckets" page when signed into your B2 account:
|
||||
dashboard on the "Buckets" page when signed into your B2 account:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ export B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
|
||||
$ export B2_ACCOUNT_KEY=<MY_SECRET_ACCOUNT_KEY>
|
||||
$ export B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
|
||||
|
||||
.. note:: In case you want to use Backblaze Application Keys replace <MY_APPLICATION_KEY_ID> and <MY_SECRET_ACCOUNT_KEY> with <applicationKeyId> and <applicationKey> respectively.
|
||||
.. note:: As of version 0.9.2, restic supports both master and non-master `application keys <https://www.backblaze.com/b2/docs/application_keys.html>`__. If using a non-master application key, ensure that it is created with at least **read and write** access to the B2 bucket. On earlier versions of restic, a master application key is required.
|
||||
|
||||
You can then initialize a repository stored at Backblaze B2. If the
|
||||
bucket does not exist yet and the credentials you passed to restic have the
|
||||
@@ -520,7 +543,7 @@ interaction. If you use emulation environments like
|
||||
``Mintty`` or ``rxvt``, you may get a password error.
|
||||
|
||||
You can workaround this by using a special tool called ``winpty`` (look
|
||||
`here <https://sourceforge.net/p/msys2/wiki/Porting/>`__ and
|
||||
`here <https://github.com/msys2/msys2/wiki/Porting>`__ and
|
||||
`here <https://github.com/rprichard/winpty>`__ for detail information).
|
||||
On MSYS2, you can install ``winpty`` as follows:
|
||||
|
||||
|
||||
@@ -132,19 +132,21 @@ Now is a good time to run ``restic check`` to verify that all data
|
||||
is properly stored in the repository. You should run this command regularly
|
||||
to make sure the internal structure of the repository is free of errors.
|
||||
|
||||
Including and Excluding Files
|
||||
*****************************
|
||||
Excluding Files
|
||||
***************
|
||||
|
||||
You can exclude folders and files by specifying exclude patterns, currently
|
||||
the exclude options are:
|
||||
|
||||
- ``--exclude`` Specified one or more times to exclude one or more items
|
||||
- ``--iexclude`` Same as ``--exclude`` but ignores the case of paths
|
||||
- ``--exclude-caches`` Specified once to exclude folders containing a special file
|
||||
- ``--exclude-file`` Specified one or more times to exclude items listed in a given file
|
||||
- ``--exclude-if-present`` Specified one or more times to exclude a folders content
|
||||
if it contains a given file (optionally having a given header)
|
||||
- ``--exclude-if-present foo`` Specified one or more times to exclude a folder's content if it contains a file called ``foo`` (optionally having a given header, no wildcards for the file name supported)
|
||||
|
||||
Let's say we have a file called ``excludes.txt`` with the following content:
|
||||
Please see ``restic help backup`` for more specific information about each exclude option.
|
||||
|
||||
Let's say we have a file called ``excludes.txt`` with the following content:
|
||||
|
||||
::
|
||||
|
||||
@@ -159,34 +161,31 @@ It can be used like this:
|
||||
|
||||
$ restic -r /srv/restic-repo backup ~/work --exclude="*.c" --exclude-file=excludes.txt
|
||||
|
||||
This instruct restic to exclude files matching the following criteria:
|
||||
This instructs restic to exclude files matching the following criteria:
|
||||
|
||||
* All files matching ``*.c`` (parameter ``--exclude``)
|
||||
* All files matching ``*.go`` (second line in ``excludes.txt``)
|
||||
* All files and sub-directories named ``bar`` which reside somewhere below a directory called ``foo`` (fourth line in ``excludes.txt``)
|
||||
* All files matching ``*.c`` (parameter ``--exclude``)
|
||||
|
||||
Please see ``restic help backup`` for more specific information about each exclude option.
|
||||
|
||||
Patterns use `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__ internally,
|
||||
see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for
|
||||
syntax. Patterns are tested against the full path of a file/dir to be saved,
|
||||
even if restic is passed a relative path to save. Environment-variables in
|
||||
exclude-files are expanded with `os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__,
|
||||
so `/home/$USER/foo` will be expanded to `/home/bob/foo` for the user `bob`. To
|
||||
get a literal dollar sign, write `$$` to the file.
|
||||
even if restic is passed a relative path to save.
|
||||
|
||||
Environment-variables in exclude files are expanded with `os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__,
|
||||
so ``/home/$USER/foo`` will be expanded to ``/home/bob/foo`` for the user ``bob``.
|
||||
To get a literal dollar sign, write ``$$`` to the file.
|
||||
|
||||
Patterns need to match on complete path components. For example, the pattern ``foo``:
|
||||
|
||||
* matches ``/dir1/foo/dir2/file`` and ``/dir/foo``
|
||||
* does not match ``/dir/foobar`` or ``barfoo``
|
||||
|
||||
A trailing ``/`` is ignored, a leading ``/`` anchors the
|
||||
pattern at the root directory. This means, ``/bin`` matches ``/bin/bash`` but
|
||||
does not match ``/usr/bin/restic``.
|
||||
A trailing ``/`` is ignored, a leading ``/`` anchors the pattern at the root directory.
|
||||
This means, ``/bin`` matches ``/bin/bash`` but does not match ``/usr/bin/restic``.
|
||||
|
||||
Regular wildcards cannot be used to match over the
|
||||
directory separator ``/``. For example: ``b*ash`` matches ``/bin/bash`` but does not match
|
||||
``/bin/ash``.
|
||||
Regular wildcards cannot be used to match over the directory separator ``/``.
|
||||
For example: ``b*ash`` matches ``/bin/bash`` but does not match ``/bin/ash``.
|
||||
|
||||
For this, the special wildcard ``**`` can be used to match arbitrary
|
||||
sub-directories: The pattern ``foo/**/bar`` matches:
|
||||
@@ -195,6 +194,23 @@ sub-directories: The pattern ``foo/**/bar`` matches:
|
||||
* ``/foo/bar/file``
|
||||
* ``/tmp/foo/bar``
|
||||
|
||||
Spaces in patterns listed in an exclude file can be specified verbatim. That is,
|
||||
in order to exclude a file named ``foo bar star.txt``, put that just as it reads
|
||||
on one line in the exclude file. Please note that beginning and trailing spaces
|
||||
are trimmed - in order to match these, use e.g. a ``*`` at the beginning or end
|
||||
of the filename.
|
||||
|
||||
Spaces in patterns listed in the other exclude options (e.g. ``--exclude`` on the
|
||||
command line) are specified in different ways depending on the operating system
|
||||
and/or shell. Restic itself does not need any escaping, but your shell may need
|
||||
some escaping in order to pass the name/pattern as a single argument to restic.
|
||||
|
||||
On most Unixy shells, you can either quote or use backslashes. For example:
|
||||
|
||||
* ``--exclude='foo bar star/foo.txt'``
|
||||
* ``--exclude="foo bar star/foo.txt"``
|
||||
* ``--exclude=foo\ bar\ star/foo.txt``
|
||||
|
||||
By specifying the option ``--one-file-system`` you can instruct restic
|
||||
to only backup files from the file systems the initially specified files
|
||||
or directories reside on. For example, calling restic like this won't
|
||||
@@ -207,10 +223,13 @@ backup ``/sys`` or ``/dev`` on a Linux system:
|
||||
.. note:: ``--one-file-system`` is currently unsupported on Windows, and will
|
||||
cause the backup to immediately fail with an error.
|
||||
|
||||
By using the ``--files-from`` option you can read the files you want to
|
||||
backup from one or more files. This is especially useful if a lot of files have
|
||||
to be backed up that are not in the same folder or are maybe pre-filtered
|
||||
by other software.
|
||||
Including Files
|
||||
***************
|
||||
|
||||
By using the ``--files-from`` option you can read the files you want to back
|
||||
up from one or more files. This is especially useful if a lot of files have
|
||||
to be backed up that are not in the same folder or are maybe pre-filtered by
|
||||
other software.
|
||||
|
||||
For example maybe you want to backup files which have a name that matches a
|
||||
certain pattern:
|
||||
@@ -232,7 +251,11 @@ args:
|
||||
|
||||
$ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup /tmp/some_additional_file
|
||||
|
||||
Paths in the listing file can be absolute or relative.
|
||||
Paths in the listing file can be absolute or relative. Please note that
|
||||
patterns listed in a ``--files-from`` file are treated the same way as
|
||||
exclude patterns are, which means that beginning and trailing spaces are
|
||||
trimmed and special characters must be escaped. See the documentation
|
||||
above for more information.
|
||||
|
||||
Comparing Snapshots
|
||||
*******************
|
||||
@@ -279,6 +302,10 @@ written, and the next backup needs to write new metadata again. If you really
|
||||
want to save the access time for files and directories, you can pass the
|
||||
``--with-atime`` option to the ``backup`` command.
|
||||
|
||||
In filesystems that do not support inode consistency, like FUSE-based ones and pCloud, it is
|
||||
possible to ignore inode on changed files comparison by passing ``--ignore-inode`` to
|
||||
``backup`` command.
|
||||
|
||||
Reading data from stdin
|
||||
***********************
|
||||
|
||||
@@ -289,6 +316,7 @@ this mode of operation, just supply the option ``--stdin`` to the
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ set -o pipefail
|
||||
$ mysqldump [...] | restic -r /srv/restic-repo backup --stdin
|
||||
|
||||
This creates a new snapshot of the output of ``mysqldump``. You can then
|
||||
@@ -302,6 +330,13 @@ specified with ``--stdin-filename``, e.g. like this:
|
||||
|
||||
$ mysqldump [...] | restic -r /srv/restic-repo backup --stdin --stdin-filename production.sql
|
||||
|
||||
The option ``pipefail`` is highly recommended so that a non-zero exit code from
|
||||
one of the programs in the pipe (e.g. ``mysqldump`` here) makes the whole chain
|
||||
return a non-zero exit code. Refer to the `Use the Unofficial Bash Strict Mode
|
||||
<http://redsymbol.net/articles/unofficial-bash-strict-mode/>`__ for more
|
||||
details on this.
|
||||
|
||||
|
||||
Tags for backup
|
||||
***************
|
||||
|
||||
@@ -343,6 +378,7 @@ environment variables. The following list of environment variables:
|
||||
RESTIC_REPOSITORY Location of repository (replaces -r)
|
||||
RESTIC_PASSWORD_FILE Location of password file (replaces --password-file)
|
||||
RESTIC_PASSWORD The actual password for the repository
|
||||
RESTIC_PASSWORD_COMMAND Command printing the password for the repository to stdout
|
||||
|
||||
AWS_ACCESS_KEY_ID Amazon S3 access key ID
|
||||
AWS_SECRET_ACCESS_KEY Amazon S3 secret access key
|
||||
@@ -360,7 +396,11 @@ environment variables. The following list of environment variables:
|
||||
|
||||
OS_USER_DOMAIN_NAME User domain name for keystone authentication
|
||||
OS_PROJECT_NAME Project name for keystone authentication
|
||||
OS_PROJECT_DOMAIN_NAME PRoject domain name for keystone authentication
|
||||
OS_PROJECT_DOMAIN_NAME Project domain name for keystone authentication
|
||||
|
||||
OS_APPLICATION_CREDENTIAL_ID Application Credential ID (keystone v3)
|
||||
OS_APPLICATION_CREDENTIAL_NAME Application Credential Name (keystone v3)
|
||||
OS_APPLICATION_CREDENTIAL_SECRET Application Credential Secret (keystone v3)
|
||||
|
||||
OS_STORAGE_URL Storage URL for token authentication
|
||||
OS_AUTH_TOKEN Auth token for token authentication
|
||||
|
||||
@@ -56,6 +56,31 @@ Or filter by host:
|
||||
|
||||
Combining filters is also possible.
|
||||
|
||||
Furthermore you can group the output by the same filters (host, paths, tags):
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ restic -r /srv/restic-repo snapshots --group-by host
|
||||
|
||||
enter password for repository:
|
||||
snapshots for (host [kasimir])
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
|
||||
79766175 2015-05-08 21:40:19 kasimir /home/user/work
|
||||
2 snapshots
|
||||
snapshots for (host [luigi])
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
bdbd3439 2015-05-08 21:45:17 luigi /home/art
|
||||
9f0bc19e 2015-05-08 21:46:11 luigi /srv
|
||||
2 snapshots
|
||||
snapshots for (host [kazik])
|
||||
ID Date Host Tags Directory
|
||||
----------------------------------------------------------------------
|
||||
590c8fc8 2015-05-08 21:47:38 kazik /srv
|
||||
1 snapshots
|
||||
|
||||
|
||||
Checking a repo's integrity and consistency
|
||||
===========================================
|
||||
@@ -99,10 +124,11 @@ data files:
|
||||
check snapshots, trees and blobs
|
||||
read all data
|
||||
|
||||
Use ``--read-data-subset=n/t`` parameter to check subset of repository data
|
||||
files. The parameter takes two values, ``n`` and ``t``. All repository data
|
||||
files are logically devided in ``t`` roughly equal groups and only files that
|
||||
belong to the group number ``n`` are checked. For example, the following
|
||||
Use the ``--read-data-subset=n/t`` parameter to check only a subset of the
|
||||
repository data files at a time. The parameter takes two values, ``n`` and
|
||||
``t``. When the check command runs, all data files in the repository are
|
||||
logically divided in ``t`` (roughly equal) groups, and only files that
|
||||
belong to the group number ``n`` are checked. For example, the following
|
||||
commands check all repository data files over 5 separate invocations:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@@ -50,7 +50,11 @@ This will restore the file ``foo`` to ``/tmp/restore-work/work/foo``.
|
||||
|
||||
You can use the command ``restic ls latest`` or ``restic find foo`` to find the
|
||||
path to the file within the snapshot. This path you can then pass to
|
||||
`--include` in verbatim to only restore the single file or directory.
|
||||
``--include`` in verbatim to only restore the single file or directory.
|
||||
|
||||
There are case insensitive variants of of ``--exclude`` and ``--include`` called
|
||||
``--iexclude`` and ``--iinclude``. These options will behave the same way but
|
||||
ignore the casing of paths.
|
||||
|
||||
Restore using mount
|
||||
===================
|
||||
@@ -65,7 +69,7 @@ command to serve the repository with FUSE:
|
||||
$ restic -r /srv/restic-repo mount /mnt/restic
|
||||
enter password for repository:
|
||||
Now serving /srv/restic-repo at /mnt/restic
|
||||
Don't forget to umount after quitting!
|
||||
When finished, quit with Ctrl-c or umount the mountpoint.
|
||||
|
||||
Mounting repositories via FUSE is not possible on OpenBSD, Solaris/illumos
|
||||
and Windows. For Linux, the ``fuse`` kernel module needs to be loaded. For
|
||||
@@ -120,3 +124,13 @@ e.g.:

.. code-block:: console

    $ restic -r /srv/restic-repo dump --path /production.sql latest production.sql | mysql
+
+It is also possible to ``dump`` the contents of a whole folder structure to
+stdout. To retain the information about the files and folders Restic will
+output the contents in the tar format:
+
+.. code-block:: console
+
+    $ restic -r /srv/restic-repo dump latest /home/other/work > restore.tar
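
Instead of writing the tar stream to a file, it can also be unpacked directly; a minimal sketch, assuming the target directory already exists:

.. code-block:: console

    $ restic -r /srv/restic-repo dump latest /home/other/work | tar -xf - -C /tmp/restore-work
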
@@ -197,7 +197,7 @@ To only keep the last snapshot of all snapshots with both the tag ``foo`` and

.. code-block:: console

-    $ restic forget --tag foo,tag bar --keep-last 1
+    $ restic forget --tag foo,bar --keep-last 1

All the ``--keep-*`` options above only count
hours/days/weeks/months/years which have a snapshot, so those without a
@@ -210,7 +210,7 @@ all snapshots, use ``--keep-last 1`` and then finally remove the last
snapshot ID manually (by passing the ID to ``forget``).

All snapshots are evaluated against all matching ``--keep-*`` counts. A
-single snapshot on 2017-09-30 (Sun) will count as a daily, weekly and monthly.
+single snapshot on 2017-09-30 (Sat) will count as a daily, weekly and monthly.

Let's explain this with an example: Suppose you have only made a backup
on each Sunday for 12 weeks. Then ``forget --keep-daily 4`` will keep
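
As an illustrative sketch (not part of the diff above), several ``--keep-*`` options are often combined and followed by a prune:

.. code-block:: console

    $ restic forget --keep-daily 7 --keep-weekly 4 --keep-monthly 12 --prune
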
@@ -247,8 +247,13 @@ restic is now ready to be used with AWS S3. Try to create a backup:

    ----------------------------------------------------------------------
    10fdbace  2017-03-26 16:41:50  blackbox            /home/philip/restic-demo/test.bin

-A snapshot was created and stored in the S3 bucket. This snapshot may now be
-restored:
+A snapshot was created and stored in the S3 bucket. By default backups to AWS S3 will use the ``STANDARD`` storage class. Available storage classes include ``STANDARD``, ``STANDARD_IA``, ``ONEZONE_IA``, ``INTELLIGENT_TIERING``, and ``REDUCED_REDUNDANCY``. A different storage class could have been specified in the above command by using ``-o`` or ``--option``:
+
+.. code-block:: console
+
+    $ ./restic backup -o s3.storageclass=REDUCED_REDUNDANCY test.bin
+
+This snapshot may now be restored:

.. code-block:: console
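
The restore example that follows is truncated in this compare view; a plausible sketch (snapshot ID from the listing above, target directory is an assumption) would be:

.. code-block:: console

    $ ./restic restore 10fdbace --target /tmp/restore-demo
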
@@ -277,6 +277,10 @@ _restic_backup()
|
||||
flags+=("--host=")
|
||||
two_word_flags+=("-H")
|
||||
local_nonpersistent_flags+=("--host=")
|
||||
flags+=("--iexclude=")
|
||||
local_nonpersistent_flags+=("--iexclude=")
|
||||
flags+=("--ignore-inode")
|
||||
local_nonpersistent_flags+=("--ignore-inode")
|
||||
flags+=("--one-file-system")
|
||||
flags+=("-x")
|
||||
local_nonpersistent_flags+=("--one-file-system")
|
||||
@@ -1222,6 +1226,10 @@ _restic_restore()
|
||||
flags+=("--host=")
|
||||
two_word_flags+=("-H")
|
||||
local_nonpersistent_flags+=("--host=")
|
||||
flags+=("--iexclude=")
|
||||
local_nonpersistent_flags+=("--iexclude=")
|
||||
flags+=("--iinclude=")
|
||||
local_nonpersistent_flags+=("--iinclude=")
|
||||
flags+=("--include=")
|
||||
two_word_flags+=("-i")
|
||||
local_nonpersistent_flags+=("--include=")
|
||||
@@ -1324,6 +1332,9 @@ _restic_snapshots()
|
||||
flags+=("--compact")
|
||||
flags+=("-c")
|
||||
local_nonpersistent_flags+=("--compact")
|
||||
flags+=("--group-by=")
|
||||
two_word_flags+=("-g")
|
||||
local_nonpersistent_flags+=("--group-by=")
|
||||
flags+=("--help")
|
||||
flags+=("-h")
|
||||
local_nonpersistent_flags+=("--help")
|
||||
|
||||
@@ -26,7 +26,8 @@ given as the arguments.
|
||||
|
||||
.PP
|
||||
\fB\-\-exclude\-caches\fP[=false]
|
||||
excludes cache directories that are marked with a CACHEDIR.TAG file
|
||||
excludes cache directories that are marked with a CACHEDIR.TAG file. See
|
||||
\[la]http://bford.info/cachedir/spec.html\[ra] for the Cache Directory Tagging Standard
|
||||
|
||||
.PP
|
||||
\fB\-\-exclude\-file\fP=[]
|
||||
@@ -52,6 +53,14 @@ given as the arguments.
|
||||
\fB\-H\fP, \fB\-\-host\fP=""
|
||||
set the \fB\fChostname\fR for the snapshot manually. To prevent an expensive rescan use the "parent" flag
|
||||
|
||||
.PP
|
||||
\fB\-\-iexclude\fP=[]
|
||||
same as \fB\fC\-\-exclude\fR but ignores the casing of filenames
|
||||
|
||||
.PP
|
||||
\fB\-\-ignore\-inode\fP[=false]
|
||||
ignore inode number changes when checking for modified files
|
||||
|
||||
.PP
|
||||
\fB\-x\fP, \fB\-\-one\-file\-system\fP[=false]
|
||||
exclude other file systems
|
||||
|
||||
@@ -59,7 +59,7 @@ It can also be used to search for restic blobs or trees for troubleshooting.
|
||||
|
||||
.PP
|
||||
\fB\-\-show\-pack\-id\fP[=false]
|
||||
display the pack\-ID the blobs belong to (with \-\-blob)
|
||||
display the pack\-ID the blobs belong to (with \-\-blob or \-\-tree)
|
||||
|
||||
.PP
|
||||
\fB\-s\fP, \fB\-\-snapshot\fP=[]
|
||||
|
||||
@@ -15,7 +15,7 @@ restic\-generate \- Generate manual pages and auto\-completion files (bash, zsh)
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
The "generate" command writes automatically generated files like the man pages
|
||||
The "generate" command writes automatically generated files (like the man pages
|
||||
and the auto\-completion files for bash and zsh).
|
||||
|
||||
|
||||
|
||||
@@ -36,6 +36,14 @@ repository.
|
||||
\fB\-H\fP, \fB\-\-host\fP=""
|
||||
only consider snapshots for this host when the snapshot ID is "latest"
|
||||
|
||||
.PP
|
||||
\fB\-\-iexclude\fP=[]
|
||||
same as \fB\fC\-\-exclude\fR but ignores the casing of filenames
|
||||
|
||||
.PP
|
||||
\fB\-\-iinclude\fP=[]
|
||||
same as \fB\fC\-\-include\fR but ignores the casing of filenames
|
||||
|
||||
.PP
|
||||
\fB\-i\fP, \fB\-\-include\fP=[]
|
||||
include a \fB\fCpattern\fR, exclude everything else (can be specified multiple times)
|
||||
|
||||
@@ -23,6 +23,10 @@ The "snapshots" command lists all snapshots stored in the repository.
|
||||
\fB\-c\fP, \fB\-\-compact\fP[=false]
|
||||
use compact format
|
||||
|
||||
.PP
|
||||
\fB\-g\fP, \fB\-\-group\-by\fP=""
|
||||
string for grouping snapshots by host,paths,tags
|
||||
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP[=false]
|
||||
help for snapshots
|
||||
|
||||
@@ -40,11 +40,12 @@ raw\-data: Counts the size of blobs in the repository, regardless of
how many files reference them.
.IP \(bu 2
blobs\-per\-file: A combination of files\-by\-contents and raw\-data.
-.IP \(bu 2
-Refer to the online manual for more details about each mode.

.RE

+.PP
+Refer to the online manual for more details about each mode.
+

.SH OPTIONS
.PP

@@ -78,7 +78,7 @@ command:

    Flags:
      -e, --exclude pattern                  exclude a pattern (can be specified multiple times)
-         --exclude-caches                   excludes cache directories that are marked with a CACHEDIR.TAG file
+         --exclude-caches                   excludes cache directories that are marked with a CACHEDIR.TAG file. See http://bford.info/cachedir/spec.html for the Cache Directory Tagging Standard
          --exclude-file file                read exclude patterns from a file (can be specified multiple times)
          --exclude-if-present stringArray   takes filename[:header], exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)
          --files-from string                read the files to backup from file (can be combined with file args/can be specified multiple times)
@@ -376,9 +376,10 @@ OS-specific cache folder:

* macOS: ``~/Library/Caches/restic``
* Windows: ``%LOCALAPPDATA%/restic``

-The command line parameter ``--cache-dir`` can each be used to override the
-default cache location. The parameter ``--no-cache`` disables the cache
-entirely. In this case, all data is loaded from the repo.
+The command line parameter ``--cache-dir`` or the environment variable
+``$RESTIC_CACHE_DIR`` can be used to override the default cache location. The
+parameter ``--no-cache`` disables the cache entirely. In this case, all data
+is loaded from the repo.

The cache is ephemeral: When a file cannot be read from the cache, it is loaded
from the repository.
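
A quick way to try the new environment variable (the directory path is only an example) is:

.. code-block:: console

    $ export RESTIC_CACHE_DIR=/var/cache/restic
    $ restic -r /srv/restic-repo snapshots
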
go.mod
@@ -2,54 +2,50 @@ module github.com/restic/restic
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
|
||||
cloud.google.com/go v0.27.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v20.1.0+incompatible
|
||||
github.com/Azure/go-autorest v10.15.3+incompatible // indirect
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible
|
||||
github.com/cpuguy83/go-md2man v1.0.8 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 // indirect
|
||||
cloud.google.com/go v0.37.4 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v27.3.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.9.2 // indirect
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible
|
||||
github.com/cpuguy83/go-md2man v1.0.10 // indirect
|
||||
github.com/dnaeon/go-vcr v1.0.1 // indirect
|
||||
github.com/elithrar/simple-scrypt v1.3.0
|
||||
github.com/go-ini/ini v1.38.2 // indirect
|
||||
github.com/golang/protobuf v1.2.0 // indirect
|
||||
github.com/golang/protobuf v1.3.1 // indirect
|
||||
github.com/google/go-cmp v0.2.0
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.0
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/juju/ratelimit v1.0.1
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/kurin/blazer v0.5.1
|
||||
github.com/kurin/blazer v0.5.3
|
||||
github.com/marstr/guid v1.1.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.4
|
||||
github.com/minio/minio-go v6.0.7+incompatible
|
||||
github.com/mitchellh/go-homedir v1.0.0 // indirect
|
||||
github.com/ncw/swift v1.0.41
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/profile v1.2.1
|
||||
github.com/pkg/sftp v1.8.2
|
||||
github.com/pkg/xattr v0.3.1
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.7
|
||||
github.com/minio/minio-go/v6 v6.0.43
|
||||
github.com/ncw/swift v1.0.47
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/pkg/profile v1.3.0
|
||||
github.com/pkg/sftp v1.10.0
|
||||
github.com/pkg/xattr v0.4.1
|
||||
github.com/restic/chunker v0.2.0
|
||||
github.com/russross/blackfriday v1.5.1 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
|
||||
github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 // indirect
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/pflag v1.0.2
|
||||
github.com/stretchr/testify v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
|
||||
golang.org/x/sys v0.0.0-20180907202204-917fdcba135d
|
||||
golang.org/x/text v0.3.0
|
||||
google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab
|
||||
google.golang.org/appengine v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.3
|
||||
github.com/stretchr/testify v1.3.0 // indirect
|
||||
go.opencensus.io v0.20.2 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2
|
||||
google.golang.org/api v0.3.2
|
||||
google.golang.org/appengine v1.5.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 // indirect
|
||||
google.golang.org/grpc v1.20.1 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
||||
gopkg.in/ini.v1 v1.38.2 // indirect
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
|
||||
gopkg.in/yaml.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.2 // indirect
|
||||
)
|
||||
|
||||
go 1.13
|
||||
|
||||
go.sum
@@ -1,108 +1,252 @@
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.27.0 h1:Xa8ZWro6QYKOwDKtxfKsiE0ea2jD39nx32RxtF5RjYE=
|
||||
cloud.google.com/go v0.27.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/azure-sdk-for-go v20.1.0+incompatible h1:b8OWFQuH5MPi2LYyAR2Ga+7KVH9ipwiSSSMga04/Urc=
|
||||
github.com/Azure/azure-sdk-for-go v20.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-autorest v10.15.3+incompatible h1:nhKI/bvazIs3C3TFGoSqKY6hZ8f5od5mb5/UcS6HVIY=
|
||||
github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
|
||||
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/Azure/azure-sdk-for-go v27.3.0+incompatible h1:i+ROfG3CsZUPoVAnhK06T3R6PmBzKB9ds+lHBpN7Mzo=
|
||||
github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
|
||||
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 h1:G9/PqfhOrt8JXnw0DGTfVoOkKHDhOlEZqhE/cu+NvQM=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elithrar/simple-scrypt v1.3.0 h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg=
|
||||
github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo=
|
||||
github.com/go-ini/ini v1.38.2 h1:6Hl/z3p3iFkA0dlDfzYxuFuUGD+kaweypF6btsR2/Q4=
|
||||
github.com/go-ini/ini v1.38.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c h1:16eHWuMGvCjSfgRJKqIzapE78onvvTbdi1rMkU00lZw=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e h1:XWcjeEtTFTOVA9Fs1w7n2XBftk5ib4oZrhzWk0B+3eA=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
|
||||
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kurin/blazer v0.5.1 h1:mBc4i1uhHJEqU0KvzOgpMHhkwf+EcXvxjWEUS7HG+eY=
|
||||
github.com/kurin/blazer v0.5.1/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk=
|
||||
github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/minio/minio-go v6.0.7+incompatible h1:nWABqotkiT/3aLgFnG30doQiwFkDMM9xnGGQnS+Ao6M=
|
||||
github.com/minio/minio-go v6.0.7+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/ncw/swift v1.0.41 h1:kfoTVQKt1A4n0m1Q3YWku9OoXfpo06biqVfi73yseBs=
|
||||
github.com/ncw/swift v1.0.41/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/minio/minio-go/v6 v6.0.43 h1:D7c6Kx0ZB5U8EXJ6SQVOqPzapaLK/qpxQIktCnPHp/o=
|
||||
github.com/minio/minio-go/v6 v6.0.43/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.8.2 h1:3upwlsK5/USEeM5gzIe9eWdzU4sV+kG3gKKg3RLBuWE=
|
||||
github.com/pkg/sftp v1.8.2/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pkg/xattr v0.3.1 h1:6ceg5jxT3cH4lM5n8S2PmiNeOv61MK08yvvYJwyrPH0=
|
||||
github.com/pkg/xattr v0.3.1/go.mod h1:CBdxFOf0VLbaj6HKuP2ITOVV7NY6ycPKgIgnSx2ZNVs=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=
|
||||
github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.10.0 h1:DGA1KlA9esU6WcicH+P8PxFZOl15O6GYtab1cIJdOlE=
|
||||
github.com/pkg/sftp v1.10.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
|
||||
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU=
|
||||
github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s=
|
||||
github.com/russross/blackfriday v1.5.1 h1:B8ZN6pD4PVofmlDCDUdELeYrbsVIDM/bpjW3v3zgcRc=
|
||||
github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf h1:6V1qxN6Usn4jy8unvggSJz/NC790tefw8Zdy6OZS5co=
|
||||
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 h1:hBSHahWMEgzwRyS6dRpxY0XyjZsHyQ61s084wo5PJe0=
|
||||
github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
|
||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180907202204-917fdcba135d h1:kWn1hlsqeUrk6JsLJO0ZFyz9bMg8u85voZlIuc68ZU4=
|
||||
golang.org/x/sys v0.0.0-20180907202204-917fdcba135d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab h1:qNpJa8m9WofZ7RLj+7o15Ppapwm30+RweyIDSNpw8ps=
|
||||
google.golang.org/api v0.0.0-20180907210053-b609d5e6b7ab/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.38.2 h1:dGcbywv4RufeGeiMycPT/plKB5FtmLKLnWKwBiLhUA4=
|
||||
gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -56,16 +56,6 @@ func verbose(f string, args ...interface{}) {
|
||||
fmt.Printf(f, args...)
|
||||
}
|
||||
|
||||
func run(cmd string, args ...string) {
|
||||
c := exec.Command(cmd, args...)
|
||||
c.Stdout = os.Stdout
|
||||
c.Stderr = os.Stderr
|
||||
err := c.Run()
|
||||
if err != nil {
|
||||
die("error running %s %s: %v", cmd, args, err)
|
||||
}
|
||||
}
|
||||
|
||||
func rm(file string) {
|
||||
err := os.Remove(file)
|
||||
|
||||
@@ -78,13 +68,6 @@ func rm(file string) {
|
||||
}
|
||||
}
|
||||
|
||||
func rmdir(dir string) {
|
||||
err := os.RemoveAll(dir)
|
||||
if err != nil {
|
||||
die("error removing %v: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
func mkdir(dir string) {
|
||||
err := os.MkdirAll(dir, 0755)
|
||||
if err != nil {
|
||||
@@ -92,14 +75,6 @@ func mkdir(dir string) {
|
||||
}
|
||||
}
|
||||
|
||||
func getwd() string {
|
||||
pwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
die("Getwd(): %v", err)
|
||||
}
|
||||
return pwd
|
||||
}
|
||||
|
||||
func abs(dir string) string {
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
|
||||
@@ -78,7 +78,8 @@ type Archiver struct {
|
||||
// WithAtime configures if the access time for files and directories should
|
||||
// be saved. Enabling it may result in much metadata, so it's off by
|
||||
// default.
|
||||
WithAtime bool
|
||||
WithAtime bool
|
||||
IgnoreInode bool
|
||||
}
|
||||
|
||||
// Options is used to configure the archiver.
|
||||
@@ -133,6 +134,7 @@ func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver {
|
||||
CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
|
||||
StartFile: func(string) {},
|
||||
CompleteBlob: func(string, uint64) {},
|
||||
IgnoreInode: false,
|
||||
}
|
||||
|
||||
return arch
|
||||
@@ -382,12 +384,19 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
|
||||
return FutureNode{}, true, nil
|
||||
}
|
||||
|
||||
// use previous node if the file hasn't changed
|
||||
if previous != nil && !fileChanged(fi, previous) {
|
||||
debug.Log("%v hasn't changed, returning old node", target)
|
||||
// use previous list of blobs if the file hasn't changed
|
||||
if previous != nil && !fileChanged(fi, previous, arch.IgnoreInode) {
|
||||
debug.Log("%v hasn't changed, using old list of blobs", target)
|
||||
arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start))
|
||||
arch.CompleteBlob(snPath, previous.Size)
|
||||
fn.node = previous
|
||||
fn.node, err = arch.nodeFromFileInfo(target, fi)
|
||||
if err != nil {
|
||||
return FutureNode{}, false, err
|
||||
}
|
||||
|
||||
// copy list of blobs
|
||||
fn.node.Content = previous.Content
|
||||
|
||||
_ = file.Close()
|
||||
return fn, false, nil
|
||||
}
|
||||
@@ -436,7 +445,7 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
|
||||
|
||||
// fileChanged returns true if the file's content has changed since the node
|
||||
// was created.
|
||||
func fileChanged(fi os.FileInfo, node *restic.Node) bool {
|
||||
func fileChanged(fi os.FileInfo, node *restic.Node, ignoreInode bool) bool {
|
||||
if node == nil {
|
||||
return true
|
||||
}
|
||||
@@ -451,14 +460,19 @@ func fileChanged(fi os.FileInfo, node *restic.Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// check size
|
||||
// check status change timestamp
|
||||
extFI := fs.ExtendedStat(fi)
|
||||
if !ignoreInode && !extFI.ChangeTime.Equal(node.ChangeTime) {
|
||||
return true
|
||||
}
|
||||
|
||||
// check size
|
||||
if uint64(fi.Size()) != node.Size || uint64(extFI.Size) != node.Size {
|
||||
return true
|
||||
}
|
||||
|
||||
// check inode
|
||||
if node.Inode != extFI.Inode {
|
||||
if !ignoreInode && node.Inode != extFI.Inode {
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/restic/restic/internal/checker"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
@@ -124,9 +126,9 @@ func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem
|
||||
|
||||
func TestArchiverSaveFile(t *testing.T) {
|
||||
var tests = []TestFile{
|
||||
TestFile{Content: ""},
|
||||
TestFile{Content: "foo"},
|
||||
TestFile{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
{Content: ""},
|
||||
{Content: "foo"},
|
||||
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
}
|
||||
|
||||
for _, testfile := range tests {
|
||||
@@ -160,7 +162,6 @@ func TestArchiverSaveFileReaderFS(t *testing.T) {
|
||||
var tests = []struct {
|
||||
Data string
|
||||
}{
|
||||
{Data: ""},
|
||||
{Data: "foo"},
|
||||
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
}
|
||||
@@ -203,9 +204,9 @@ func TestArchiverSaveFileReaderFS(t *testing.T) {
|
||||
|
||||
func TestArchiverSave(t *testing.T) {
|
||||
var tests = []TestFile{
|
||||
TestFile{Content: ""},
|
||||
TestFile{Content: "foo"},
|
||||
TestFile{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
{Content: ""},
|
||||
{Content: "foo"},
|
||||
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
}
|
||||
|
||||
for _, testfile := range tests {
|
||||
@@ -271,7 +272,6 @@ func TestArchiverSaveReaderFS(t *testing.T) {
|
||||
var tests = []struct {
|
||||
Data string
|
||||
}{
|
||||
{Data: ""},
|
||||
{Data: "foo"},
|
||||
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
|
||||
}
|
||||
@@ -557,9 +557,12 @@ func TestFileChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
var tests = []struct {
|
||||
Name string
|
||||
Content []byte
|
||||
Modify func(t testing.TB, filename string)
|
||||
Name string
|
||||
SkipForWindows bool
|
||||
Content []byte
|
||||
Modify func(t testing.TB, filename string)
|
||||
IgnoreInode bool
|
||||
SameFile bool
|
||||
}{
|
||||
{
|
||||
Name: "same-content-new-file",
|
||||
@@ -576,6 +579,23 @@ func TestFileChanged(t *testing.T) {
|
||||
save(t, filename, defaultContent)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "new-content-same-timestamp",
|
||||
// on Windows, there's no "create time" field users cannot modify,
|
||||
// so we're unable to detect if a file has been modified when the
|
||||
// timestamps are reset, so we skip this test for Windows
|
||||
SkipForWindows: true,
|
||||
Modify: func(t testing.TB, filename string) {
|
||||
fi, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
extFI := fs.ExtendedStat(fi)
|
||||
save(t, filename, bytes.ToUpper(defaultContent))
|
||||
sleep()
|
||||
setTimestamp(t, filename, extFI.AccessTime, extFI.ModTime)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "other-content",
|
||||
Modify: func(t testing.TB, filename string) {
|
||||
@@ -598,10 +618,26 @@ func TestFileChanged(t *testing.T) {
|
||||
save(t, filename, defaultContent)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ignore-inode",
|
||||
Modify: func(t testing.TB, filename string) {
|
||||
fi := lstat(t, filename)
|
||||
remove(t, filename)
|
||||
sleep()
|
||||
save(t, filename, defaultContent)
|
||||
setTimestamp(t, filename, fi.ModTime(), fi.ModTime())
|
||||
},
|
||||
IgnoreInode: true,
|
||||
SameFile: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
if runtime.GOOS == "windows" && test.SkipForWindows {
|
||||
t.Skip("don't run test on Windows")
|
||||
}
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -615,15 +651,24 @@ func TestFileChanged(t *testing.T) {
|
||||
fiBefore := lstat(t, filename)
|
||||
node := nodeFromFI(t, filename, fiBefore)
|
||||
|
||||
if fileChanged(fiBefore, node) {
|
||||
if fileChanged(fiBefore, node, false) {
|
||||
t.Fatalf("unchanged file detected as changed")
|
||||
}
|
||||
|
||||
test.Modify(t, filename)
|
||||
|
||||
fiAfter := lstat(t, filename)
|
||||
if !fileChanged(fiAfter, node) {
|
||||
t.Fatalf("modified file detected as unchanged")
|
||||
|
||||
if test.SameFile {
|
||||
// file should be detected as unchanged
|
||||
if fileChanged(fiAfter, node, test.IgnoreInode) {
|
||||
t.Fatalf("unmodified file detected as changed")
|
||||
}
|
||||
} else {
|
||||
// file should be detected as changed
|
||||
if !fileChanged(fiAfter, node, test.IgnoreInode) && !test.SameFile {
|
||||
t.Fatalf("modified file detected as unchanged")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -639,7 +684,7 @@ func TestFilChangedSpecialCases(t *testing.T) {
|
||||
|
||||
t.Run("nil-node", func(t *testing.T) {
|
||||
fi := lstat(t, filename)
|
||||
if !fileChanged(fi, nil) {
|
||||
if !fileChanged(fi, nil, false) {
|
||||
t.Fatal("nil node detected as unchanged")
|
||||
}
|
||||
})
|
||||
@@ -648,7 +693,7 @@ func TestFilChangedSpecialCases(t *testing.T) {
|
||||
fi := lstat(t, filename)
|
||||
node := nodeFromFI(t, filename, fi)
|
||||
node.Type = "symlink"
|
||||
if !fileChanged(fi, node) {
|
||||
if !fileChanged(fi, node, false) {
|
||||
t.Fatal("node with changed type detected as unchanged")
|
||||
}
|
||||
})
|
||||
@@ -1900,3 +1945,154 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent restic.ID, filename string) (restic.ID, *restic.Node) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
arch := New(repo, fs, Options{})
|
||||
|
||||
sopts := SnapshotOptions{
|
||||
Time: time.Now(),
|
||||
ParentSnapshot: parent,
|
||||
}
|
||||
snapshot, snapshotID, err := arch.Snapshot(ctx, []string{filename}, sopts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tree, err := repo.LoadTree(ctx, *snapshot.Tree)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := tree.Find(filename)
|
||||
if node == nil {
|
||||
t.Fatalf("unable to find node for testfile in snapshot")
|
||||
}
|
||||
|
||||
return snapshotID, node
|
||||
}
|
||||
|
||||
func chmod(t testing.TB, filename string, mode os.FileMode) {
|
||||
err := os.Chmod(filename, mode)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// StatFS allows overwriting what is returned by the Lstat function.
|
||||
type StatFS struct {
|
||||
fs.FS
|
||||
|
||||
OverrideLstat map[string]os.FileInfo
|
||||
}
|
||||
|
||||
func (fs *StatFS) Lstat(name string) (os.FileInfo, error) {
|
||||
if fi, ok := fs.OverrideLstat[name]; ok {
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
return fs.FS.Lstat(name)
|
||||
}
|
||||
|
||||
func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, error) {
|
||||
if fi, ok := fs.OverrideLstat[name]; ok {
|
||||
f, err := fs.FS.OpenFile(name, flags, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
wrappedFile := fileStat{
|
||||
File: f,
|
||||
fi: fi,
|
||||
}
|
||||
return wrappedFile, nil
|
||||
}
|
||||
|
||||
return fs.FS.OpenFile(name, flags, perm)
|
||||
}
|
||||
|
||||
type fileStat struct {
|
||||
fs.File
|
||||
fi os.FileInfo
|
||||
}
|
||||
|
||||
func (f fileStat) Stat() (os.FileInfo, error) {
|
||||
return f.fi, nil
|
||||
}
|
||||
|
||||
// used by wrapFileInfo, use untyped const in order to avoid having a version
|
||||
// of wrapFileInfo for each OS
|
||||
const (
|
||||
mockFileInfoMode = 0400
|
||||
mockFileInfoUID = 51234
|
||||
mockFileInfoGID = 51235
|
||||
)
|
||||
|
||||
func TestMetadataChanged(t *testing.T) {
|
||||
files := TestDir{
|
||||
"testfile": TestFile{
|
||||
Content: "foo bar test file",
|
||||
},
|
||||
}
|
||||
|
||||
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, files)
|
||||
defer cleanup()
|
||||
|
||||
back := fs.TestChdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
// get metadata
|
||||
fi := lstat(t, "testfile")
|
||||
want, err := restic.NodeFromFileInfo("testfile", fi)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fs := &StatFS{
|
||||
FS: fs.Local{},
|
||||
OverrideLstat: map[string]os.FileInfo{
|
||||
"testfile": fi,
|
||||
},
|
||||
}
|
||||
|
||||
snapshotID, node2 := snapshot(t, repo, fs, restic.ID{}, "testfile")
|
||||
|
||||
// set some values so we can then compare the nodes
|
||||
want.Content = node2.Content
|
||||
want.Path = ""
|
||||
want.ExtendedAttributes = nil
|
||||
|
||||
// make sure that metadata was recorded successfully
|
||||
if !cmp.Equal(want, node2) {
|
||||
t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node2))
|
||||
}
|
||||
|
||||
// modify the mode by wrapping it in a new struct, uses the consts defined above
|
||||
fs.OverrideLstat["testfile"] = wrapFileInfo(t, fi)
|
||||
|
||||
// set the override values in the 'want' node which
|
||||
want.Mode = 0400
|
||||
// ignore UID and GID on Windows
|
||||
if runtime.GOOS != "windows" {
|
||||
want.UID = 51234
|
||||
want.GID = 51235
|
||||
}
|
||||
// no user and group name
|
||||
want.User = ""
|
||||
want.Group = ""
|
||||
|
||||
// make another snapshot
|
||||
snapshotID, node3 := snapshot(t, repo, fs, snapshotID, "testfile")
|
||||
|
||||
// make sure that metadata was recorded successfully
|
||||
if !cmp.Equal(want, node3) {
|
||||
t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node3))
|
||||
}
|
||||
|
||||
// make sure the content matches
|
||||
TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile))
|
||||
|
||||
checker.TestCheckRepo(t, repo)
|
||||
}
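The test above checks the recorded metadata with go-cmp. A small, self-contained sketch of that comparison style; the node type and the values below are invented for this example rather than taken from restic:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is a stand-in struct for the metadata fields compared in the test.
type node struct {
	Mode uint32
	UID  uint32
	GID  uint32
}

func main() {
	want := node{Mode: 0400, UID: 51234, GID: 51235}
	got := node{Mode: 0400, UID: 51234, GID: 0}

	// cmp.Equal reports structural equality; cmp.Diff renders a readable diff.
	if !cmp.Equal(want, got) {
		fmt.Printf("metadata does not match:\n%v", cmp.Diff(want, got))
	}
}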
|
||||
|
||||
41
internal/archiver/archiver_unix_test.go
Normal file
@@ -0,0 +1,41 @@
// +build !windows

package archiver

import (
	"os"
	"syscall"
	"testing"
)

type wrappedFileInfo struct {
	os.FileInfo
	sys  interface{}
	mode os.FileMode
}

func (fi wrappedFileInfo) Sys() interface{} {
	return fi.sys
}

func (fi wrappedFileInfo) Mode() os.FileMode {
	return fi.mode
}

// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
	// get the underlying stat_t and modify the values
	stat := fi.Sys().(*syscall.Stat_t)
	stat.Mode = mockFileInfoMode
	stat.Uid = mockFileInfoUID
	stat.Gid = mockFileInfoGID

	// wrap the os.FileInfo so we can return a modified stat_t
	res := wrappedFileInfo{
		FileInfo: fi,
		sys:      stat,
		mode:     mockFileInfoMode,
	}

	return res
}
28
internal/archiver/archiver_windows_test.go
Normal file
@@ -0,0 +1,28 @@
// +build windows

package archiver

import (
	"os"
	"testing"
)

type wrappedFileInfo struct {
	os.FileInfo
	mode os.FileMode
}

func (fi wrappedFileInfo) Mode() os.FileMode {
	return fi.mode
}

// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed.
func wrapFileInfo(t testing.TB, fi os.FileInfo) os.FileInfo {
	// wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows
	res := wrappedFileInfo{
		FileInfo: fi,
		mode:     mockFileInfoMode,
	}

	return res
}
@@ -47,9 +47,7 @@ func ParseConfig(s string) (interface{}, error) {
|
||||
return nil, errors.New("azure: invalid format: bucket name or path not found")
|
||||
}
|
||||
container, path := data[0], path.Clean(data[1])
|
||||
if strings.HasPrefix(path, "/") {
|
||||
path = path[1:]
|
||||
}
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
cfg := NewConfig()
|
||||
cfg.Container = container
|
||||
cfg.Prefix = path
|
||||
|
||||
@@ -49,9 +49,7 @@ func ParseConfig(s string) (interface{}, error) {
|
||||
|
||||
bucket, path := data[0], path.Clean(data[1])
|
||||
|
||||
if strings.HasPrefix(path, "/") {
|
||||
path = path[1:]
|
||||
}
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
|
||||
cfg := NewConfig()
|
||||
cfg.Bucket = bucket
|
||||
|
||||
@@ -116,8 +116,8 @@ func TestDefaultLayout(t *testing.T) {
|
||||
want = append(want, filepath.Join(tempdir, "data", fmt.Sprintf("%02x", i)))
|
||||
}
|
||||
|
||||
sort.Sort(sort.StringSlice(want))
|
||||
sort.Sort(sort.StringSlice(dirs))
|
||||
sort.Strings(want)
|
||||
sort.Strings(dirs)
|
||||
|
||||
if !reflect.DeepEqual(dirs, want) {
|
||||
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
|
||||
@@ -189,8 +189,8 @@ func TestRESTLayout(t *testing.T) {
|
||||
filepath.Join(path, "keys"),
|
||||
}
|
||||
|
||||
sort.Sort(sort.StringSlice(want))
|
||||
sort.Sort(sort.StringSlice(dirs))
|
||||
sort.Strings(want)
|
||||
sort.Strings(dirs)
|
||||
|
||||
if !reflect.DeepEqual(dirs, want) {
|
||||
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
|
||||
@@ -335,8 +335,8 @@ func TestS3LegacyLayout(t *testing.T) {
|
||||
filepath.Join(path, "key"),
|
||||
}
|
||||
|
||||
sort.Sort(sort.StringSlice(want))
|
||||
sort.Sort(sort.StringSlice(dirs))
|
||||
sort.Strings(want)
|
||||
sort.Strings(dirs)
|
||||
|
||||
if !reflect.DeepEqual(dirs, want) {
|
||||
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
|
||||
|
||||
@@ -25,25 +25,6 @@ var _ restic.Backend = &Local{}
|
||||
|
||||
const defaultLayout = "default"
|
||||
|
||||
// dirExists returns true if the name exists and is a directory.
|
||||
func dirExists(name string) bool {
|
||||
f, err := fs.Open(name)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if err = f.Close(); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
// Open opens the local backend as specified by config.
|
||||
func Open(cfg Config) (*Local, error) {
|
||||
debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout)
|
||||
|
||||
@@ -127,8 +127,6 @@ func New(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, a...)
|
||||
} else {
|
||||
args = append(args, "rclone")
|
||||
}
|
||||
|
||||
// then add the arguments
|
||||
@@ -139,10 +137,6 @@ func New(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
}
|
||||
|
||||
args = append(args, a...)
|
||||
} else {
|
||||
args = append(args,
|
||||
"serve", "restic", "--stdio",
|
||||
"--b2-hard-delete", "--drive-use-trash=false")
|
||||
}
|
||||
|
||||
// finally, add the remote
|
||||
|
||||
@@ -15,15 +15,19 @@ type Config struct {
|
||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||
}
|
||||
|
||||
var defaultConfig = Config{
|
||||
Program: "rclone",
|
||||
Args: "serve restic --stdio --b2-hard-delete --drive-use-trash=false",
|
||||
Connections: 5,
|
||||
}
|
||||
|
||||
func init() {
|
||||
options.Register("rclone", Config{})
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config with the default values filled in.
|
||||
func NewConfig() Config {
|
||||
return Config{
|
||||
Connections: 5,
|
||||
}
|
||||
return defaultConfig
|
||||
}
|
||||
|
||||
// ParseConfig parses the string s and extracts the remote server URL.
|
||||
|
||||
@@ -14,7 +14,9 @@ func TestParseConfig(t *testing.T) {
|
||||
"rclone:local:foo:/bar",
|
||||
Config{
|
||||
Remote: "local:foo:/bar",
|
||||
Connections: 5,
|
||||
Program: defaultConfig.Program,
|
||||
Args: defaultConfig.Args,
|
||||
Connections: defaultConfig.Connections,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -18,9 +18,11 @@ type Config struct {
|
||||
Bucket string
|
||||
Prefix string
|
||||
Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"`
|
||||
StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"`
|
||||
|
||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||
MaxRetries uint `option:"retries" help:"set the number of retries attempted"`
|
||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||
MaxRetries uint `option:"retries" help:"set the number of retries attempted"`
|
||||
Region string `option:"region" help:"set region"`
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config with the default values filled in.
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/minio/minio-go"
|
||||
"github.com/minio/minio-go/pkg/credentials"
|
||||
"github.com/minio/minio-go/v6"
|
||||
"github.com/minio/minio-go/v6/pkg/credentials"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
)
|
||||
@@ -66,7 +66,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
},
|
||||
},
|
||||
})
|
||||
client, err := minio.NewWithCredentials(cfg.Endpoint, creds, !cfg.UseHTTP, "")
|
||||
client, err := minio.NewWithCredentials(cfg.Endpoint, creds, !cfg.UseHTTP, cfg.Region)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "minio.NewWithCredentials")
|
||||
}
|
||||
@@ -231,22 +231,6 @@ func (be *Backend) Path() string {
|
||||
return be.cfg.Prefix
|
||||
}
|
||||
|
||||
// lenForFile returns the length of the file.
|
||||
func lenForFile(f *os.File) (int64, error) {
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Stat")
|
||||
}
|
||||
|
||||
pos, err := f.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Seek")
|
||||
}
|
||||
|
||||
size := fi.Size() - pos
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// Save stores data in the backend at the handle.
|
||||
func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
|
||||
debug.Log("Save %v", h)
|
||||
@@ -260,7 +244,7 @@ func (be *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindRe
|
||||
be.sem.GetToken()
|
||||
defer be.sem.ReleaseToken()
|
||||
|
||||
opts := minio.PutObjectOptions{}
|
||||
opts := minio.PutObjectOptions{StorageClass: be.cfg.StorageClass}
|
||||
opts.ContentType = "application/octet-stream"
|
||||
|
||||
debug.Log("PutObject(%v, %v, %v)", be.cfg.Bucket, objName, rd.Length())
|
||||
@@ -321,7 +305,7 @@ func (be *Backend) openReader(ctx context.Context, h restic.Handle, length int,
|
||||
|
||||
be.sem.GetToken()
|
||||
coreClient := minio.Core{Client: be.client}
|
||||
rd, err := coreClient.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts)
|
||||
rd, _, _, err := coreClient.GetObjectWithContext(ctx, be.cfg.Bucket, objName, opts)
|
||||
if err != nil {
|
||||
be.sem.ReleaseToken()
|
||||
return nil, err
|
||||
|
||||
@@ -23,6 +23,11 @@ type Config struct {
|
||||
StorageURL string
|
||||
AuthToken string
|
||||
|
||||
// auth v3 only
|
||||
ApplicationCredentialID string
|
||||
ApplicationCredentialName string
|
||||
ApplicationCredentialSecret string
|
||||
|
||||
Container string
|
||||
Prefix string
|
||||
DefaultContainerPolicy string
|
||||
@@ -96,6 +101,11 @@ func ApplyEnvironment(prefix string, cfg interface{}) error {
|
||||
{&c.UserName, prefix + "ST_USER"},
|
||||
{&c.APIKey, prefix + "ST_KEY"},
|
||||
|
||||
// Application Credential auth
|
||||
{&c.ApplicationCredentialID, prefix + "OS_APPLICATION_CREDENTIAL_ID"},
|
||||
{&c.ApplicationCredentialName, prefix + "OS_APPLICATION_CREDENTIAL_NAME"},
|
||||
{&c.ApplicationCredentialSecret, prefix + "OS_APPLICATION_CREDENTIAL_SECRET"},
|
||||
|
||||
// Manual authentication
|
||||
{&c.StorageURL, prefix + "OS_STORAGE_URL"},
|
||||
{&c.AuthToken, prefix + "OS_AUTH_TOKEN"},
|
||||
|
||||
@@ -17,8 +17,6 @@ import (
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
const connLimit = 10
|
||||
|
||||
// beSwift is a backend which stores the data on a swift endpoint.
|
||||
type beSwift struct {
|
||||
conn *swift.Connection
|
||||
@@ -43,19 +41,22 @@ func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
|
||||
be := &beSwift{
|
||||
conn: &swift.Connection{
|
||||
UserName: cfg.UserName,
|
||||
Domain: cfg.Domain,
|
||||
ApiKey: cfg.APIKey,
|
||||
AuthUrl: cfg.AuthURL,
|
||||
Region: cfg.Region,
|
||||
Tenant: cfg.Tenant,
|
||||
TenantId: cfg.TenantID,
|
||||
TenantDomain: cfg.TenantDomain,
|
||||
TrustId: cfg.TrustID,
|
||||
StorageUrl: cfg.StorageURL,
|
||||
AuthToken: cfg.AuthToken,
|
||||
ConnectTimeout: time.Minute,
|
||||
Timeout: time.Minute,
|
||||
UserName: cfg.UserName,
|
||||
Domain: cfg.Domain,
|
||||
ApiKey: cfg.APIKey,
|
||||
AuthUrl: cfg.AuthURL,
|
||||
Region: cfg.Region,
|
||||
Tenant: cfg.Tenant,
|
||||
TenantId: cfg.TenantID,
|
||||
TenantDomain: cfg.TenantDomain,
|
||||
TrustId: cfg.TrustID,
|
||||
StorageUrl: cfg.StorageURL,
|
||||
AuthToken: cfg.AuthToken,
|
||||
ApplicationCredentialId: cfg.ApplicationCredentialID,
|
||||
ApplicationCredentialName: cfg.ApplicationCredentialName,
|
||||
ApplicationCredentialSecret: cfg.ApplicationCredentialSecret,
|
||||
ConnectTimeout: time.Minute,
|
||||
Timeout: time.Minute,
|
||||
|
||||
Transport: rt,
|
||||
},
|
||||
|
||||
@@ -79,7 +79,7 @@ func (s *Suite) TestConfig(t *testing.T) {
|
||||
var testString = "Config"
|
||||
|
||||
// create config and read it back
|
||||
_, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.ConfigFile})
|
||||
_, err := backend.LoadAll(context.TODO(), nil, b, restic.Handle{Type: restic.ConfigFile})
|
||||
if err == nil {
|
||||
t.Fatalf("did not get expected error for non-existing config")
|
||||
}
|
||||
@@ -93,7 +93,7 @@ func (s *Suite) TestConfig(t *testing.T) {
|
||||
// same config
|
||||
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
|
||||
h := restic.Handle{Type: restic.ConfigFile, Name: name}
|
||||
buf, err := backend.LoadAll(context.TODO(), b, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, b, h)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read config with name %q: %+v", name, err)
|
||||
}
|
||||
@@ -491,7 +491,7 @@ func (s *Suite) TestSave(t *testing.T) {
|
||||
err := b.Save(context.TODO(), h, restic.NewByteReader(data))
|
||||
test.OK(t, err)
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), b, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, b, h)
|
||||
test.OK(t, err)
|
||||
if len(buf) != len(data) {
|
||||
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
|
||||
@@ -584,7 +584,7 @@ func (s *Suite) TestSaveFilenames(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), b, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, b, h)
|
||||
if err != nil {
|
||||
t.Errorf("test %d failed: Load() returned %+v", i, err)
|
||||
continue
|
||||
@@ -734,7 +734,7 @@ func (s *Suite) TestBackend(t *testing.T) {
|
||||
|
||||
// test Load()
|
||||
h := restic.Handle{Type: tpe, Name: ts.id}
|
||||
buf, err := backend.LoadAll(context.TODO(), b, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, b, h)
|
||||
test.OK(t, err)
|
||||
test.Equals(t, ts.data, string(buf))
|
||||
|
||||
|
||||
@@ -1,20 +1,33 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// LoadAll reads all data stored in the backend for the handle.
|
||||
func LoadAll(ctx context.Context, be restic.Backend, h restic.Handle) (buf []byte, err error) {
|
||||
err = be.Load(ctx, h, 0, 0, func(rd io.Reader) (ierr error) {
|
||||
buf, ierr = ioutil.ReadAll(rd)
|
||||
return ierr
|
||||
// LoadAll reads all data stored in the backend for the handle into the given
|
||||
// buffer, which is truncated. If the buffer is not large enough or nil, a new
|
||||
// one is allocated.
|
||||
func LoadAll(ctx context.Context, buf []byte, be restic.Backend, h restic.Handle) ([]byte, error) {
|
||||
err := be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
|
||||
// make sure this is idempotent, in case an error occurs this function may be called multiple times!
|
||||
wr := bytes.NewBuffer(buf[:0])
|
||||
_, cerr := io.Copy(wr, rd)
|
||||
if cerr != nil {
|
||||
return cerr
|
||||
}
|
||||
buf = wr.Bytes()
|
||||
return nil
|
||||
})
|
||||
return buf, err
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
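Given the new signature above, a caller can hand the returned buffer back into the next call so that one allocation is reused across many loads. A hedged sketch of that usage from inside the restic module (loadMany and process are hypothetical names, not part of restic):

package example

import (
	"context"

	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/restic"
)

// loadMany loads every handle in turn, reusing a single buffer between calls.
func loadMany(ctx context.Context, be restic.Backend, handles []restic.Handle, process func([]byte) error) error {
	var buf []byte
	for _, h := range handles {
		var err error
		buf, err = backend.LoadAll(ctx, buf, be, h)
		if err != nil {
			return err
		}
		if err := process(buf); err != nil {
			return err
		}
	}
	return nil
}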
|
||||
|
||||
// LimitedReadCloser wraps io.LimitedReader and exposes the Close() method.
|
||||
|
||||
@@ -19,6 +19,7 @@ const MiB = 1 << 20
|
||||
|
||||
func TestLoadAll(t *testing.T) {
|
||||
b := mem.New()
|
||||
var buf []byte
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB)
|
||||
@@ -28,7 +29,7 @@ func TestLoadAll(t *testing.T) {
|
||||
err := b.Save(context.TODO(), h, restic.NewByteReader(data))
|
||||
rtest.OK(t, err)
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
|
||||
buf, err := backend.LoadAll(context.TODO(), buf, b, restic.Handle{Type: restic.DataFile, Name: id.String()})
|
||||
rtest.OK(t, err)
|
||||
|
||||
if len(buf) != len(data) {
|
||||
@@ -43,55 +44,66 @@ func TestLoadAll(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadSmallBuffer(t *testing.T) {
|
||||
b := mem.New()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB)
|
||||
|
||||
id := restic.Hash(data)
|
||||
h := restic.Handle{Name: id.String(), Type: restic.DataFile}
|
||||
err := b.Save(context.TODO(), h, restic.NewByteReader(data))
|
||||
rtest.OK(t, err)
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
|
||||
rtest.OK(t, err)
|
||||
|
||||
if len(buf) != len(data) {
|
||||
t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
|
||||
continue
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf, data) {
|
||||
t.Errorf("wrong data returned")
|
||||
continue
|
||||
}
|
||||
func save(t testing.TB, be restic.Backend, buf []byte) restic.Handle {
|
||||
id := restic.Hash(buf)
|
||||
h := restic.Handle{Name: id.String(), Type: restic.DataFile}
|
||||
err := be.Save(context.TODO(), h, restic.NewByteReader(buf))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func TestLoadLargeBuffer(t *testing.T) {
|
||||
func TestLoadAllAppend(t *testing.T) {
|
||||
b := mem.New()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB)
|
||||
h1 := save(t, b, []byte("foobar test string"))
|
||||
randomData := rtest.Random(23, rand.Intn(MiB)+500*KiB)
|
||||
h2 := save(t, b, randomData)
|
||||
|
||||
id := restic.Hash(data)
|
||||
h := restic.Handle{Name: id.String(), Type: restic.DataFile}
|
||||
err := b.Save(context.TODO(), h, restic.NewByteReader(data))
|
||||
rtest.OK(t, err)
|
||||
var tests = []struct {
|
||||
handle restic.Handle
|
||||
buf []byte
|
||||
want []byte
|
||||
}{
|
||||
{
|
||||
handle: h1,
|
||||
buf: nil,
|
||||
want: []byte("foobar test string"),
|
||||
},
|
||||
{
|
||||
handle: h1,
|
||||
buf: []byte("xxx"),
|
||||
want: []byte("foobar test string"),
|
||||
},
|
||||
{
|
||||
handle: h2,
|
||||
buf: nil,
|
||||
want: randomData,
|
||||
},
|
||||
{
|
||||
handle: h2,
|
||||
buf: make([]byte, 0, 200),
|
||||
want: randomData,
|
||||
},
|
||||
{
|
||||
handle: h2,
|
||||
buf: []byte("foobarbaz"),
|
||||
want: randomData,
|
||||
},
|
||||
}
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), b, restic.Handle{Type: restic.DataFile, Name: id.String()})
|
||||
rtest.OK(t, err)
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
buf, err := backend.LoadAll(context.TODO(), test.buf, b, test.handle)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(buf) != len(data) {
|
||||
t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
|
||||
continue
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf, data) {
|
||||
t.Errorf("wrong data returned")
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(buf, test.want) {
|
||||
t.Errorf("wrong data returned, want %q, got %q", test.want, buf)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
4
internal/cache/backend_test.go
vendored
@@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
func loadAndCompare(t testing.TB, be restic.Backend, h restic.Handle, data []byte) {
|
||||
buf, err := backend.LoadAll(context.TODO(), be, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, be, h)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -147,7 +147,7 @@ func TestErrorBackend(t *testing.T) {
|
||||
loadTest := func(wg *sync.WaitGroup, be restic.Backend) {
|
||||
defer wg.Done()
|
||||
|
||||
buf, err := backend.LoadAll(context.TODO(), be, h)
|
||||
buf, err := backend.LoadAll(context.TODO(), nil, be, h)
|
||||
if err == testErr {
|
||||
return
|
||||
}
|
||||
|
||||
12
internal/cache/cache.go
vendored
@@ -24,7 +24,7 @@ type Cache struct {
|
||||
}
|
||||
|
||||
const dirMode = 0700
|
||||
const fileMode = 0600
|
||||
const fileMode = 0644
|
||||
|
||||
func readVersion(dir string) (v uint, err error) {
|
||||
buf, err := ioutil.ReadFile(filepath.Join(dir, "version"))
|
||||
@@ -68,7 +68,7 @@ func writeCachedirTag(dir string) error {
|
||||
return errors.Wrap(err, "Lstat")
|
||||
}
|
||||
|
||||
f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
|
||||
f, err := fs.OpenFile(tagfile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, fileMode)
|
||||
if err != nil {
|
||||
if os.IsExist(errors.Cause(err)) {
|
||||
return nil
|
||||
@@ -138,7 +138,7 @@ func New(id string, basedir string) (c *Cache, err error) {
|
||||
}
|
||||
|
||||
if v < cacheVersion {
|
||||
err = ioutil.WriteFile(filepath.Join(cachedir, "version"), []byte(fmt.Sprintf("%d", cacheVersion)), 0644)
|
||||
err = ioutil.WriteFile(filepath.Join(cachedir, "version"), []byte(fmt.Sprintf("%d", cacheVersion)), fileMode)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "WriteFile")
|
||||
}
|
||||
@@ -175,11 +175,7 @@ const MaxCacheAge = 30 * 24 * time.Hour
|
||||
|
||||
func validCacheDirName(s string) bool {
|
||||
r := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
|
||||
if !r.MatchString(s) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return r.MatchString(s)
|
||||
}
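The simplified validCacheDirName above still compiles the pattern on every call. A common variant, shown here only as an illustrative sketch and not as the code restic uses, hoists the compiled regexp into a package-level variable so it is built once:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// cacheDirNameRe matches the 64-character hex names used for cache directories.
var cacheDirNameRe = regexp.MustCompile(`^[a-fA-F0-9]{64}$`)

func validCacheDirName(s string) bool {
	return cacheDirNameRe.MatchString(s)
}

func main() {
	fmt.Println(validCacheDirName("not-a-hash"))            // false
	fmt.Println(validCacheDirName(strings.Repeat("a", 64))) // true
}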
|
||||
|
||||
// listCacheDirs returns the list of cache directories.
|
||||
|
||||
8
internal/cache/dir.go
vendored
@@ -12,17 +12,21 @@ import (
|
||||
|
||||
// xdgCacheDir returns the cache directory according to XDG basedir spec, see
|
||||
// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
||||
// unless RESTIC_CACHE_DIR is defined
|
||||
func xdgCacheDir() (string, error) {
|
||||
cachedir := os.Getenv("RESTIC_CACHE_DIR")
|
||||
xdgcache := os.Getenv("XDG_CACHE_HOME")
|
||||
home := os.Getenv("HOME")
|
||||
|
||||
if xdgcache != "" {
|
||||
if cachedir != "" {
|
||||
return cachedir, nil
|
||||
} else if xdgcache != "" {
|
||||
return filepath.Join(xdgcache, "restic"), nil
|
||||
} else if home != "" {
|
||||
return filepath.Join(home, ".cache", "restic"), nil
|
||||
}
|
||||
|
||||
return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)")
|
||||
return "", errors.New("unable to locate cache directory (RESTIC_CACHE_DIR, XDG_CACHE_HOME and HOME unset)")
|
||||
}
|
||||
|
||||
// windowsCacheDir returns the cache directory for Windows.
|
||||
|
||||
6
internal/cache/file.go
vendored
@@ -214,9 +214,5 @@ func (c *Cache) Has(h restic.Handle) bool {
|
||||
}
|
||||
|
||||
_, err := fs.Stat(c.filename(h))
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
return err == nil
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ func New(repo restic.Repository) *Checker {
|
||||
return c
|
||||
}
|
||||
|
||||
const defaultParallelism = 40
|
||||
const defaultParallelism = 5
|
||||
|
||||
// ErrDuplicatePacks is returned when a pack is found in more than one index.
|
||||
type ErrDuplicatePacks struct {
|
||||
@@ -74,82 +74,110 @@ func (err ErrOldIndexFormat) Error() string {
|
||||
// LoadIndex loads all index files.
|
||||
func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
|
||||
debug.Log("Start")
|
||||
type indexRes struct {
|
||||
Index *repository.Index
|
||||
err error
|
||||
ID string
|
||||
|
||||
// track spawned goroutines using wg, create a new context which is
|
||||
// cancelled as soon as an error occurs.
|
||||
wg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
type FileInfo struct {
|
||||
restic.ID
|
||||
Size int64
|
||||
}
|
||||
|
||||
indexCh := make(chan indexRes)
|
||||
type Result struct {
|
||||
*repository.Index
|
||||
restic.ID
|
||||
Err error
|
||||
}
|
||||
|
||||
worker := func(ctx context.Context, id restic.ID) error {
|
||||
debug.Log("worker got index %v", id)
|
||||
idx, err := repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeIndex)
|
||||
if errors.Cause(err) == repository.ErrOldIndexFormat {
|
||||
debug.Log("index %v has old format", id)
|
||||
hints = append(hints, ErrOldIndexFormat{id})
|
||||
ch := make(chan FileInfo)
|
||||
resultCh := make(chan Result)
|
||||
|
||||
idx, err = repository.LoadIndexWithDecoder(ctx, c.repo, id, repository.DecodeOldIndex)
|
||||
// send list of index files through ch, which is closed afterwards
|
||||
wg.Go(func() error {
|
||||
defer close(ch)
|
||||
return c.repo.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case ch <- FileInfo{id, size}:
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
||||
// a worker receives an index ID from ch, loads the index, and sends it to indexCh
|
||||
worker := func() error {
|
||||
var buf []byte
|
||||
for fi := range ch {
|
||||
debug.Log("worker got file %v", fi.ID.Str())
|
||||
var err error
|
||||
var idx *repository.Index
|
||||
idx, buf, err = repository.LoadIndexWithDecoder(ctx, c.repo, buf[:0], fi.ID, repository.DecodeIndex)
|
||||
if errors.Cause(err) == repository.ErrOldIndexFormat {
|
||||
debug.Log("index %v has old format", fi.ID.Str())
|
||||
hints = append(hints, ErrOldIndexFormat{fi.ID})
|
||||
|
||||
idx, buf, err = repository.LoadIndexWithDecoder(ctx, c.repo, buf[:0], fi.ID, repository.DecodeOldIndex)
|
||||
}
|
||||
|
||||
err = errors.Wrapf(err, "error loading index %v", fi.ID.Str())
|
||||
|
||||
select {
|
||||
case resultCh <- Result{idx, fi.ID, err}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.Wrapf(err, "error loading index %v", id.Str())
|
||||
|
||||
select {
|
||||
case indexCh <- indexRes{Index: idx, ID: id.String(), err: err}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(indexCh)
|
||||
debug.Log("start loading indexes in parallel")
|
||||
err := repository.FilesInParallel(ctx, c.repo.Backend(), restic.IndexFile, defaultParallelism,
|
||||
repository.ParallelWorkFuncParseID(worker))
|
||||
debug.Log("loading indexes finished, error: %v", err)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
// final closes indexCh after all workers have terminated
|
||||
final := func() error {
|
||||
close(resultCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
// run workers on ch
|
||||
wg.Go(func() error {
|
||||
return repository.RunWorkers(ctx, defaultParallelism, worker, final)
|
||||
})
|
||||
|
||||
// receive decoded indexes
|
||||
packToIndex := make(map[restic.ID]restic.IDSet)
|
||||
wg.Go(func() error {
|
||||
for res := range resultCh {
|
||||
debug.Log("process index %v, err %v", res.ID, res.Err)
|
||||
|
||||
for res := range indexCh {
|
||||
debug.Log("process index %v, err %v", res.ID, res.err)
|
||||
|
||||
if res.err != nil {
|
||||
errs = append(errs, res.err)
|
||||
continue
|
||||
}
|
||||
|
||||
idxID, err := restic.ParseID(res.ID)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
|
||||
continue
|
||||
}
|
||||
|
||||
c.indexes[idxID] = res.Index
|
||||
c.masterIndex.Insert(res.Index)
|
||||
|
||||
debug.Log("process blobs")
|
||||
cnt := 0
|
||||
for blob := range res.Index.Each(ctx) {
|
||||
c.packs.Insert(blob.PackID)
|
||||
c.blobs.Insert(blob.ID)
|
||||
c.blobRefs.M[blob.ID] = 0
|
||||
cnt++
|
||||
|
||||
if _, ok := packToIndex[blob.PackID]; !ok {
|
||||
packToIndex[blob.PackID] = restic.NewIDSet()
|
||||
if res.Err != nil {
|
||||
errs = append(errs, res.Err)
|
||||
continue
|
||||
}
|
||||
packToIndex[blob.PackID].Insert(idxID)
|
||||
}
|
||||
|
||||
debug.Log("%d blobs processed", cnt)
|
||||
c.indexes[res.ID] = res.Index
|
||||
c.masterIndex.Insert(res.Index)
|
||||
|
||||
debug.Log("process blobs")
|
||||
cnt := 0
|
||||
for blob := range res.Index.Each(ctx) {
|
||||
c.packs.Insert(blob.PackID)
|
||||
c.blobs.Insert(blob.ID)
|
||||
c.blobRefs.M[blob.ID] = 0
|
||||
cnt++
|
||||
|
||||
if _, ok := packToIndex[blob.PackID]; !ok {
|
||||
packToIndex[blob.PackID] = restic.NewIDSet()
|
||||
}
|
||||
packToIndex[blob.PackID].Insert(res.ID)
|
||||
}
|
||||
|
||||
debug.Log("%d blobs processed", cnt)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
err := wg.Wait()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
debug.Log("checking for duplicate packs")
|
||||
@@ -163,7 +191,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
|
||||
}
|
||||
}
|
||||
|
||||
err := c.repo.SetIndex(c.masterIndex)
|
||||
err = c.repo.SetIndex(c.masterIndex)
|
||||
if err != nil {
|
||||
debug.Log("SetIndex returned error: %v", err)
|
||||
errs = append(errs, err)
|
||||
@@ -281,31 +309,52 @@ func loadSnapshotTreeIDs(ctx context.Context, repo restic.Repository) (restic.ID
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
snapshotWorker := func(ctx context.Context, strID string) error {
|
||||
id, err := restic.ParseID(strID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// track spawned goroutines using wg, create a new context which is
|
||||
// cancelled as soon as an error occurs.
|
||||
wg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
debug.Log("load snapshot %v", id)
|
||||
ch := make(chan restic.ID)
|
||||
|
||||
treeID, err := loadTreeFromSnapshot(ctx, repo, id)
|
||||
if err != nil {
|
||||
errs.Lock()
|
||||
errs.errs = append(errs.errs, err)
|
||||
errs.Unlock()
|
||||
// send list of index files through ch, which is closed afterwards
|
||||
wg.Go(func() error {
|
||||
defer close(ch)
|
||||
return repo.List(ctx, restic.SnapshotFile, func(id restic.ID, size int64) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case ch <- id:
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
||||
// a worker receives an index ID from ch, loads the snapshot and the tree,
|
||||
// and adds the result to errs and trees.
|
||||
worker := func() error {
|
||||
for id := range ch {
|
||||
debug.Log("load snapshot %v", id)
|
||||
|
||||
treeID, err := loadTreeFromSnapshot(ctx, repo, id)
|
||||
if err != nil {
|
||||
errs.Lock()
|
||||
errs.errs = append(errs.errs, err)
|
||||
errs.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
debug.Log("snapshot %v has tree %v", id, treeID)
|
||||
trees.Lock()
|
||||
trees.IDs = append(trees.IDs, treeID)
|
||||
trees.Unlock()
|
||||
}
|
||||
|
||||
debug.Log("snapshot %v has tree %v", id, treeID)
|
||||
trees.Lock()
|
||||
trees.IDs = append(trees.IDs, treeID)
|
||||
trees.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := repository.FilesInParallel(ctx, repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
|
||||
for i := 0; i < defaultParallelism; i++ {
|
||||
wg.Go(worker)
|
||||
}
|
||||
|
||||
err := wg.Wait()
|
||||
if err != nil {
|
||||
errs.errs = append(errs.errs, err)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
@@ -19,10 +20,13 @@ type Reader struct {
|
||||
Name string
|
||||
io.ReadCloser
|
||||
|
||||
// for FileInfo
|
||||
Mode os.FileMode
|
||||
ModTime time.Time
|
||||
Size int64
|
||||
|
||||
AllowEmptyFile bool
|
||||
|
||||
open sync.Once
|
||||
}
|
||||
|
||||
@@ -40,7 +44,7 @@ func (fs *Reader) Open(name string) (f File, err error) {
|
||||
switch name {
|
||||
case fs.Name:
|
||||
fs.open.Do(func() {
|
||||
f = newReaderFile(fs.ReadCloser, fs.fi())
|
||||
f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile)
|
||||
})
|
||||
|
||||
if f == nil {
|
||||
@@ -78,7 +82,7 @@ func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err
|
||||
}
|
||||
|
||||
fs.open.Do(func() {
|
||||
f = newReaderFile(fs.ReadCloser, fs.fi())
|
||||
f = newReaderFile(fs.ReadCloser, fs.fi(), fs.AllowEmptyFile)
|
||||
})
|
||||
|
||||
if f == nil {
|
||||
@@ -99,17 +103,32 @@ func (fs *Reader) Stat(name string) (os.FileInfo, error) {
|
||||
// describes the symbolic link. Lstat makes no attempt to follow the link.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func (fs *Reader) Lstat(name string) (os.FileInfo, error) {
|
||||
getDirInfo := func(name string) os.FileInfo {
|
||||
fi := fakeFileInfo{
|
||||
name: fs.Base(name),
|
||||
size: 0,
|
||||
mode: os.ModeDir | 0755,
|
||||
modtime: time.Now(),
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
switch name {
|
||||
case fs.Name:
|
||||
return fs.fi(), nil
|
||||
case "/", ".":
|
||||
fi := fakeFileInfo{
|
||||
name: name,
|
||||
size: 0,
|
||||
mode: 0755,
|
||||
modtime: time.Now(),
|
||||
return getDirInfo(name), nil
|
||||
}
|
||||
|
||||
dir := fs.Dir(fs.Name)
|
||||
for {
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
return fi, nil
|
||||
if name == dir {
|
||||
return getDirInfo(name), nil
|
||||
}
|
||||
dir = fs.Dir(dir)
|
||||
}
|
||||
|
||||
return nil, os.ErrNotExist
|
||||
@@ -158,9 +177,10 @@ func (fs *Reader) Dir(p string) string {
|
||||
return path.Dir(p)
|
||||
}
|
||||
|
||||
func newReaderFile(rd io.ReadCloser, fi os.FileInfo) readerFile {
|
||||
return readerFile{
|
||||
ReadCloser: rd,
|
||||
func newReaderFile(rd io.ReadCloser, fi os.FileInfo, allowEmptyFile bool) *readerFile {
|
||||
return &readerFile{
|
||||
ReadCloser: rd,
|
||||
AllowEmptyFile: allowEmptyFile,
|
||||
fakeFile: fakeFile{
|
||||
FileInfo: fi,
|
||||
name: fi.Name(),
|
||||
@@ -170,19 +190,41 @@ func newReaderFile(rd io.ReadCloser, fi os.FileInfo) readerFile {
|
||||
|
||||
type readerFile struct {
|
||||
io.ReadCloser
|
||||
AllowEmptyFile, bytesRead bool
|
||||
|
||||
fakeFile
|
||||
}
|
||||
|
||||
func (r readerFile) Read(p []byte) (int, error) {
|
||||
return r.ReadCloser.Read(p)
|
||||
// ErrFileEmpty is returned inside a *os.PathError by Read() for the file
|
||||
// opened from the fs provided by Reader when no data could be read and
|
||||
// AllowEmptyFile is not set.
|
||||
var ErrFileEmpty = errors.New("no data read")
|
||||
|
||||
func (r *readerFile) Read(p []byte) (int, error) {
|
||||
n, err := r.ReadCloser.Read(p)
|
||||
if n > 0 {
|
||||
r.bytesRead = true
|
||||
}
|
||||
|
||||
// return an error if we did not read any data
|
||||
if err == io.EOF && !r.AllowEmptyFile && !r.bytesRead {
|
||||
fmt.Printf("reader: %d bytes read, err %v, bytesRead %v, allowEmpty %v\n", n, err, r.bytesRead, r.AllowEmptyFile)
|
||||
return n, &os.PathError{
|
||||
Path: r.fakeFile.name,
|
||||
Op: "read",
|
||||
Err: ErrFileEmpty,
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r readerFile) Close() error {
|
||||
func (r *readerFile) Close() error {
|
||||
return r.ReadCloser.Close()
|
||||
}
|
||||
|
||||
// ensure that readerFile implements File
|
||||
var _ File = readerFile{}
|
||||
var _ File = &readerFile{}
|
||||
|
||||
// fakeFile implements all File methods, but only returns errors for anything
|
||||
// except Stat() and Name().
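The new AllowEmptyFile flag turns "EOF before the first byte" into an explicit error. The same idea in isolation, as a hedged sketch with illustrative names rather than the restic types themselves:

package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

var errEmpty = errors.New("no data read")

// nonEmptyReader wraps an io.Reader and reports an error when EOF is reached
// before a single byte has been read, mirroring readerFile.Read above.
type nonEmptyReader struct {
	r         io.Reader
	bytesRead bool
}

func (n *nonEmptyReader) Read(p []byte) (int, error) {
	c, err := n.r.Read(p)
	if c > 0 {
		n.bytesRead = true
	}
	if err == io.EOF && !n.bytesRead {
		return c, errEmpty
	}
	return c, err
}

func main() {
	_, err := ioutil.ReadAll(&nonEmptyReader{r: strings.NewReader("")})
	fmt.Println(err) // "no data read": empty input is rejected
}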
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -70,8 +72,8 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Sort(sort.StringSlice(want))
|
||||
sort.Sort(sort.StringSlice(entries))
|
||||
sort.Strings(want)
|
||||
sort.Strings(entries)
|
||||
|
||||
if !cmp.Equal(want, entries) {
|
||||
t.Error(cmp.Diff(want, entries))
|
||||
@@ -150,8 +152,8 @@ func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileIn
|
||||
}
|
||||
|
||||
func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) {
|
||||
if fi.IsDir() {
|
||||
t.Errorf("IsDir returned true, want false")
|
||||
if fi.IsDir() != isdir {
|
||||
t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir)
|
||||
}
|
||||
|
||||
if fi.Mode() != mode {
|
||||
@@ -162,8 +164,12 @@ func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.T
|
||||
t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime())
|
||||
}
|
||||
|
||||
if fi.Name() != filename {
|
||||
t.Errorf("Name() returned wrong value, want %q, got %q", filename, fi.Name())
|
||||
if path.Base(fi.Name()) != fi.Name() {
|
||||
t.Errorf("Name() returned is not base, want %q, got %q", path.Base(fi.Name()), fi.Name())
|
||||
}
|
||||
|
||||
if fi.Name() != path.Base(filename) {
|
||||
t.Errorf("Name() returned wrong value, want %q, got %q", path.Base(filename), fi.Name())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -264,7 +270,7 @@ func TestFSReader(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkFileInfo(t, fi, "/", time.Time{}, 0755, false)
|
||||
checkFileInfo(t, fi, "/", time.Time{}, os.ModeDir|0755, true)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -275,7 +281,16 @@ func TestFSReader(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkFileInfo(t, fi, ".", time.Time{}, 0755, false)
|
||||
checkFileInfo(t, fi, ".", time.Time{}, os.ModeDir|0755, true)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "dir/Lstat-error-not-exist",
|
||||
f: func(t *testing.T, fs FS) {
|
||||
_, err := fs.Lstat("other")
|
||||
if err != os.ErrNotExist {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -286,7 +301,7 @@ func TestFSReader(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkFileInfo(t, fi, "/", time.Time{}, 0755, false)
|
||||
checkFileInfo(t, fi, "/", time.Time{}, os.ModeDir|0755, true)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -297,7 +312,7 @@ func TestFSReader(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkFileInfo(t, fi, ".", time.Time{}, 0755, false)
|
||||
checkFileInfo(t, fi, ".", time.Time{}, os.ModeDir|0755, true)
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -317,3 +332,114 @@ func TestFSReader(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSReaderDir(t *testing.T) {
|
||||
data := test.Random(55, 1<<18+588)
|
||||
now := time.Now()
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
filename string
|
||||
}{
|
||||
{
|
||||
name: "Lstat-absolute",
|
||||
filename: "/path/to/foobar",
|
||||
},
|
||||
{
|
||||
name: "Lstat-relative",
|
||||
filename: "path/to/foobar",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fs := &Reader{
|
||||
Name: test.filename,
|
||||
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
|
||||
|
||||
Mode: 0644,
|
||||
Size: int64(len(data)),
|
||||
ModTime: now,
|
||||
}
|
||||
|
||||
dir := path.Dir(fs.Name)
|
||||
for {
|
||||
if dir == "/" || dir == "." {
|
||||
break
|
||||
}
|
||||
|
||||
fi, err := fs.Lstat(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkFileInfo(t, fi, dir, time.Time{}, os.ModeDir|0755, true)
|
||||
|
||||
dir = path.Dir(dir)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSReaderMinFileSize(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
data string
|
||||
allowEmpty bool
|
||||
readMustErr bool
|
||||
}{
|
||||
{
|
||||
name: "regular",
|
||||
data: "foobar",
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
data: "",
|
||||
allowEmpty: false,
|
||||
readMustErr: true,
|
||||
},
|
||||
{
|
||||
name: "empty2",
|
||||
data: "",
|
||||
allowEmpty: true,
|
||||
readMustErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fs := &Reader{
|
||||
Name: "testfile",
|
||||
ReadCloser: ioutil.NopCloser(strings.NewReader(test.data)),
|
||||
Mode: 0644,
|
||||
ModTime: time.Now(),
|
||||
AllowEmptyFile: test.allowEmpty,
|
||||
}
|
||||
|
||||
f, err := fs.Open("testfile")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
if test.readMustErr {
|
||||
if err == nil {
|
||||
t.Fatal("expected error not found, got nil")
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if string(buf) != test.data {
|
||||
t.Fatalf("wrong data returned, want %q, got %q", test.data, string(buf))
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ type ExtendedFileInfo struct {
|
||||
|
||||
AccessTime time.Time // last access time stamp
|
||||
ModTime time.Time // last (content) modification time stamp
|
||||
ChangeTime time.Time // last status change time stamp
|
||||
}
|
||||
|
||||
// ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo.
|
||||
|
||||
@@ -30,6 +30,7 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo {
|
||||
|
||||
AccessTime: time.Unix(s.Atimespec.Unix()),
|
||||
ModTime: time.Unix(s.Mtimespec.Unix()),
|
||||
ChangeTime: time.Unix(s.Ctimespec.Unix()),
|
||||
}
|
||||
|
||||
return extFI
|
||||
|
||||
@@ -30,6 +30,7 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo {
|
||||
|
||||
AccessTime: time.Unix(s.Atim.Unix()),
|
||||
ModTime: time.Unix(s.Mtim.Unix()),
|
||||
ChangeTime: time.Unix(s.Ctim.Unix()),
|
||||
}
|
||||
|
||||
return extFI
|
||||
|
||||
@@ -27,5 +27,7 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo {
|
||||
mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds())
|
||||
extFI.ModTime = time.Unix(mtime.Unix())
|
||||
|
||||
extFI.ChangeTime = extFI.ModTime
|
||||
|
||||
return extFI
|
||||
}
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
package mock
|
||||
|
||||
import (
|
||||
"github.com/restic/restic/internal/crypto"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// Repository implements a mock Repository.
|
||||
type Repository struct {
|
||||
BackendFn func() restic.Backend
|
||||
|
||||
KeyFn func() *crypto.Key
|
||||
|
||||
SetIndexFn func(restic.Index) error
|
||||
|
||||
IndexFn func() restic.Index
|
||||
SaveFullIndexFn func() error
|
||||
SaveIndexFn func() error
|
||||
LoadIndexFn func() error
|
||||
|
||||
ConfigFn func() restic.Config
|
||||
|
||||
LookupBlobSizeFn func(restic.ID, restic.BlobType) (uint, error)
|
||||
|
||||
ListFn func(restic.FileType, <-chan struct{}) <-chan restic.ID
|
||||
ListPackFn func(restic.ID) ([]restic.Blob, int64, error)
|
||||
|
||||
FlushFn func() error
|
||||
|
||||
SaveUnpackedFn func(restic.FileType, []byte) (restic.ID, error)
|
||||
SaveJSONUnpackedFn func(restic.FileType, interface{}) (restic.ID, error)
|
||||
|
||||
LoadJSONUnpackedFn func(restic.FileType, restic.ID, interface{}) error
|
||||
LoadAndDecryptFn func(restic.FileType, restic.ID) ([]byte, error)
|
||||
|
||||
LoadBlobFn func(restic.BlobType, restic.ID, []byte) (int, error)
|
||||
SaveBlobFn func(restic.BlobType, []byte, restic.ID) (restic.ID, error)
|
||||
|
||||
LoadTreeFn func(restic.ID) (*restic.Tree, error)
|
||||
SaveTreeFn func(t *restic.Tree) (restic.ID, error)
|
||||
}
|
||||
|
||||
// Backend is a stub method.
|
||||
func (repo Repository) Backend() restic.Backend {
|
||||
return repo.BackendFn()
|
||||
}
|
||||
|
||||
// Key is a stub method.
|
||||
func (repo Repository) Key() *crypto.Key {
|
||||
return repo.KeyFn()
|
||||
}
|
||||
|
||||
// SetIndex is a stub method.
|
||||
func (repo Repository) SetIndex(idx restic.Index) error {
|
||||
return repo.SetIndexFn(idx)
|
||||
}
|
||||
|
||||
// Index is a stub method.
|
||||
func (repo Repository) Index() restic.Index {
|
||||
return repo.IndexFn()
|
||||
}
|
||||
|
||||
// SaveFullIndex is a stub method.
|
||||
func (repo Repository) SaveFullIndex() error {
|
||||
return repo.SaveFullIndexFn()
|
||||
}
|
||||
|
||||
// SaveIndex is a stub method.
|
||||
func (repo Repository) SaveIndex() error {
|
||||
return repo.SaveIndexFn()
|
||||
}
|
||||
|
||||
// LoadIndex is a stub method.
|
||||
func (repo Repository) LoadIndex() error {
|
||||
return repo.LoadIndexFn()
|
||||
}
|
||||
|
||||
// Config is a stub method.
|
||||
func (repo Repository) Config() restic.Config {
|
||||
return repo.ConfigFn()
|
||||
}
|
||||
|
||||
// LookupBlobSize is a stub method.
|
||||
func (repo Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) {
|
||||
return repo.LookupBlobSizeFn(id, t)
|
||||
}
|
||||
|
||||
// List is a stub method.
|
||||
func (repo Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID {
|
||||
return repo.ListFn(t, done)
|
||||
}
|
||||
|
||||
// ListPack is a stub method.
|
||||
func (repo Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) {
|
||||
return repo.ListPackFn(id)
|
||||
}
|
||||
|
||||
// Flush is a stub method.
|
||||
func (repo Repository) Flush() error {
|
||||
return repo.FlushFn()
|
||||
}
|
||||
|
||||
// SaveUnpacked is a stub method.
|
||||
func (repo Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) {
|
||||
return repo.SaveUnpackedFn(t, buf)
|
||||
}
|
||||
|
||||
// SaveJSONUnpacked is a stub method.
|
||||
func (repo Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) {
|
||||
return repo.SaveJSONUnpackedFn(t, item)
|
||||
}
|
||||
|
||||
// LoadJSONUnpacked is a stub method.
|
||||
func (repo Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error {
|
||||
return repo.LoadJSONUnpackedFn(t, id, item)
|
||||
}
|
||||
|
||||
// LoadAndDecrypt is a stub method.
|
||||
func (repo Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
|
||||
return repo.LoadAndDecryptFn(t, id)
|
||||
}
|
||||
|
||||
// LoadBlob is a stub method.
|
||||
func (repo Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
|
||||
return repo.LoadBlobFn(t, id, buf)
|
||||
}
|
||||
|
||||
// SaveBlob is a stub method.
|
||||
func (repo Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) {
|
||||
return repo.SaveBlobFn(t, buf, id)
|
||||
}
|
||||
|
||||
// LoadTree is a stub method.
|
||||
func (repo Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
|
||||
return repo.LoadTreeFn(id)
|
||||
}
|
||||
|
||||
// SaveTree is a stub method.
|
||||
func (repo Repository) SaveTree(t *restic.Tree) (restic.ID, error) {
|
||||
return repo.SaveTreeFn(t)
|
||||
}
|
||||
@@ -22,10 +22,8 @@ func Register(ns string, cfg interface{}) {
|
||||
|
||||
// List returns a list of all registered options (using Register()).
|
||||
func List() (list []Help) {
|
||||
list = make([]Help, 0, len(opts))
|
||||
for _, opt := range opts {
|
||||
list = append(list, opt)
|
||||
}
|
||||
list = make([]Help, len(opts))
|
||||
copy(list, opts)
|
||||
return list
|
||||
}
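The List() change above replaces an append loop with a pre-sized copy; both forms produce the same slice, as this tiny sketch with placeholder data shows:

package main

import "fmt"

func main() {
	opts := []string{"s3.connections", "local.layout", "rclone.program"}

	// pre-size and copy (the form used above)
	list := make([]string, len(opts))
	copy(list, opts)

	// zero length, pre-allocated capacity, then append
	list2 := make([]string, 0, len(opts))
	list2 = append(list2, opts...)

	fmt.Println(list)
	fmt.Println(list2)
}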
|
||||
|
||||
|
||||
@@ -549,21 +549,21 @@ func DecodeOldIndex(buf []byte) (idx *Index, err error) {
|
||||
}
|
||||
|
||||
// LoadIndexWithDecoder loads the index and decodes it with fn.
|
||||
func LoadIndexWithDecoder(ctx context.Context, repo restic.Repository, id restic.ID, fn func([]byte) (*Index, error)) (idx *Index, err error) {
|
||||
func LoadIndexWithDecoder(ctx context.Context, repo restic.Repository, buf []byte, id restic.ID, fn func([]byte) (*Index, error)) (*Index, []byte, error) {
|
||||
debug.Log("Loading index %v", id)
|
||||
|
||||
buf, err := repo.LoadAndDecrypt(ctx, restic.IndexFile, id)
|
||||
buf, err := repo.LoadAndDecrypt(ctx, buf[:0], restic.IndexFile, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, buf[:0], err
|
||||
}
|
||||
|
||||
idx, err = fn(buf)
|
||||
idx, err := fn(buf)
|
||||
if err != nil {
|
||||
debug.Log("error while decoding index %v: %v", id, err)
|
||||
return nil, err
|
||||
return nil, buf[:0], err
|
||||
}
|
||||
|
||||
idx.id = id
|
||||
|
||||
return idx, nil
|
||||
return idx, buf, nil
|
||||
}
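LoadIndexWithDecoder now follows the same convention as LoadAll above: the caller passes in a scratch buffer and receives the possibly reallocated buffer back for the next call. A minimal sketch of that convention, using a made-up fill function:

package main

import "fmt"

// fill overwrites buf with n bytes and returns it, growing it only when the
// existing capacity is too small.
func fill(buf []byte, n int) []byte {
	buf = buf[:0]
	for i := 0; i < n; i++ {
		buf = append(buf, byte(i))
	}
	return buf
}

func main() {
	var buf []byte
	for _, n := range []int{16, 256, 64} {
		buf = fill(buf, n)
		fmt.Println(len(buf), cap(buf)) // capacity only grows on the larger load
	}
}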
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
|
||||
var (
|
||||
// ErrNoKeyFound is returned when no key for the repository could be decrypted.
|
||||
ErrNoKeyFound = errors.Fatal("wrong password or no key found")
|
||||
ErrNoKeyFound = errors.New("wrong password or no key found")
|
||||
|
||||
// ErrMaxKeysReached is returned when the maximum number of keys was checked and no key could be found.
|
||||
ErrMaxKeysReached = errors.Fatal("maximum number of keys reached")
|
||||
@@ -184,7 +184,7 @@ func SearchKey(ctx context.Context, s *Repository, password string, maxKeys int,
|
||||
// LoadKey loads a key from the backend.
|
||||
func LoadKey(ctx context.Context, s *Repository, name string) (k *Key, err error) {
|
||||
h := restic.Handle{Type: restic.KeyFile, Name: name}
|
||||
data, err := backend.LoadAll(ctx, s.be, h)
|
||||
data, err := backend.LoadAll(ctx, nil, s.be, h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// ParallelWorkFunc gets one file ID to work on. If an error is returned,
|
||||
// processing stops. When the context is cancelled the function should return.
|
||||
type ParallelWorkFunc func(ctx context.Context, id string) error
|
||||
|
||||
// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned,
|
||||
// processing stops. When the context is cancelled the function should return.
|
||||
type ParallelIDWorkFunc func(ctx context.Context, id restic.ID) error
|
||||
|
||||
// FilesInParallel runs n workers of f in parallel, on the IDs that
|
||||
// repo.List(t) yields. If f returns an error, the process is aborted and the
|
||||
// first error is returned.
|
||||
func FilesInParallel(ctx context.Context, repo restic.Lister, t restic.FileType, n int, f ParallelWorkFunc) error {
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
ch := make(chan string, n)
|
||||
g.Go(func() error {
|
||||
defer close(ch)
|
||||
return repo.List(ctx, t, func(fi restic.FileInfo) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case ch <- fi.Name:
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
g.Go(func() error {
|
||||
for name := range ch {
|
||||
err := f(ctx, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// ParallelWorkFuncParseID converts a function that takes a restic.ID to a
|
||||
// function that takes a string. Filenames that do not parse as a restic.ID
|
||||
// are ignored.
|
||||
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
|
||||
return func(ctx context.Context, s string) error {
|
||||
id, err := restic.ParseID(s)
|
||||
if err != nil {
|
||||
debug.Log("invalid ID %q: %v", id, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return f(ctx, id)
|
||||
}
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
package repository_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/restic/restic/internal/repository"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
type testIDs []string
|
||||
|
||||
var lister = testIDs{
|
||||
"40bb581cd36de952985c97a3ff6b21df41ee897d4db2040354caa36a17ff5268",
|
||||
"2e15811a4d14ffac66d36a9ff456019d8de4c10c949d45b643f8477d17e92ff3",
|
||||
"70c11b3ed521ad6b76d905c002ca98b361fca06aca060a063432c7311155a4da",
|
||||
"8056a33e75dccdda701b6c989c7ed0cb71bbb6da13c6427fe5986f0896cc91c0",
|
||||
"79d8776200596aa0237b10d470f7b850b86f8a1a80988ef5c8bee2874ce992e2",
|
||||
"f9f1f29791c6b79b90b35efd083f17a3b163bbbafb1a2fdf43d46d56cffda289",
|
||||
"3834178d05d0f6dd07f872ee0262ff1ace0f0f375768227d3c902b0b66591369",
|
||||
"66d5cc68c9186414806f366ae5493ce7f229212993750a4992be4030f6af28c5",
|
||||
"ebca5af4f397944f68cd215e3dfa2b197a7ba0f7c17d65d9f7390d0a15cde296",
|
||||
"d4511ce6ff732d106275a57e40745c599e987c0da44c42cddbef592aac102437",
|
||||
"f366202f0bfeefaedd7b49e2f21a90d3cbddb97d257a74d788dd34e19a684dae",
|
||||
"a5c17728ab2433cd50636dd5c6c7068c7a44f2999d09c46e8f528466da8a059d",
|
||||
"bae0f9492b9b208233029b87692a1a55cbd7fbe1cf3f6d7bc693ac266a6d6f0e",
|
||||
"9d500187913c7510d71d1902703d312c7aaa56f1e98351385b9535fdabae595e",
|
||||
"ffbddd8a4c1e54d258bb3e16d3929b546b61af63cb560b3e3061a8bef5b24552",
|
||||
"201bb3abf655e7ef71e79ed4fb1079b0502b5acb4d9fad5e72a0de690c50a386",
|
||||
"08eb57bbd559758ea96e99f9b7688c30e7b3bcf0c4562ff4535e2d8edeffaeed",
|
||||
"e50b7223b04985ff38d9e11d1cba333896ef4264f82bd5d0653a028bce70e542",
|
||||
"65a9421cd59cc7b7a71dcd9076136621af607fb4701d2e5c2af23b6396cf2f37",
|
||||
"995a655b3521c19b4d0c266222266d89c8fc62889597d61f45f336091e646d57",
|
||||
"51ec6f0bce77ed97df2dd7ae849338c3a8155a057da927eedd66e3d61be769ad",
|
||||
"7b3923a0c0666431efecdbf6cb171295ec1710b6595eebcba3b576b49d13e214",
|
||||
"2cedcc3d14698bea7e4b0546f7d5d48951dd90add59e6f2d44b693fd8913717d",
|
||||
"fd6770cbd54858fdbd3d7b4239b985e5599180064d93ca873f27e86e8407d011",
|
||||
"9edc51d8e6e04d05c9757848c1bfbfdc8e86b6330982294632488922e59fdb1b",
|
||||
"1a6c4fbb24ad724c968b2020417c3d057e6c89e49bdfb11d91006def65eab6a0",
|
||||
"cb3b29808cd0adfa2dca1f3a04f98114fbccf4eb487cdd4022f49bd70eeb049b",
|
||||
"f55edcb40c619e29a20e432f8aaddc83a649be2c2d1941ccdc474cd2af03d490",
|
||||
"e8ccc1763a92de23566b95c3ad1414a098016ece69a885fc8a72782a7517d17c",
|
||||
"0fe2e3db8c5a12ad7101a63a0fffee901be54319cfe146bead7aec851722f82d",
|
||||
"36be45a6ae7c95ad97cee1b33023be324bce7a7b4b7036e24125679dd9ff5b44",
|
||||
"1685ed1a57c37859fbef1f7efb7509f20b84ec17a765605de43104d2fa37884b",
|
||||
"9d83629a6a004c505b100a0b5d0b246833b63aa067aa9b59e3abd6b74bc4d3a8",
|
||||
"be49a66b60175c5e2ee273b42165f86ef11bb6518c1c79950bcd3f4c196c98bd",
|
||||
"0fd89885d821761b4a890782908e75793028747d15ace3c6cbf0ad56582b4fa5",
|
||||
"94a767519a4e352a88796604943841fea21429f3358b4d5d55596dbda7d15dce",
|
||||
"8dd07994afe6e572ddc9698fb0d13a0d4c26a38b7992818a71a99d1e0ac2b034",
|
||||
"f7380a6f795ed31fbeb2945c72c5fd1d45044e5ab152311e75e007fa530f5847",
|
||||
"5ca1ce01458e484393d7e9c8af42b0ff37a73a2fee0f18e14cff0fb180e33014",
|
||||
"8f44178be3fe0a2bd41f922576fb7a9b19d589754504be746f56c759df328fda",
|
||||
"12d33847c2be711c989f37360dd7aa8537fd14972262a4530634a08fdf32a767",
|
||||
"31e077f5080f78846a00093caff2b6b839519cc47516142eeba9c41d4072a605",
|
||||
"14f01db8a0054e70222b76d2555d70114b4bf8a0f02084324af2df226f14a795",
|
||||
"7f5dbbaf31b4551828e8e76cef408375db9fbcdcdb6b5949f2d1b0c4b8632132",
|
||||
"42a5d9b9bb7e4a16f23ba916bcf87f38c1aa1f2de2ab79736f725850a8ff6a1b",
|
||||
"e06f8f901ea708beba8712a11b6e2d0be7c4b018d0254204ef269bcdf5e8c6cc",
|
||||
"d9ba75785bf45b0c4fd3b2365c968099242483f2f0d0c7c20306dac11fae96e9",
|
||||
"428debbb280873907cef2ec099efe1566e42a59775d6ec74ded0c4048d5a6515",
|
||||
"3b51049d4dae701098e55a69536fa31ad2be1adc17b631a695a40e8a294fe9c0",
|
||||
"168f88aa4b105e9811f5f79439cc1a689be4eec77f3361d42f22fe8f7ddc74a9",
|
||||
"0baa0ab2249b33d64449a899cb7bd8eae5231f0d4ff70f09830dc1faa2e4abee",
|
||||
"0c3896d346b580306a49de29f3a78913a41e14b8461b124628c33a64636241f2",
|
||||
"b18313f1651c15e100e7179aa3eb8ffa62c3581159eaf7f83156468d19781e42",
|
||||
"996361f7d988e48267ccc7e930fed4637be35fe7562b8601dceb7a32313a14c8",
|
||||
"dfb4e6268437d53048d22b811048cd045df15693fc6789affd002a0fc80a6e60",
|
||||
"34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58",
|
||||
}
|
||||
|
||||
func (tests testIDs) List(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error {
|
||||
for i := 0; i < 500; i++ {
|
||||
for _, id := range tests {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
fi := restic.FileInfo{
|
||||
Name: id,
|
||||
}
|
||||
|
||||
err := fn(fi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestFilesInParallel(t *testing.T) {
|
||||
f := func(ctx context.Context, id string) error {
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
for n := 1; n < 5; n++ {
|
||||
err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f)
|
||||
rtest.OK(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
var errTest = errors.New("test error")
|
||||
|
||||
func TestFilesInParallelWithError(t *testing.T) {
|
||||
f := func(ctx context.Context, id string) error {
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
|
||||
if rand.Float32() < 0.01 {
|
||||
return errTest
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for n := 1; n < 5; n++ {
|
||||
err := repository.FilesInParallel(context.TODO(), lister, restic.DataFile, n*100, f)
|
||||
if err != errTest {
|
||||
t.Fatalf("wrong error returned, want %q, got %v", errTest, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.