Mirror of https://github.com/restic/restic.git — synced 2026-02-22 16:56:24 +00:00

Compare commits — 320 commits
Commits in this comparison (SHA1):

d1ecdf7441 088ca033f8 5b7dd32c20 eb94395f3d 22f5fc5739 e994cacbfe 3114d41cb7 968b2ece43
feed54caef 4eddcb344e 2ae06a7a01 25945718a1 254188f38f 3601c39177 02f7bb0d4c 565d72ef36
3806623c23 0fa12839a5 a257a613d7 0a752b9fab eeec0d63c2 04d6b5da2f 1dfd3b8aa3 0873821b98
0a9cbd47c7 b61027b48d 53701891a1 68b462d057 649f789190 7b3e319398 5494c1858e c5763e59d5
b090c73bd4 2b9a408ccc 83c35bd6b5 98b012a04e a9af896ddd 309dca8179 8144cd24d6 0ce8191be5
595f2582fa da83bd8265 799cc37c22 35ba817128 29a61950dd acd39eaab5 3d55b54f3d daae3500dd
64fe9ec048 cb80a70aca 24398d2b9d d4a2d70089 9add72e9d6 e7fc908ff1 4ffca0f4b4 a0f3e94655
6485a6cdc0 931f5cdd33 3975d76f23 bf6602bc1b a85ffc66ae 828267aaa3 a77c615909 cfdf4c92f7
0f9fb37c78 673bce936e 1f83635267 2d7e1b5804 085cf36199 ceb4a3ecc0 cf7795ce64 223dc78acb
f63cd12569 65afeba19a 791f73e0db 8ded453ab0 e443454c4b 1dd9a58e5a b628bcee27 dfc0cbf3a8
512a92895f 6ab425f130 f5b9ee53a3 ea073f58cf bef5c4acb8 b5b3c0eaf8 1fb80bf0e2 436332d5f2
fe8c12c798 1cc59010f5 878c1cd936 5170c4898a 2054e3c026 ffbe05af9b 84f95a09d7 573410afab
619939ccd9 714a5d1dc4 bc42dbdf87 765b5437bd 5d7b38cabf debf1fce54 0045f2fb61 5764b55aee
5e3a41dbd2 88d0f24ce7 eb6e3ba8b3 528c301891 f7ae0cb78f 3695ba5882 4c95d2cfdc cc6a8b6e15
51d8e6aa28 f0600c1d5f 90da66261a 82c2dafb23 bfdd26c541 e699f6d1bd fae65ebc61 f744c3534d
9ce40761c8 48924009fe d497fb6966 5bc7f150f8 a6eda344a4 1aa52e5e1e 769f06cea2 8d90588020
9cf63c99cf 4a0f77650b b53679a24d b06845c545 c55b6ee544 045f545085 038b63f7f7 d3f4c816c7
72aa6be38d 444a268ce0 17a38faa43 24385ff56e f51bc8e9b9 6f5bf45212 3af8f53097 6c6b0e2395
26351522c5 dec2e4788e f9cd736b33 553dd00741 88634dac3a 83924d0864 22bde5b277 cdbdf74811
db16702263 5dd137d53e 8de06bd453 a7e64afc0d ed09887d9e d097d40237 196bbbd25b 93e62c6f18
3acf03986a 12a904eb4b 7f06ec98b8 d62264c837 b2a67d458c de88fb2022 71263b5090 3fd1e4a992
9f752b8306 e07ae7631c 9fd941f6fc 91c458bf74 374b1144de f05b0871e9 4cb8fe3210 08eb5b42eb
1c703e4161 ebd3723a06 06b23edb39 e893be3dec ca14942c80 11d01fcd32 5061607e77 69d8fe5b4f
916efa4e1a a3492d69dd 8e24c51233 d8107f77aa 79e950b710 f0d7f3f1bd 9afec53c55 11098d6eb0
7e6fc15ece 78c0995853 84c14e623d d965d703d1 b20921d836 a78493f549 2be0aa9dbc aa29c68189
d3da30e8fb 3337b5d3c4 458448357c 27d0909302 5f0ebb71b2 00f647dc92 8e7202bd6a 5cf7c827b8
71f7f4f543 bf47dba1c4 cbd457e557 6cf4b81558 bb84d351f1 a107e3cc84 e934966b54 bd9f23f1d2
2a2fb74ba8 bd819a5e81 162629571d 2c04ad3c29 238d3807e9 7f9d227725 8de6e5a627 8d735cf6a9
29bb845f0e 1bb2d59e38 3ceb2ad3cf 009c803c8a c0ef1ec6fd 69c2e8ce7e f102406cd7 302619a11a
80bcae44e2 1f263a7683 3b57075109 3fa7304e94 47950b82a0 9ecf7070af 2310773798 a60e3b5030
b350b443d0 2c517e4a33 4bdd59b4ad f5daf33322 1058a91b39 240b8f273a 6808523d34 bad6184ab5
6b384287f3 ef33cf12ca a5cbbb8b5a 71924fb7c0 b0565015cc fa283c6ecd 94d157d97a f72f3dbc6a
3c3a180417 fd6c854a21 e9cddc0be5 d7e5f11b78 2b1b6d8c2a acc2fa5816 6285f31604 3cca831b2e
cff6fea32a 17e1872544 246302375d 231da4ff80 1b4b469440 35e3762e37 7e732dbd2d 8b4d4ec25f
035d0aeb31 f1bc181c5b 50b724ca23 6227821b4e 810056c2bc 34b3e3a095 bdd085e9f1 ffc3503e6f
51b16ad57d 723592d923 22aa17091b 4720a7d807 d5323223f4 fe79177b40 5c32ae15c2 6c2334f505
b55ac2afd6 d9012b4a64 952f124238 14db71d3fa f59ffcaeae d609e4a986 0e6c72ad1d d5f42201c5
122a0944a6 fa26ecc8f9 00139648a0 6ba38e9a38 812cb0ba77 b5c397435c 043424824c c88c48a29f
2fa93b291a 1ad4d1aafd b108966b12 1fe8deeb6e fa4570bde8 f6c2787d80 4b8b625b90 be00d91967
e4a9905d6f 68ec29e7ec d860ce0570 fc9b27c533 d4a9b546c1 628fb0fb72 2de233fe8b d2834b61fb
.travis.yml — 26 lines changed

@@ -2,23 +2,30 @@ language: go
 sudo: false

 go:
-  - 1.3.3
-  - 1.4.3
-  - 1.5.4
-  - 1.6.2
+  - 1.6.3
+  - 1.7.1

 os:
   - linux
   - osx

 env:
   matrix:
     RESTIC_TEST_FUSE=0

 matrix:
   exclude:
-    - os: osx
-      go: 1.3.3
-    - os: osx
-      go: 1.4.3
     - os: osx
-      go: 1.5.4
+      go: 1.6.3
+    - os: linux
+      go: 1.7.1
+  include:
+    - os: linux
+      go: 1.7.1
+      sudo: true
+      env:
+        RESTIC_TEST_FUSE=1

 notifications:
   irc:

@@ -40,3 +47,4 @@ script:

 after_success:
   - GOPATH=$PWD:$PWD/vendor goveralls -coverprofile=all.cov -service=travis-ci -repotoken "$COVERALLS_TOKEN"
+  - bash <(curl -s https://codecov.io/bash) -f all.cov
@@ -18,7 +18,7 @@

 FROM ubuntu:14.04

-ARG GOVERSION=1.6
+ARG GOVERSION=1.7
 ARG GOARCH=amd64

 # install dependencies

@@ -45,7 +45,6 @@ RUN mkdir -p $HOME/restic
 # pre-install tools, this speeds up running the tests itself
 RUN go get github.com/constabulary/gb/...
 RUN go get golang.org/x/tools/cmd/cover
 RUN go get github.com/mattn/goveralls
-RUN go get github.com/mitchellh/gox
 RUN go get github.com/pierrre/gotestcover
 RUN mkdir $HOME/bin \
README.md — 12 lines changed

@@ -2,7 +2,6 @@
 [](https://travis-ci.org/restic/restic)
 [](https://ci.appveyor.com/project/fd0/restic/branch/master)
 [](http://goreportcard.com/report/github.com/restic/restic)
 [](https://coveralls.io/r/restic/restic)

 Introduction

@@ -22,7 +21,7 @@ latest released version.
 Build restic
 ============

-Install Go/Golang (at least version 1.3), then run `go run build.go`,
+Install Go/Golang (at least version 1.6), then run `go run build.go`,
 afterwards you'll find the binary in the current directory:

     $ go run build.go

@@ -40,11 +39,10 @@ Building restic with gccgo may work, but is not supported.
 Contribute and Documentation
 ============================

-Contributions are welcome! More information can be found in
-[`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
-restic and the data structures stored on the back end is contained in
-[`doc/Design.md`](doc/Design.md).
-The development environment is described in [`CONTRIBUTING.md`](CONTRIBUTING.md).
+Contributions are welcome! More information and a description of the
+development environment can be found in [`CONTRIBUTING.md`](CONTRIBUTING.md). A
+document describing the design of restic and the data structures stored on the
+back end is contained in [`doc/Design.md`](doc/Design.md).

 If you'd like to start contributing to restic, but don't know exactly what to
 do, have a look at this great article by Dave Cheney:
@@ -13,8 +13,8 @@ init:

 install:
   - rmdir c:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.1.windows-amd64.msi
-  - msiexec /i go1.6.1.windows-amd64.msi /q
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.7.windows-amd64.msi
+  - msiexec /i go1.7.windows-amd64.msi /q
   - go version
   - go env
   - appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
build.go — 11 lines changed

@@ -264,14 +264,13 @@ type Constants map[string]string
 func (cs Constants) LDFlags() string {
 	l := make([]string, 0, len(cs))

-	v := runtime.Version()
-	if strings.HasPrefix(v, "go1.5") || strings.HasPrefix(v, "go1.6") || strings.HasPrefix(v, "go1.7") {
+	if runtime.Version() < "go1.5" {
 		for k, v := range cs {
-			l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v))
+			l = append(l, fmt.Sprintf(`-X %q %q`, k, v))
 		}
 	} else {
 		for k, v := range cs {
-			l = append(l, fmt.Sprintf(`-X %q %q`, k, v))
+			l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v))
 		}
 	}

@@ -279,6 +278,10 @@ func (cs Constants) LDFlags() string {
 }

 func main() {
+	if runtime.Version() < "go1.6" {
+		fmt.Fprintf(os.Stderr, "old version of Go detected (%v), I'll try but no guarantees\n", runtime.Version())
+	}
+
 	buildTags := []string{}

 	skipNext := false
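Both hunks compare `runtime.Version()` against a version string with plain `<`. Lexicographic byte comparison happens to order the single-digit-minor releases of this era (`go1.3` … `go1.7`) correctly, which is why the `strings.HasPrefix` chain could be replaced, but it is not a general version comparison. A small self-contained sketch of the behavior (not from the repository):

```go
package main

import "fmt"

func main() {
	// Byte-wise ordering works while minor versions stay single-digit...
	fmt.Println("go1.4" < "go1.5") // true:  go1.4 is older
	fmt.Println("go1.7" < "go1.5") // false: go1.7 is newer

	// ...but it breaks once a two-digit minor appears:
	fmt.Println("go1.10" < "go1.5") // true, although go1.10 is newer
}
```

The two `-X` spellings the function selects between reflect a real toolchain change: linkers before Go 1.5 expected `-X name value`, later ones expect `-X name=value`.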
doc/Manual.md — 276 lines changed

@@ -21,7 +21,8 @@ after installation of 'go' go straight forward to 'git clone [...]'

 If you are using Linux, BSD or Windows, the only way to install restic on your
 system right now is to compile it from source. restic is written in the Go
-programming language and you need at least Go version 1.3. See the [Getting
+programming language and you need at least Go version 1.6. Building restic may
+also work with older versions of Go, but that's not supported. See the [Getting
 started](https://golang.org/doc/install) guide of the Go project for
 instructions on how to install Go.
@@ -44,24 +45,32 @@ Usage help is available:

     restic [OPTIONS] <command>

     Application Options:
       -r, --repo=      Repository directory to backup to/restore from
       --cache-dir=     Directory to use as a local cache
       -q, --quiet      Do not output comprehensive progress report (false)
       --no-lock        Do not lock the repo, this allows some operations on read-only repos. (false)
+      -o, --option=    Specify options in the form 'foo.key=value'

     Help Options:
       -h, --help       Show this help message

     Available commands:
-      backup     save file/directory
-      cache      manage cache
-      cat        dump something
-      find       find a file/directory
-      fsck       check the repository
-      init       create repository
-      key        manage keys
-      list       lists data
-      ls         list files
-      restore    restore a snapshot
-      snapshots  show snapshots
-      version    display version
+      backup         save file/directory
+      cat            dump something
+      check          check the repository
+      find           find a file/directory
+      forget         removes snapshots from a repository
+      init           create repository
+      key            manage keys
+      list           lists data
+      ls             list files
+      mount          mount a repository
+      prune          removes content from a repository
+      rebuild-index  rebuild the index
+      restore        restore a snapshot
+      snapshots      show snapshots
+      unlock         remove locks
+      version        display version

Similar to programs such as `git`, restic has a number of sub-commands. You can
see these commands in the listing above. Each sub-command may have its own
@@ -75,18 +84,32 @@ them, e.g. for the `backup` command:

     The backup command creates a snapshot of a file or directory

     Application Options:
-      -r, --repo=           Repository directory to backup to/restore from
+      -r, --repo=           Repository directory to backup to/restore from (/tmp/repo)
+      -p, --password-file=  Read the repository password from a file
       --cache-dir=          Directory to use as a local cache
       -q, --quiet           Do not output comprehensive progress report (false)
       --no-lock             Do not lock the repo, this allows some operations on read-only repos. (false)
+      -o, --option=         Specify options in the form 'foo.key=value'

     Help Options:
       -h, --help            Show this help message

     [backup command options]
       -p, --parent=         use this parent snapshot (default: last snapshot in repo that has the same target)
       -f, --force           Force re-reading the target. Overrides the "parent" flag
       -e, --exclude=        Exclude a pattern (can be specified multiple times)
+      --exclude-file=       Read exclude-patterns from file
+      --stdin               read backup data from stdin
+      --stdin-filename=     file name to use when reading from stdin (stdin)
+      --tag=                Add a tag (can be specified multiple times)

Subcommands that support showing progress information, such as `backup`, `check` and `prune`, will do so unless
the quiet flag `-q` or `--quiet` is set. When running from a non-interactive console, progress reporting will
be limited to once every 10 seconds so as not to fill your logs.

Additionally, on Unix systems, if `restic` receives a SIGUSR1 signal the current progress will be written to
standard output so you can check up on the status at will.

# Initialize a repository
@@ -107,7 +130,8 @@ Remembering your password is important! If you lose it, you won't be able to
 access data stored in the repository.

 For automated backups, restic accepts the repository location in the
-environment variable `RESTIC_REPOSITORY` and also the password in the variable
+environment variable `RESTIC_REPOSITORY`. The password can be read from a file
+(via the option `--password-file`) or the environment variable
 `RESTIC_PASSWORD`.

 ## Password prompt on Windows
@@ -182,6 +206,13 @@ see [`filepath.Match`](https://golang.org/pkg/path/filepath/#Match) for syntax.
Additionally, `**` excludes arbitrary subdirectories.
Environment variables in exclude files are expanded with [`os.ExpandEnv`](https://golang.org/pkg/os/#ExpandEnv).
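These two standard-library calls are all there is to the basic exclude syntax; a small self-contained sketch (the paths and patterns are invented for the illustration):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// filepath.Match provides the single-segment glob semantics.
	matched, err := filepath.Match("*.go", "build.go")
	fmt.Println(matched, err) // true <nil>

	// Environment variables in exclude files are expanded before matching.
	os.Setenv("BACKUPDIR", "/home/user")
	fmt.Println(os.ExpandEnv("$BACKUPDIR/.cache")) // /home/user/.cache
}
```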
By specifying the option `--one-file-system` you can instruct restic to only
back up files from the file systems the initially specified files or directories
reside on. For example, calling restic like this won't back up `/sys` or
`/dev` on a Linux system:

    $ restic -r /tmp/backup backup --one-file-system /

## Reading data from stdin

Sometimes it can be nice to directly save the output of a program, e.g.
@@ -196,7 +227,17 @@ the fuse mounting option (see below) to mount the repository and read the file.

 By default, the file name `stdin` is used; a different name can be specified
 with `--stdin-filename`, e.g. like this:

-    $ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filenam production.sql
+    $ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql

 ## Tags

 Snapshots can have one or more tags, short strings which add identifying
 information. Just specify the tags for a snapshot with `--tag`:

     $ restic -r /tmp/backup backup --tag projectX ~/shared/work/web
     [...]

 The tags can later be used to keep (or forget) snapshots.

# List all snapshots
@@ -204,31 +245,31 @@ Now, you can list all the snapshots stored in the repository:

     $ restic -r /tmp/backup snapshots
     enter password for repository:
-    ID        Date                 Host    Directory
+    ID        Date                 Host    Tags   Directory
     ----------------------------------------------------------------------
     40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work
     79766175  2015-05-08 21:40:19  kasimir        /home/user/work
     bdbd3439  2015-05-08 21:45:17  luigi          /home/art
     590c8fc8  2015-05-08 21:47:38  kazik          /srv
     9f0bc19e  2015-05-08 21:46:11  luigi          /srv

 You can filter the listing by directory path:

     $ restic -r /tmp/backup snapshots --path="/srv"
     enter password for repository:
-    ID        Date                 Host    Directory
+    ID        Date                 Host    Tags   Directory
     ----------------------------------------------------------------------
     590c8fc8  2015-05-08 21:47:38  kazik          /srv
     9f0bc19e  2015-05-08 21:46:11  luigi          /srv

 Or filter by host:

     $ restic -r /tmp/backup snapshots --host luigi
     enter password for repository:
-    ID        Date                 Host    Directory
+    ID        Date                 Host    Tags   Directory
     ----------------------------------------------------------------------
     bdbd3439  2015-05-08 21:45:17  luigi          /home/art
     9f0bc19e  2015-05-08 21:46:11  luigi          /srv

 Combining filters is also possible.
@@ -310,10 +351,7 @@ the repository with FUSE:

     Now serving /tmp/backup at /tmp/restic
     Don't forget to umount after quitting!

-Windows doesn't support FUSE directly. Projects like
-[dokan](http://dokan-dev.github.io/) try to fill the gap. We haven't tested it
-yet, but we'd like to hear about your experience. For setup information see
-[dokan FUSE in dokan's wiki](https://github.com/dokan-dev/dokany/wiki/FUSE).
+Mounting repositories via FUSE is not possible on Windows and OpenBSD.

# Create an SFTP repository
@@ -325,18 +363,16 @@ credentials.

 Once the server is configured, the setup of the SFTP repository can simply be
 achieved by changing the URL scheme in the `init` command:

-    $ restic -r sftp://user@host//tmp/backup init
+    $ restic -r sftp:user@host:/tmp/backup init
     enter password for new backend:
     enter password again:
-    created restic backend f1c6108821 at sftp://user@host//tmp/backup
+    created restic backend f1c6108821 at sftp:user@host:/tmp/backup
     Please note that knowledge of your password is required to access the repository.
     Losing your password means that your data is irrecoverably lost.

-Yes, that's really two slash (`/`) characters after the host name, here the
-directory `/tmp/backup` on the server is meant. If you'd rather like to create
-a repository in the user's home directory on the server, use the location
-`sftp://user@host/foo/bar/repo`. In this case the directory is relative to the
-user's home directory: `foo/bar/repo`.
+You can also specify a relative directory (read: no slash (`/`) character at the
+beginning); in this case the directory is relative to the remote user's
+home directory.

# Create an Amazon S3 repository
@@ -347,15 +383,16 @@ Restic can backup data to any Amazon S3 bucket. However, in this case, changing

 You can then easily initialize a repository that uses your Amazon S3 as a backend.

-    $ restic -r s3://s3.amazonaws.com/bucket_name init
+    $ restic -r s3:eu-central-1/bucket_name init
     enter password for new backend:
     enter password again:
-    created restic backend eefee03bbd at s3://s3.amazonaws.com/bucket_name
+    created restic backend eefee03bbd at s3:eu-central-1/bucket_name
     Please note that knowledge of your password is required to access the repository.
     Losing your password means that your data is irrecoverably lost.

-For an S3-compatible repository without TLS available, use the alternative URI
-protocol `s3:http://server:port/bucket_name`.
+For an S3-compatible server that is not Amazon (like Minio, see below), or one
+that is only available via HTTP, you can specify the URL to the server like
+this: `s3:http://server:port/bucket_name`.

 ## Create a Minio Server repository

@@ -380,6 +417,115 @@ Now you can easily initialize restic to use Minio server as backend with this command:

     Please note that knowledge of your password is required to access
     the repository. Losing your password means that your data is irrecoverably lost.
# Removing old snapshots

All backup space is finite, so restic allows removing old snapshots. This can
be done either manually (by specifying a snapshot ID to remove) or by using a
policy that describes which snapshots to forget. For all remove operations, two
commands need to be called in sequence: `forget` to remove a snapshot and
`prune` to actually remove the data that was referenced by the snapshot from
the repository.

## Remove a single snapshot

The command `snapshots` can be used to list all snapshots in a repository like this:

    $ restic -r /tmp/backup snapshots
    enter password for repository:
    ID        Date                 Host    Tags   Directory
    ----------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work
    79766175  2015-05-08 21:40:19  kasimir        /home/user/work
    bdbd3439  2015-05-08 21:45:17  luigi          /home/art
    590c8fc8  2015-05-08 21:47:38  kazik          /srv
    9f0bc19e  2015-05-08 21:46:11  luigi          /srv

In order to remove the snapshot of `/home/art`, use the `forget` command and
specify the snapshot ID on the command line:

    $ restic -r /tmp/backup forget bdbd3439
    enter password for repository:
    removed snapshot bdbd3439

Afterwards this snapshot is removed:

    $ restic -r /tmp/backup snapshots
    enter password for repository:
    ID        Date                 Host    Tags   Directory
    ----------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work
    79766175  2015-05-08 21:40:19  kasimir        /home/user/work
    590c8fc8  2015-05-08 21:47:38  kazik          /srv
    9f0bc19e  2015-05-08 21:46:11  luigi          /srv

But the data that was referenced by files in this snapshot is still stored in
the repository. To clean up unreferenced data, the `prune` command must be run:

    $ restic -r /tmp/backup prune
    enter password for repository:

    counting files in repo
    building new index for repo
    [0:00] 100.00%  22 / 22 files
    repository contains 22 packs (8512 blobs) with 100.092 MiB bytes
    processed 8512 blobs: 0 duplicate blobs, 0B duplicate
    load all snapshots
    find data that is still in use for 1 snapshots
    [0:00] 100.00%  1 / 1 snapshots
    found 8433 of 8512 data blobs still in use
    will rewrite 3 packs
    creating new index
    [0:00] 86.36%  19 / 22 files
    saved new index as 544a5084
    done

Afterwards the repository is smaller.

## Removing snapshots according to a policy

Removing snapshots manually is tedious and error-prone, therefore restic allows
specifying which snapshots should be removed automatically according to a
policy. You can specify how many hourly, daily, weekly, monthly and yearly
snapshots to keep; all other snapshots are removed. The most important
command-line parameter here is `--dry-run`, which instructs restic not to remove
anything but instead print which snapshots would be removed.

When `forget` is run with a policy, restic loads the list of all snapshots,
then groups them by host name and list of directories. The policy is then
applied to each group of snapshots separately. This is a safety feature.

The `forget` command accepts the following parameters:

* `--keep-last n` never delete the `n` last (most recent) snapshots
* `--keep-hourly n` for the last `n` hours in which a snapshot was made, keep
  only the last snapshot for each hour.
* `--keep-daily n` for the last `n` days which have one or more snapshots, only
  keep the last one for that day.
* `--keep-weekly n` for the last `n` weeks which have one or more snapshots, only
  keep the last one for that week.
* `--keep-monthly n` for the last `n` months which have one or more snapshots, only
  keep the last one for that month.
* `--keep-yearly n` for the last `n` years which have one or more snapshots, only
  keep the last one for that year.
* `--keep-tag` keep all snapshots which have all tags specified by this option
  (can be specified multiple times).

Additionally, you can restrict removing snapshots to those which have a
particular hostname with the `--hostname` parameter, or tags with the `--tag`
option. When multiple tags are specified, only the snapshots which have all the
tags are considered.

All the `--keep-*` options above only count hours/days/weeks/months/years which
have a snapshot, so those without a snapshot are ignored.

Let's explain this with an example: suppose you have only made a backup on each
Sunday for 12 weeks. Then `forget --keep-daily 4` will keep the last four snapshots
for the last four Sundays, but remove the rest. Only counting the days which
have a backup and ignoring the ones without is a safety feature: it prevents
restic from removing many snapshots when no new ones are created. If it were
implemented otherwise, running `forget --keep-daily 4` on a Friday would remove
all snapshots!
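To make the counting rule concrete, here is a small standalone sketch of the `--keep-daily n` semantics — not restic's actual implementation, just the rule described above: walk snapshots newest-first and keep the newest snapshot of each of the last `n` distinct days that actually have one.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// keepDaily returns the snapshots a "--keep-daily n" policy would keep:
// the newest snapshot of each distinct calendar day, for the last n days
// that actually have snapshots. Days without snapshots are simply skipped.
func keepDaily(snapshots []time.Time, n int) []time.Time {
	sort.Slice(snapshots, func(i, j int) bool { return snapshots[i].After(snapshots[j]) })

	var kept []time.Time
	seen := map[string]bool{}
	for _, sn := range snapshots {
		day := sn.Format("2006-01-02")
		if seen[day] {
			continue // a newer snapshot already covers this day
		}
		seen[day] = true
		kept = append(kept, sn)
		if len(kept) == n {
			break
		}
	}
	return kept
}

func main() {
	// Twelve weekly (Sunday) backups: keepDaily(snaps, 4) keeps the last
	// four Sundays, because only days that have a snapshot are counted.
	var snaps []time.Time
	t := time.Date(2016, 9, 4, 3, 0, 0, 0, time.UTC) // a Sunday
	for i := 0; i < 12; i++ {
		snaps = append(snaps, t.AddDate(0, 0, -7*i))
	}
	for _, k := range keepDaily(snaps, 4) {
		fmt.Println(k.Format("2006-01-02"))
	}
}
```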
# Debugging restic

The program can be built with debug support like this:

    $ go run build.go -tags debug

@@ -387,14 +533,36 @@ The program can be built with debug support like this:

 Afterwards, extensive debug messages are written to the file in environment
-variable `RESTIC_DEBUG`, e.g.:
+variable `DEBUG_LOG`, e.g.:

-    $ RESTIC_DEBUG=/tmp/restic-debug.log restic backup ~/work
+    $ DEBUG_LOG=/tmp/restic-debug.log restic backup ~/work

If you suspect that there is a bug, you can have a look at the debug log.
Please be aware that the debug log might contain sensitive information such as
file and directory names.

The debug log will always contain all log messages restic generates. You can
also instruct restic to print some or all debug messages to stderr. These can
also be limited to e.g. a list of source files or a list of patterns for
function names. The patterns are globbing patterns (see the documentation for
[`path.Match`](https://golang.org/pkg/path/#Match)); multiple patterns are
separated by commas. Patterns are case sensitive.

Printing all log messages to the console can be achieved by setting the file
filter to `*`:

    $ DEBUG_FILES=* restic check

If you want restic to just print all debug log messages from the files
`main.go` and `lock.go`, set the environment variable `DEBUG_FILES` like this:

    $ DEBUG_FILES=main.go,lock.go restic check

The following command line instructs restic to only print debug statements
originating in functions that match the pattern `*unlock*` (case sensitive):

    $ DEBUG_FUNCS=*unlock* restic check
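Such a comma-separated glob filter is easy to evaluate with the standard library; a tiny illustration (the function names are invented for the example, this is not restic's code):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// matchAny reports whether name matches one of the comma-separated glob
// patterns, e.g. the value of DEBUG_FUNCS=*unlock*,Save*.
func matchAny(patterns, name string) bool {
	for _, pat := range strings.Split(patterns, ",") {
		if ok, _ := path.Match(pat, name); ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchAny("*unlock*", "unlockRepo")) // true
	fmt.Println(matchAny("*unlock*", "runBackup"))  // false (case sensitive)
}
```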
# Under the hood: Browse repository objects

Internally, a repository stores data of several different types, described in the
[design documentation](https://github.com/restic/restic/blob/master/doc/Design.md).
You can `list` objects such as blobs, packs, index, snapshots, keys or locks with
the following command:

    $ restic -r /tmp/backup list snapshots
@@ -3,7 +3,9 @@
 package main

 import (
+	"bufio"
+	"bytes"
 	"errors"
 	"flag"
 	"fmt"
 	"io"

@@ -17,6 +19,12 @@ import (
 	"strings"
 )

+// ForbiddenImports are the packages from the stdlib that should not be used in
+// our code.
+var ForbiddenImports = map[string]bool{
+	"errors": true,
+}
+
 var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
 var minioServer = flag.String("minio", "", "path to the minio server binary")
 var debug = flag.Bool("debug", false, "output debug messages")
@@ -142,7 +150,6 @@ func (env *TravisEnvironment) Prepare() error {

 	for _, pkg := range []string{
 		"golang.org/x/tools/cmd/cover",
 		"github.com/mattn/goveralls",
 		"github.com/pierrre/gotestcover",
 	} {
 		err := run("go", "get", pkg)

@@ -158,17 +165,7 @@ func (env *TravisEnvironment) Prepare() error {
 		return err
 	}

-	if runtime.GOOS == "darwin" {
-		// install the libraries necessary for fuse
-		if err := run("brew", "update"); err != nil {
-			return err
-		}
-		if err := run("brew", "cask", "install", "osxfuse"); err != nil {
-			return err
-		}
-	}
-
-	if *runCrossCompile {
+	if *runCrossCompile && !(runtime.Version() < "go1.7") {
 		// only test cross compilation on linux with Travis
 		if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
 			return err
@@ -191,8 +188,7 @@ func (env *TravisEnvironment) Prepare() error {

 		msg("gox: OS/ARCH %v\n", env.goxOSArch)

-		v := runtime.Version()
-		if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
+		if runtime.Version() < "go1.5" {
 			err := run("gox", "-build-toolchain",
 				"-osarch", strings.Join(env.goxOSArch, " "))
@@ -305,8 +301,8 @@ func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (

 // RunTests starts the tests for Travis.
 func (env *TravisEnvironment) RunTests() error {
-	// run fuse tests on darwin
-	if runtime.GOOS != "darwin" {
+	// do not run fuse tests on darwin
+	if runtime.GOOS == "darwin" {
 		msg("skip fuse integration tests on %v\n", runtime.GOOS)
 		os.Setenv("RESTIC_TEST_FUSE", "0")
 	}
@@ -318,14 +314,17 @@ func (env *TravisEnvironment) RunTests() error {

 	env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")

-	if *runCrossCompile {
+	if *runCrossCompile && !(runtime.Version() < "go1.7") {
 		// compile for all target architectures with tags
 		for _, tags := range []string{"release", "debug"} {
-			runWithEnv(env.env, "gox", "-verbose",
+			err := runWithEnv(env.env, "gox", "-verbose",
 				"-osarch", strings.Join(env.goxOSArch, " "),
 				"-tags", tags,
 				"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
 				"cmds/restic")
+			if err != nil {
+				return err
+			}
 		}
 	}
@@ -340,7 +339,30 @@ func (env *TravisEnvironment) RunTests() error {
 		return err
 	}

-	return runGofmt()
+	if err = runGofmt(); err != nil {
+		return err
+	}
+
+	deps, err := findImports()
+	if err != nil {
+		return err
+	}
+
+	foundForbiddenImports := false
+	for name, imports := range deps {
+		for _, pkg := range imports {
+			if _, ok := ForbiddenImports[pkg]; ok {
+				fmt.Fprintf(os.Stderr, "========== package %v imports forbidden package %v\n", name, pkg)
+				foundForbiddenImports = true
+			}
+		}
+	}
+
+	if foundForbiddenImports {
+		return errors.New("CI: forbidden imports found")
+	}
+
+	return nil
 }

 // AppveyorEnvironment is the environment on Windows.
@@ -408,6 +430,46 @@ func updateEnv(env []string, override map[string]string) []string {
 	return newEnv
 }

+func findImports() (map[string][]string, error) {
+	res := make(map[string][]string)
+
+	cwd, err := os.Getwd()
+	if err != nil {
+		return nil, fmt.Errorf("Getwd() returned error: %v", err)
+	}
+
+	gopath := cwd + ":" + filepath.Join(cwd, "vendor")
+
+	cmd := exec.Command("go", "list", "-f", `{{.ImportPath}} {{join .Imports " "}}`, "./src/...")
+	cmd.Env = updateEnv(os.Environ(), map[string]string{"GOPATH": gopath})
+	cmd.Stderr = os.Stderr
+
+	output, err := cmd.Output()
+	if err != nil {
+		return nil, err
+	}
+
+	sc := bufio.NewScanner(bytes.NewReader(output))
+	for sc.Scan() {
+		wordScanner := bufio.NewScanner(strings.NewReader(sc.Text()))
+		wordScanner.Split(bufio.ScanWords)
+
+		if !wordScanner.Scan() {
+			return nil, fmt.Errorf("package name not found in line: %s", output)
+		}
+		name := wordScanner.Text()
+		var deps []string
+
+		for wordScanner.Scan() {
+			deps = append(deps, wordScanner.Text())
+		}
+
+		res[name] = deps
+	}
+
+	return res, nil
+}
+
 func runGofmt() error {
 	dir, err := os.Getwd()
 	if err != nil {
@@ -12,6 +12,8 @@ import (
 	"path/filepath"
 	"strings"
 	"time"
+
+	"restic/fs"
 )

 // Context contains repository meta-data.

@@ -134,7 +136,7 @@ func GetBlob(c *Context) http.HandlerFunc {
 		dir := vars[1]
 		name := vars[2]
 		path := filepath.Join(c.path, dir, name)
-		file, err := os.Open(path)
+		file, err := fs.Open(path)
 		if err != nil {
 			http.Error(w, "404 not found", 404)
 			return

@@ -152,7 +154,7 @@ func SaveBlob(c *Context) http.HandlerFunc {
 		name := vars[2]
 		path := filepath.Join(c.path, dir, name)
 		tmp := path + "_tmp"
-		tf, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY, 0600)
+		tf, err := fs.OpenFile(tmp, os.O_CREATE|os.O_WRONLY, 0600)
 		if err != nil {
 			http.Error(w, "500 internal server error", 500)
 			return
@@ -32,7 +32,8 @@ import (
 	"encoding/csv"
 	"io"
 	"log"
 	"os"
+	"restic/fs"
 )

 // lookup passwords in a htpasswd file

@@ -47,7 +48,7 @@ type HtpasswdFile struct {
 // file and returns them. If an error is encountered, it is returned, together
 // with a nil pointer for the HtpasswdFile.
 func NewHtpasswdFromFile(path string) (*HtpasswdFile, error) {
-	r, err := os.Open(path)
+	r, err := fs.Open(path)
 	if err != nil {
 		return nil, err
 	}
@@ -32,6 +32,9 @@ func AddCleanupHandler(f func() error) {
 	cleanupHandlers.Lock()
 	defer cleanupHandlers.Unlock()

+	// reset the done flag for integration tests
+	cleanupHandlers.done = false
+
 	cleanupHandlers.list = append(cleanupHandlers.list, f)
 }

@@ -51,13 +54,14 @@ func RunCleanupHandlers() {
 			fmt.Fprintf(stderr, "error in cleanup handler: %v\n", err)
 		}
 	}
+	cleanupHandlers.list = nil
 }

 // CleanupHandler handles the SIGINT signal.
 func CleanupHandler(c <-chan os.Signal) {
 	for s := range c {
-		debug.Log("CleanupHandler", "signal %v received, cleaning up", s)
-		fmt.Println("\x1b[2KInterrupt received, cleaning up")
+		debug.Log("signal %v received, cleaning up", s)
+		fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
 		RunCleanupHandlers()
 		fmt.Println("exiting")
 		os.Exit(0)
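The shape here — a goroutine draining a signal channel, running registered handlers at most once, then exiting — is a standard Go pattern. A condensed self-contained version (all names invented for the sketch):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

var cleanup struct {
	sync.Mutex
	list []func() error
}

// addCleanupHandler registers a function to run when an interrupt arrives.
func addCleanupHandler(f func() error) {
	cleanup.Lock()
	defer cleanup.Unlock()
	cleanup.list = append(cleanup.list, f)
}

// cleanupHandler drains the signal channel, runs all registered handlers
// exactly once, and exits — the same shape as CleanupHandler above.
func cleanupHandler(c <-chan os.Signal) {
	for s := range c {
		// "\x1b[2K" erases the current terminal line before printing —
		// presumably what restic's ClearLine() helper returns.
		fmt.Printf("\x1b[2K\rsignal %v received, cleaning up\n", s)
		cleanup.Lock()
		for _, f := range cleanup.list {
			if err := f(); err != nil {
				fmt.Fprintf(os.Stderr, "error in cleanup handler: %v\n", err)
			}
		}
		cleanup.list = nil // ensure handlers run at most once
		cleanup.Unlock()
		fmt.Println("exiting")
		os.Exit(0)
	}
}

func main() {
	addCleanupHandler(func() error {
		fmt.Println("removing repository lock")
		return nil
	})

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT)
	go cleanupHandler(c)

	fmt.Println("working; press Ctrl-C to trigger cleanup")
	select {} // stand-in for long-running work
}
```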
@@ -2,132 +2,90 @@ package main

 import (
 	"bufio"
-	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
 	"restic"
-	"restic/backend"
-	"restic/debug"
-	"restic/filter"
 	"strings"
 	"time"

 	"golang.org/x/crypto/ssh/terminal"

+	"github.com/spf13/cobra"
+
+	"restic/archiver"
+	"restic/debug"
+	"restic/errors"
+	"restic/filter"
+	"restic/fs"
 )

-type CmdBackup struct {
-	Parent        string   `short:"p" long:"parent" description:"use this parent snapshot (default: last snapshot in repo that has the same target)"`
-	Force         bool     `short:"f" long:"force" description:"Force re-reading the target. Overrides the \"parent\" flag"`
-	Excludes      []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
-	ExcludeFile   string   `long:"exclude-file" description:"Read exclude-patterns from file"`
-	Stdin         bool     `long:"stdin" description:"read backup data from stdin"`
-	StdinFilename string   `long:"stdin-filename" default:"stdin" description:"file name to use when reading from stdin"`
-
-	global *GlobalOptions
-}
+var cmdBackup = &cobra.Command{
+	Use:   "backup [flags] FILE/DIR [FILE/DIR] ...",
+	Short: "create a new backup of files and/or directories",
+	Long: `
+The "backup" command creates a new snapshot and saves the files and directories
+given as the arguments.
+`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		if backupOptions.Stdin {
+			return readBackupFromStdin(backupOptions, globalOptions, args)
+		}
+
+		return runBackup(backupOptions, globalOptions, args)
+	},
+}
+
+// BackupOptions bundles all options for the backup command.
+type BackupOptions struct {
+	Parent         string
+	Force          bool
+	Excludes       []string
+	ExcludeFile    string
+	ExcludeOtherFS bool
+	Stdin          bool
+	StdinFilename  string
+	Tags           []string
+}
+
+var backupOptions BackupOptions

 func init() {
-	_, err := parser.AddCommand("backup",
-		"save file/directory",
-		"The backup command creates a snapshot of a file or directory",
-		&CmdBackup{global: &globalOpts})
-	if err != nil {
-		panic(err)
-	}
+	cmdRoot.AddCommand(cmdBackup)
+
+	f := cmdBackup.Flags()
+	f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
+	f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories. Overrides the "parent" flag`)
+	f.StringSliceVarP(&backupOptions.Excludes, "exclude", "e", []string{}, "exclude a `pattern` (can be specified multiple times)")
+	f.StringVar(&backupOptions.ExcludeFile, "exclude-file", "", "read exclude patterns from a file")
+	f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "Exclude other file systems")
+	f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
+	f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "", "file name to use when reading from stdin")
+	f.StringSliceVar(&backupOptions.Tags, "tag", []string{}, "add a `tag` for the new snapshot (can be specified multiple times)")
 }

 func formatBytes(c uint64) string {
 	b := float64(c)

 	switch {
 	case c > 1<<40:
 		return fmt.Sprintf("%.3f TiB", b/(1<<40))
 	case c > 1<<30:
 		return fmt.Sprintf("%.3f GiB", b/(1<<30))
 	case c > 1<<20:
 		return fmt.Sprintf("%.3f MiB", b/(1<<20))
 	case c > 1<<10:
 		return fmt.Sprintf("%.3f KiB", b/(1<<10))
 	default:
 		return fmt.Sprintf("%dB", c)
 	}
 }

 func formatSeconds(sec uint64) string {
 	hours := sec / 3600
 	sec -= hours * 3600
 	min := sec / 60
 	sec -= min * 60
 	if hours > 0 {
 		return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
 	}

 	return fmt.Sprintf("%d:%02d", min, sec)
 }

 func formatPercent(numerator uint64, denominator uint64) string {
 	if denominator == 0 {
 		return ""
 	}

 	percent := 100.0 * float64(numerator) / float64(denominator)

 	if percent > 100 {
 		percent = 100
 	}

 	return fmt.Sprintf("%3.2f%%", percent)
 }

 func formatRate(bytes uint64, duration time.Duration) string {
 	sec := float64(duration) / float64(time.Second)
 	rate := float64(bytes) / sec / (1 << 20)
 	return fmt.Sprintf("%.2fMiB/s", rate)
 }

 func formatDuration(d time.Duration) string {
 	sec := uint64(d / time.Second)
 	return formatSeconds(sec)
 }
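To see what these helpers produce, a tiny driver can be compiled in a scratch package alongside copies of the functions above; the sample values echo the prune output quoted in the manual:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(formatBytes(2 << 20))             // "2.000 MiB"
	fmt.Println(formatSeconds(3661))              // "1:01:01"
	fmt.Println(formatPercent(8433, 8512))        // "99.07%"
	fmt.Println(formatDuration(90 * time.Second)) // "1:30"
}
```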
 func printTree2(indent int, t *restic.Tree) {
 	for _, node := range t.Nodes {
 		if node.Tree() != nil {
 			fmt.Printf("%s%s/\n", strings.Repeat(" ", indent), node.Name)
 			printTree2(indent+1, node.Tree())
 		} else {
 			fmt.Printf("%s%s\n", strings.Repeat(" ", indent), node.Name)
 		}
 	}
 }

-func (cmd CmdBackup) Usage() string {
-	return "DIR/FILE [DIR/FILE] [...]"
-}
-
-func (cmd CmdBackup) newScanProgress() *restic.Progress {
-	if !cmd.global.ShowProgress() {
+func newScanProgress(gopts GlobalOptions) *restic.Progress {
+	if gopts.Quiet {
 		return nil
 	}

-	p := restic.NewProgress(time.Second)
+	p := restic.NewProgress()
 	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
-		fmt.Printf("\x1b[2K[%s] %d directories, %d files, %s\r", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
+		PrintProgress("[%s] %d directories, %d files, %s", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
 	}
 	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-		fmt.Printf("\x1b[2Kscanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
+		PrintProgress("scanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
 	}

 	return p
 }

-func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
-	if !cmd.global.ShowProgress() {
+func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
+	if gopts.Quiet {
 		return nil
 	}

-	archiveProgress := restic.NewProgress(time.Second)
+	archiveProgress := restic.NewProgress()

 	var bps, eta uint64
 	itemsTodo := todo.Files + todo.Dirs

@@ -156,7 +114,7 @@ func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {

 		w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
 		if err == nil {
-			maxlen := w - len(status2)
+			maxlen := w - len(status2) - 1

 			if maxlen < 4 {
 				status1 = ""

@@ -166,7 +124,7 @@ func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
 			}
 		}

-		fmt.Printf("\x1b[2K%s%s\r", status1, status2)
+		PrintProgress("%s%s", status1, status2)
 	}

 	archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {

@@ -176,12 +134,12 @@ func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
 	return archiveProgress
 }

-func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
-	if !cmd.global.ShowProgress() {
+func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
+	if gopts.Quiet {
 		return nil
 	}

-	archiveProgress := restic.NewProgress(time.Second)
+	archiveProgress := restic.NewProgress()

 	var bps uint64

@@ -207,7 +165,7 @@ func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
 		}
 	}

-		fmt.Printf("\x1b[2K%s\r", status1)
+		PrintProgress("%s", status1)
 	}

 	archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {

@@ -221,8 +179,8 @@
 // items exist at all.
 func filterExisting(items []string) (result []string, err error) {
 	for _, item := range items {
-		_, err := os.Lstat(item)
-		if err != nil && os.IsNotExist(err) {
+		_, err := fs.Lstat(item)
+		if err != nil && os.IsNotExist(errors.Cause(err)) {
 			continue
 		}

@@ -230,18 +188,39 @@ func filterExisting(items []string) (result []string, err error) {
 	}

 	if len(result) == 0 {
-		return nil, errors.New("all target directories/files do not exist")
+		return nil, errors.Fatal("all target directories/files do not exist")
 	}

 	return
 }
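The switch from `os.IsNotExist(err)` to `os.IsNotExist(errors.Cause(err))` matters because wrapped errors no longer satisfy the stdlib predicates directly; `Cause` unwraps back to the original error. A sketch using `github.com/pkg/errors` (assuming `restic/errors` wraps that package, which the `Cause`/`Wrap`-style naming suggests):

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	_, err := os.Lstat("/no/such/path")
	wrapped := errors.Wrap(err, "lstat failed")

	// The wrapper hides the original *os.PathError from the predicate...
	fmt.Println(os.IsNotExist(wrapped)) // false
	// ...so the predicate must be applied to the unwrapped cause.
	fmt.Println(os.IsNotExist(errors.Cause(wrapped))) // true
}
```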
-func (cmd CmdBackup) readFromStdin(args []string) error {
+// gatherDevices returns the set of unique device ids of the files and/or
+// directory paths listed in "items".
+func gatherDevices(items []string) (deviceMap map[uint64]struct{}, err error) {
+	deviceMap = make(map[uint64]struct{})
+	for _, item := range items {
+		fi, err := fs.Lstat(item)
+		if err != nil {
+			return nil, err
+		}
+		id, err := fs.DeviceID(fi)
+		if err != nil {
+			return nil, err
+		}
+		deviceMap[id] = struct{}{}
+	}
+	if len(deviceMap) == 0 {
+		return nil, errors.New("zero allowed devices")
+	}
+	return deviceMap, nil
+}
+
+func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string) error {
 	if len(args) != 0 {
-		return fmt.Errorf("when reading from stdin, no additional files can be specified")
+		return errors.Fatalf("when reading from stdin, no additional files can be specified")
 	}

-	repo, err := cmd.global.OpenRepository()
+	repo, err := OpenRepository(gopts)
 	if err != nil {
 		return err
 	}

@@ -257,7 +236,7 @@
 		return err
 	}

-	_, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename)
+	_, id, err := archiver.ArchiveReader(repo, newArchiveStdinProgress(gopts), os.Stdin, opts.StdinFilename, opts.Tags)
 	if err != nil {
 		return err
 	}

@@ -266,13 +245,9 @@
 	return nil
 }

-func (cmd CmdBackup) Execute(args []string) error {
-	if cmd.Stdin {
-		return cmd.readFromStdin(args)
-	}
-
+func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
 	if len(args) == 0 {
-		return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of parameters")
 	}

 	target := make([]string, 0, len(args))

@@ -288,7 +263,17 @@
 		return err
 	}

-	repo, err := cmd.global.OpenRepository()
+	// allowed devices
+	var allowedDevs map[uint64]struct{}
+	if opts.ExcludeOtherFS {
+		allowedDevs, err = gatherDevices(target)
+		if err != nil {
+			return err
+		}
+		debug.Log("allowed devices: %v\n", allowedDevs)
+	}
+
+	repo, err := OpenRepository(gopts)
 	if err != nil {
 		return err
 	}

@@ -304,20 +289,20 @@
 		return err
 	}

-	var parentSnapshotID *backend.ID
+	var parentSnapshotID *restic.ID

 	// Force using a parent
-	if !cmd.Force && cmd.Parent != "" {
-		id, err := restic.FindSnapshot(repo, cmd.Parent)
+	if !opts.Force && opts.Parent != "" {
+		id, err := restic.FindSnapshot(repo, opts.Parent)
 		if err != nil {
-			return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
+			return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
 		}

 		parentSnapshotID = &id
 	}

 	// Find last snapshot to set it as parent, if not already set
-	if !cmd.Force && parentSnapshotID == nil {
+	if !opts.Force && parentSnapshotID == nil {
 		id, err := restic.FindLatestSnapshot(repo, target, "")
 		if err == nil {
 			parentSnapshotID = &id

@@ -327,16 +312,16 @@
 	}

 	if parentSnapshotID != nil {
-		cmd.global.Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
+		Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
 	}

-	cmd.global.Verbosef("scan %v\n", target)
+	Verbosef("scan %v\n", target)

 	// add patterns from file
-	if cmd.ExcludeFile != "" {
-		file, err := os.Open(cmd.ExcludeFile)
+	if opts.ExcludeFile != "" {
+		file, err := fs.Open(opts.ExcludeFile)
 		if err != nil {
-			cmd.global.Warnf("error reading exclude patterns: %v", err)
+			Warnf("error reading exclude patterns: %v", err)
 			return nil
 		}

@@ -345,45 +330,62 @@
 			line := scanner.Text()
 			if !strings.HasPrefix(line, "#") {
 				line = os.ExpandEnv(line)
-				cmd.Excludes = append(cmd.Excludes, line)
+				opts.Excludes = append(opts.Excludes, line)
 			}
 		}
 	}

 	selectFilter := func(item string, fi os.FileInfo) bool {
-		matched, err := filter.List(cmd.Excludes, item)
+		matched, err := filter.List(opts.Excludes, item)
 		if err != nil {
-			cmd.global.Warnf("error for exclude pattern: %v", err)
+			Warnf("error for exclude pattern: %v", err)
 		}

 		if matched {
-			debug.Log("backup.Execute", "path %q excluded by a filter", item)
+			debug.Log("path %q excluded by a filter", item)
 			return false
 		}

-		return !matched
+		if !opts.ExcludeOtherFS {
+			return true
+		}
+
+		id, err := fs.DeviceID(fi)
+		if err != nil {
+			// This should never happen because gatherDevices() would have
+			// errored out earlier. If it still does that's a reason to panic.
+			panic(err)
+		}
+		_, found := allowedDevs[id]
+		if !found {
+			debug.Log("path %q on disallowed device %d", item, id)
+			return false
+		}
+
+		return true
 	}

-	stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress())
+	stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
 	if err != nil {
 		return err
 	}

-	arch := restic.NewArchiver(repo)
-	arch.Excludes = cmd.Excludes
+	arch := archiver.New(repo)
+	arch.Excludes = opts.Excludes
 	arch.SelectFilter = selectFilter

 	arch.Error = func(dir string, fi os.FileInfo, err error) error {
 		// TODO: make ignoring errors configurable
-		cmd.global.Warnf("\x1b[2K\rerror for %s: %v\n", dir, err)
+		Warnf("%s\rerror for %s: %v\n", ClearLine(), dir, err)
 		return nil
 	}

-	_, id, err := arch.Snapshot(cmd.newArchiveProgress(stat), target, parentSnapshotID)
+	_, id, err := arch.Snapshot(newArchiveProgress(gopts, stat), target, opts.Tags, parentSnapshotID)
 	if err != nil {
 		return err
 	}

-	cmd.global.Verbosef("snapshot %s saved\n", id.Str())
+	Verbosef("snapshot %s saved\n", id.Str())

 	return nil
 }
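The rewrite above follows the standard cobra wiring that replaced the go-flags struct tags: a package-level `cobra.Command` whose `RunE` delegates to a plain function, an options struct, and flags bound in `init()`. A minimal self-contained version of the same pattern (the command and flag names are invented for the sketch):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

var cmdRoot = &cobra.Command{Use: "example"}

// GreetOptions bundles all options for the greet command.
type GreetOptions struct {
	Name string
	Loud bool
}

var greetOptions GreetOptions

var cmdGreet = &cobra.Command{
	Use:   "greet",
	Short: "print a greeting",
	RunE: func(cmd *cobra.Command, args []string) error {
		return runGreet(greetOptions, args)
	},
}

func init() {
	cmdRoot.AddCommand(cmdGreet)

	f := cmdGreet.Flags()
	f.StringVar(&greetOptions.Name, "name", "world", "who to greet")
	f.BoolVarP(&greetOptions.Loud, "loud", "l", false, "shout the greeting")
}

func runGreet(opts GreetOptions, args []string) error {
	msg := "hello " + opts.Name
	if opts.Loud {
		msg = strings.ToUpper(msg)
	}
	fmt.Println(msg)
	return nil
}

func main() {
	if err := cmdRoot.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```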
@@ -1,56 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"restic"
-)
-
-type CmdCache struct {
-	global *GlobalOptions
-}
-
-func init() {
-	_, err := parser.AddCommand("cache",
-		"manage cache",
-		"The cache command creates and manages the local cache",
-		&CmdCache{global: &globalOpts})
-	if err != nil {
-		panic(err)
-	}
-}
-
-func (cmd CmdCache) Usage() string {
-	return "[update|clear]"
-}
-
-func (cmd CmdCache) Execute(args []string) error {
-	// if len(args) == 0 || len(args) > 2 {
-	// 	return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
-	// }
-
-	repo, err := cmd.global.OpenRepository()
-	if err != nil {
-		return err
-	}
-
-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	cache, err := restic.NewCache(repo, cmd.global.CacheDir)
-	if err != nil {
-		return err
-	}
-
-	fmt.Printf("clear cache for old snapshots\n")
-	err = cache.Clear(repo)
-	if err != nil {
-		return err
-	}
-	fmt.Printf("done\n")
-
-	return nil
-}
@@ -2,41 +2,39 @@ package main

import (
"encoding/json"
"errors"
"fmt"
"os"

"github.com/spf13/cobra"

"restic"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/errors"
"restic/repository"
)

type CmdCat struct {
global *GlobalOptions
var cmdCat = &cobra.Command{
Use: "cat [flags] [pack|blob|tree|snapshot|key|masterkey|config|lock] ID",
Short: "print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runCat(globalOptions, args)
},
}

func init() {
_, err := parser.AddCommand("cat",
"dump something",
"The cat command dumps data structures or data from a repository",
&CmdCat{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdCat)
}

func (cmd CmdCat) Usage() string {
return "[pack|blob|tree|snapshot|key|masterkey|config|lock] ID"
}

func (cmd CmdCat) Execute(args []string) error {
func runCat(gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
return errors.Fatalf("type or ID not specified")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -49,12 +47,12 @@ func (cmd CmdCat) Execute(args []string) error {

tpe := args[0]

var id backend.ID
var id restic.ID
if tpe != "masterkey" && tpe != "config" {
id, err = backend.ParseID(args[1])
id, err = restic.ParseID(args[1])
if err != nil {
if tpe != "snapshot" {
return err
return errors.Fatalf("unable to parse ID: %v\n", err)
}

// find snapshot id with prefix
@@ -68,7 +66,7 @@ func (cmd CmdCat) Execute(args []string) error {
// handle all types that don't need an index
switch tpe {
case "config":
buf, err := json.MarshalIndent(repo.Config, "", " ")
buf, err := json.MarshalIndent(repo.Config(), "", " ")
if err != nil {
return err
}
@@ -76,7 +74,7 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf))
return nil
case "index":
buf, err := repo.LoadAndDecrypt(backend.Index, id)
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
if err != nil {
return err
}
@@ -86,7 +84,7 @@ func (cmd CmdCat) Execute(args []string) error {

case "snapshot":
sn := &restic.Snapshot{}
err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
if err != nil {
return err
}
@@ -100,7 +98,7 @@ func (cmd CmdCat) Execute(args []string) error {

return nil
case "key":
h := backend.Handle{Type: backend.Key, Name: id.String()}
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil {
return err
@@ -151,42 +149,52 @@ func (cmd CmdCat) Execute(args []string) error {

switch tpe {
case "pack":
h := backend.Handle{Type: backend.Data, Name: id.String()}
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil {
return err
}

hash := restic.Hash(buf)
if !hash.Equal(id) {
fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String())
}

_, err = os.Stdout.Write(buf)
return err

case "blob":
blob, err := repo.Index().Lookup(id)
if err != nil {
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
list, err := repo.Index().Lookup(id, t)
if err != nil {
continue
}
blob := list[0]

buf := make([]byte, blob.Length)
n, err := repo.LoadBlob(restic.DataBlob, id, buf)
if err != nil {
return err
}
buf = buf[:n]

_, err = os.Stdout.Write(buf)
return err
}

buf := make([]byte, blob.Length)
data, err := repo.LoadBlob(blob.Type, id, buf)
if err != nil {
return err
}

_, err = os.Stdout.Write(data)
return err
return errors.Fatal("blob not found")

case "tree":
debug.Log("cat", "cat tree %v", id.Str())
tree := restic.NewTree()
err = repo.LoadJSONPack(pack.Tree, id, tree)
debug.Log("cat tree %v", id.Str())
tree, err := repo.LoadTree(id)
if err != nil {
debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
debug.Log("unable to load tree %v: %v", id.Str(), err)
return err
}

buf, err := json.MarshalIndent(&tree, "", " ")
if err != nil {
debug.Log("cat", "error json.MarshalIndent(): %v", err)
debug.Log("error json.MarshalIndent(): %v", err)
return err
}

@@ -194,6 +202,6 @@ func (cmd CmdCat) Execute(args []string) error {
return nil

default:
return errors.New("invalid type")
return errors.Fatal("invalid type")
}
}
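A second recurring change in this series is that user-facing failures now go through errors.Fatal and errors.Fatalf from restic/errors instead of errors.New and fmt.Errorf. The sketch below shows one plausible way such a marker type can be built, so the top-level command loop can tell fatal usage errors apart from ordinary ones; it is an illustration of the idea, not restic's actual implementation.

package errors

import "fmt"

// fatalError marks errors that should abort the CLI with a clean,
// user-facing message rather than a stack trace. Minimal sketch only;
// the real restic/errors package may differ.
type fatalError string

func (e fatalError) Error() string { return string(e) }
func (e fatalError) Fatal() bool   { return true }

// Fataler is implemented by errors that should terminate the program.
type Fataler interface {
	Fatal() bool
}

// Fatal returns an error that the top-level command loop treats as fatal.
func Fatal(msg string) error { return fatalError(msg) }

// Fatalf is like Fatal but with fmt.Sprintf-style formatting.
func Fatalf(format string, args ...interface{}) error {
	return fatalError(fmt.Sprintf(format, args...))
}

// IsFatal reports whether err carries the fatal marker.
func IsFatal(err error) bool {
	f, ok := err.(Fataler)
	return ok && f.Fatal()
}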
@@ -1,44 +1,53 @@
package main

import (
"errors"
"fmt"
"os"
"time"

"github.com/spf13/cobra"

"golang.org/x/crypto/ssh/terminal"

"restic"
"restic/checker"
"restic/errors"
)

type CmdCheck struct {
ReadData bool `long:"read-data" default:"false" description:"Read data blobs"`
CheckUnused bool `long:"check-unused" default:"false" description:"Check for unused blobs"`

global *GlobalOptions
var cmdCheck = &cobra.Command{
Use: "check [flags]",
Short: "check the repository for errors",
Long: `
The "check" command tests the repository for errors and reports any errors it
finds. It can also be used to read all data and therefore simulate a restore.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(checkOptions, globalOptions, args)
},
}

// CheckOptions bundle all options for the 'check' command.
type CheckOptions struct {
ReadData bool
CheckUnused bool
}

var checkOptions CheckOptions

func init() {
_, err := parser.AddCommand("check",
"check the repository",
"The check command check the integrity and consistency of the repository",
&CmdCheck{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdCheck)

f := cmdCheck.Flags()
f.BoolVar(&checkOptions.ReadData, "read-data", false, "Read all data blobs")
f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "Find unused blobs")
}

func (cmd CmdCheck) Usage() string {
return "[check-options]"
}

func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
if !cmd.global.ShowProgress() {
func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
if gopts.Quiet {
return nil
}

readProgress := restic.NewProgress(time.Second)
readProgress := restic.NewProgress()

readProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
status := fmt.Sprintf("[%s] %s %d / %d items",
@@ -54,7 +63,7 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
}
}

fmt.Printf("\x1b[2K%s\r", status)
PrintProgress("%s", status)
}

readProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
@@ -64,18 +73,18 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
return readProgress
}

func (cmd CmdCheck) Execute(args []string) error {
func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
if len(args) != 0 {
return errors.New("check has no arguments")
return errors.Fatal("check has no arguments")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}

if !cmd.global.NoLock {
cmd.global.Verbosef("Create exclusive lock for repository\n")
if !gopts.NoLock {
Verbosef("Create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
@@ -85,26 +94,26 @@ func (cmd CmdCheck) Execute(args []string) error {

chkr := checker.New(repo)

cmd.global.Verbosef("Load indexes\n")
Verbosef("Load indexes\n")
hints, errs := chkr.LoadIndex()

dupFound := false
for _, hint := range hints {
cmd.global.Printf("%v\n", hint)
Printf("%v\n", hint)
if _, ok := hint.(checker.ErrDuplicatePacks); ok {
dupFound = true
}
}

if dupFound {
cmd.global.Printf("\nrun `restic rebuild-index' to correct this\n")
Printf("\nrun `restic rebuild-index' to correct this\n")
}

if len(errs) > 0 {
for _, err := range errs {
cmd.global.Warnf("error: %v\n", err)
Warnf("error: %v\n", err)
}
return fmt.Errorf("LoadIndex returned errors")
return errors.Fatal("LoadIndex returned errors")
}

done := make(chan struct{})
@@ -113,7 +122,7 @@ func (cmd CmdCheck) Execute(args []string) error {
errorsFound := false
errChan := make(chan error)

cmd.global.Verbosef("Check all packs\n")
Verbosef("Check all packs\n")
go chkr.Packs(errChan, done)

for err := range errChan {
@@ -121,7 +130,7 @@ func (cmd CmdCheck) Execute(args []string) error {
fmt.Fprintf(os.Stderr, "%v\n", err)
}

cmd.global.Verbosef("Check snapshots, trees and blobs\n")
Verbosef("Check snapshots, trees and blobs\n")
errChan = make(chan error)
go chkr.Structure(errChan, done)

@@ -137,17 +146,17 @@ func (cmd CmdCheck) Execute(args []string) error {
}
}

if cmd.CheckUnused {
if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs() {
cmd.global.Verbosef("unused blob %v\n", id.Str())
Verbosef("unused blob %v\n", id.Str())
errorsFound = true
}
}

if cmd.ReadData {
cmd.global.Verbosef("Read all data\n")
if opts.ReadData {
Verbosef("Read all data\n")

p := cmd.newReadProgress(restic.Stat{Blobs: chkr.CountPacks()})
p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
errChan := make(chan error)

go chkr.ReadData(p, errChan, done)
@@ -159,7 +168,7 @@ func (cmd CmdCheck) Execute(args []string) error {
}

if errorsFound {
return errors.New("repository contains errors")
return errors.Fatal("repository contains errors")
}
return nil
}
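newReadProgress above builds a restic.Progress and only fills in the OnUpdate/OnDone callbacks. The self-contained sketch below reproduces that callback-plus-ticker pattern with a toy Progress type; Stat, the field names, and the one-second tick are assumptions for the example, not restic's types.

package main

import (
	"fmt"
	"sync"
	"time"
)

// Stat is a minimal counter bag, loosely modeled on restic.Stat.
type Stat struct{ Blobs, Files uint64 }

// Progress calls OnUpdate when counters change (or a ticker fires)
// and OnDone once at the end. A simplified sketch of the pattern,
// not restic's real type.
type Progress struct {
	OnUpdate func(s Stat, d time.Duration)
	OnDone   func(s Stat, d time.Duration)

	mu    sync.Mutex
	cur   Stat
	start time.Time
	stop  chan struct{}
}

// Start records the start time and fires periodic updates.
func (p *Progress) Start() {
	p.start = time.Now()
	p.stop = make(chan struct{})
	go func() {
		t := time.NewTicker(time.Second)
		defer t.Stop()
		for {
			select {
			case <-p.stop:
				return
			case <-t.C:
				p.update()
			}
		}
	}()
}

// Report adds s to the running counters and triggers an update.
func (p *Progress) Report(s Stat) {
	p.mu.Lock()
	p.cur.Blobs += s.Blobs
	p.cur.Files += s.Files
	p.mu.Unlock()
	p.update()
}

func (p *Progress) update() {
	p.mu.Lock()
	s := p.cur
	p.mu.Unlock()
	if p.OnUpdate != nil {
		p.OnUpdate(s, time.Since(p.start))
	}
}

// Done stops the ticker and invokes the final callback.
func (p *Progress) Done() {
	close(p.stop)
	if p.OnDone != nil {
		p.mu.Lock()
		s := p.cur
		p.mu.Unlock()
		p.OnDone(s, time.Since(p.start))
	}
}

func main() {
	p := &Progress{
		OnUpdate: func(s Stat, d time.Duration) { fmt.Printf("\r[%v] %d blobs", d.Truncate(time.Second), s.Blobs) },
		OnDone:   func(s Stat, d time.Duration) { fmt.Printf("\ndone: %d blobs in %v\n", s.Blobs, d.Truncate(time.Second)) },
	}
	p.Start()
	for i := 0; i < 5; i++ {
		time.Sleep(300 * time.Millisecond)
		p.Report(Stat{Blobs: 1})
	}
	p.Done()
}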
@@ -8,34 +8,29 @@ import (
"io"
"os"

"github.com/spf13/cobra"

"restic"
"restic/backend"
"restic/errors"
"restic/pack"
"restic/repository"

"restic/worker"

"github.com/juju/errors"
)

type CmdDump struct {
global *GlobalOptions

repo *repository.Repository
var cmdDump = &cobra.Command{
Use: "dump [indexes|snapshots|trees|all|packs]",
Short: "dump data structures",
Long: `
The "dump" command dumps data structures from a repository as JSON objects. It
is used for debugging purposes only.`,
RunE: func(cmd *cobra.Command, args []string) error {
return runDump(globalOptions, args)
},
}

func init() {
_, err := parser.AddCommand("dump",
"dump data structures",
"The dump command dumps data structures from a repository as JSON documents",
&CmdDump{global: &globalOpts})
if err != nil {
panic(err)
}
}

func (cmd CmdDump) Usage() string {
return "[indexes|snapshots|trees|all|packs]"
cmdRoot.AddCommand(cmdDump)
}

func prettyPrintJSON(wr io.Writer, item interface{}) error {
@@ -48,11 +43,11 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error {
return err
}

func printSnapshots(repo *repository.Repository, wr io.Writer) error {
func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)

for id := range repo.List(backend.Snapshot, done) {
for id := range repo.List(restic.SnapshotFile, done) {
snapshot, err := restic.LoadSnapshot(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
@@ -70,37 +65,6 @@ func printSnapshots(repo *repository.Repository, wr io.Writer) error {
return nil
}

func printTrees(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)

trees := []backend.ID{}

for _, idx := range repo.Index().All() {
for blob := range idx.Each(nil) {
if blob.Type != pack.Tree {
continue
}

trees = append(trees, blob.ID)
}
}

for _, id := range trees {
tree, err := restic.LoadTree(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
continue
}

fmt.Fprintf(wr, "tree_id: %v\n", id)

prettyPrintJSON(wr, tree)
}

return nil
}

const dumpPackWorkers = 10

// Pack is the struct used in printPacks.
@@ -112,10 +76,10 @@ type Pack struct {

// Blob is the struct used in printPacks.
type Blob struct {
Type pack.BlobType `json:"type"`
Length uint `json:"length"`
ID backend.ID `json:"id"`
Offset uint `json:"offset"`
Type restic.BlobType `json:"type"`
Length uint `json:"length"`
ID restic.ID `json:"id"`
Offset uint `json:"offset"`
}

func printPacks(repo *repository.Repository, wr io.Writer) error {
@@ -125,15 +89,19 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
name := job.Data.(string)

h := backend.Handle{Type: backend.Data, Name: name}
rd := backend.NewReadSeeker(repo.Backend(), h)
h := restic.Handle{Type: restic.DataFile, Name: name}

unpacker, err := pack.NewUnpacker(repo.Key(), rd)
blobInfo, err := repo.Backend().Stat(h)
if err != nil {
return nil, err
}

return unpacker.Entries, nil
blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
if err != nil {
return nil, err
}

return blobs, nil
}

jobCh := make(chan worker.Job)
@@ -141,7 +109,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
wp := worker.New(dumpPackWorkers, f, jobCh, resCh)

go func() {
for name := range repo.Backend().List(backend.Data, done) {
for name := range repo.Backend().List(restic.DataFile, done) {
jobCh <- worker.Job{Data: name}
}
close(jobCh)
@@ -155,7 +123,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
continue
}

entries := job.Result.([]pack.Blob)
entries := job.Result.([]restic.Blob)
p := Pack{
Name: name,
Blobs: make([]Blob, len(entries)),
@@ -177,14 +145,14 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
return nil
}

func (cmd CmdDump) DumpIndexes() error {
func dumpIndexes(repo restic.Repository) error {
done := make(chan struct{})
defer close(done)

for id := range cmd.repo.List(backend.Index, done) {
for id := range repo.List(restic.IndexFile, done) {
fmt.Printf("index_id: %v\n", id)

idx, err := repository.LoadIndex(cmd.repo, id)
idx, err := repository.LoadIndex(repo, id)
if err != nil {
return err
}
@@ -198,21 +166,22 @@ func (cmd CmdDump) DumpIndexes() error {
return nil
}

func (cmd CmdDump) Execute(args []string) error {
func runDump(gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
return errors.Fatalf("type not specified")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
cmd.repo = repo

lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}

err = repo.LoadIndex()
@@ -224,35 +193,26 @@ func (cmd CmdDump) Execute(args []string) error {

switch tpe {
case "indexes":
return cmd.DumpIndexes()
return dumpIndexes(repo)
case "snapshots":
return printSnapshots(repo, os.Stdout)
case "trees":
return printTrees(repo, os.Stdout)
return debugPrintSnapshots(repo, os.Stdout)
case "packs":
return printPacks(repo, os.Stdout)
case "all":
fmt.Printf("snapshots:\n")
err := printSnapshots(repo, os.Stdout)
if err != nil {
return err
}

fmt.Printf("\ntrees:\n")

err = printTrees(repo, os.Stdout)
err := debugPrintSnapshots(repo, os.Stdout)
if err != nil {
return err
}

fmt.Printf("\nindexes:\n")
err = cmd.DumpIndexes()
err = dumpIndexes(repo)
if err != nil {
return err
}

return nil
default:
return errors.Errorf("no such type %q", tpe)
return errors.Fatalf("no such type %q", tpe)
}
}
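printPacks fans the per-pack listing out over dumpPackWorkers goroutines through restic/worker: jobs go in on jobCh, results come back on resCh. A generic version of that job/result channel pattern might look like the sketch below; the Job shape and runPool helper are invented for the example.

package main

import (
	"fmt"
	"strings"
	"sync"
)

// Job carries one unit of work in and its result (or error) back out.
type Job struct {
	Data   interface{}
	Result interface{}
	Error  error
}

// runPool starts a fixed number of workers that consume jobs, apply f,
// and stream finished jobs to results; results is closed once all
// workers are done, so the consumer can simply range over it.
func runPool(workers int, f func(interface{}) (interface{}, error), jobs <-chan Job, results chan<- Job) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobs {
				job.Result, job.Error = f(job.Data)
				results <- job
			}
		}()
	}
	go func() {
		wg.Wait()
		close(results)
	}()
}

func main() {
	jobCh := make(chan Job)
	resCh := make(chan Job)

	// The worker function; stands in for "list the blobs in one pack".
	f := func(data interface{}) (interface{}, error) {
		return strings.ToUpper(data.(string)), nil
	}
	runPool(3, f, jobCh, resCh)

	go func() {
		for _, name := range []string{"pack-a", "pack-b", "pack-c"} {
			jobCh <- Job{Data: name}
		}
		close(jobCh)
	}()

	for job := range resCh {
		fmt.Println(job.Result)
	}
}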
@@ -1,31 +1,56 @@
package main

import (
"fmt"
"path/filepath"
"time"

"github.com/spf13/cobra"

"restic"
"restic/backend"
"restic/debug"
"restic/errors"
"restic/repository"
)

var cmdFind = &cobra.Command{
Use: "find [flags] PATTERN",
Short: "find a file or directory",
Long: `
The "find" command searches for files or directories in snapshots stored in the
repo. `,
RunE: func(cmd *cobra.Command, args []string) error {
return runFind(findOptions, globalOptions, args)
},
}

// FindOptions bundle all options for the find command.
type FindOptions struct {
Oldest string
Newest string
Snapshot string
}

var findOptions FindOptions

func init() {
cmdRoot.AddCommand(cmdFind)

f := cmdFind.Flags()
f.StringVarP(&findOptions.Oldest, "oldest", "o", "", "Oldest modification date/time")
f.StringVarP(&findOptions.Newest, "newest", "n", "", "Newest modification date/time")
f.StringVarP(&findOptions.Snapshot, "snapshot", "s", "", "Snapshot ID to search in")
}

type findPattern struct {
oldest, newest time.Time
pattern string
}

type findResult struct {
node *restic.Node
path string
}

type CmdFind struct {
Oldest string `short:"o" long:"oldest" description:"Oldest modification date/time"`
Newest string `short:"n" long:"newest" description:"Newest modification date/time"`
Snapshot string `short:"s" long:"snapshot" description:"Snapshot ID to search in"`

oldest, newest time.Time
pattern string
global *GlobalOptions
}

var timeFormats = []string{
"2006-01-02",
"2006-01-02 15:04",
@@ -40,16 +65,6 @@ var timeFormats = []string{
"Mon Jan 2 15:04:05 -0700 MST 2006",
}

func init() {
_, err := parser.AddCommand("find",
"find a file/directory",
"The find command searches for files or directories in snapshots",
&CmdFind{global: &globalOpts})
if err != nil {
panic(err)
}
}

func parseTime(str string) (time.Time, error) {
for _, fmt := range timeFormats {
if t, err := time.ParseInLocation(fmt, str, time.Local); err == nil {
@@ -57,44 +72,44 @@ func parseTime(str string) (time.Time, error) {
}
}

return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
return time.Time{}, errors.Fatalf("unable to parse time: %q", str)
}

func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path string) ([]findResult, error) {
debug.Log("restic.find", "checking tree %v\n", id)
tree, err := restic.LoadTree(repo, id)
func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, path string) ([]findResult, error) {
debug.Log("checking tree %v\n", id)
tree, err := repo.LoadTree(id)
if err != nil {
return nil, err
}

results := []findResult{}
for _, node := range tree.Nodes {
debug.Log("restic.find", "  testing entry %q\n", node.Name)
debug.Log("  testing entry %q\n", node.Name)

m, err := filepath.Match(c.pattern, node.Name)
m, err := filepath.Match(pat.pattern, node.Name)
if err != nil {
return nil, err
}

if m {
debug.Log("restic.find", "  pattern matches\n")
if !c.oldest.IsZero() && node.ModTime.Before(c.oldest) {
debug.Log("restic.find", "  ModTime is older than %s\n", c.oldest)
debug.Log("  pattern matches\n")
if !pat.oldest.IsZero() && node.ModTime.Before(pat.oldest) {
debug.Log("  ModTime is older than %s\n", pat.oldest)
continue
}

if !c.newest.IsZero() && node.ModTime.After(c.newest) {
debug.Log("restic.find", "  ModTime is newer than %s\n", c.newest)
if !pat.newest.IsZero() && node.ModTime.After(pat.newest) {
debug.Log("  ModTime is newer than %s\n", pat.newest)
continue
}

results = append(results, findResult{node: node, path: path})
} else {
debug.Log("restic.find", "  pattern does not match\n")
debug.Log("  pattern does not match\n")
}

if node.Type == "dir" {
subdirResults, err := c.findInTree(repo, *node.Subtree, filepath.Join(path, node.Name))
subdirResults, err := findInTree(repo, pat, *node.Subtree, filepath.Join(path, node.Name))
if err != nil {
return nil, err
}
@@ -106,15 +121,15 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str
return results, nil
}

func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error {
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id.Str(), c.oldest, c.newest)
func findInSnapshot(repo *repository.Repository, pat findPattern, id restic.ID) error {
debug.Log("searching in snapshot %s\n for entries within [%s %s]", id.Str(), pat.oldest, pat.newest)

sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
return err
}

results, err := c.findInTree(repo, *sn.Tree, "")
results, err := findInTree(repo, pat, *sn.Tree, "")
if err != nil {
return err
}
@@ -122,49 +137,50 @@ func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) erro
if len(results) == 0 {
return nil
}
c.global.Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
for _, res := range results {
res.node.Name = filepath.Join(res.path, res.node.Name)
c.global.Printf("  %s\n", res.node)
Printf("  %s\n", res.node)
}

return nil
}

func (CmdFind) Usage() string {
return "[find-OPTIONS] PATTERN"
}

func (c CmdFind) Execute(args []string) error {
func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("wrong number of arguments, Usage: %s", c.Usage())
return errors.Fatalf("wrong number of arguments")
}

var err error
var (
err error
pat findPattern
)

if c.Oldest != "" {
c.oldest, err = parseTime(c.Oldest)
if opts.Oldest != "" {
pat.oldest, err = parseTime(opts.Oldest)
if err != nil {
return err
}
}

if c.Newest != "" {
c.newest, err = parseTime(c.Newest)
if opts.Newest != "" {
pat.newest, err = parseTime(opts.Newest)
if err != nil {
return err
}
}

repo, err := c.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}

lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}

err = repo.LoadIndex()
@@ -172,21 +188,21 @@ func (c CmdFind) Execute(args []string) error {
return err
}

c.pattern = args[0]
pat.pattern = args[0]

if c.Snapshot != "" {
snapshotID, err := restic.FindSnapshot(repo, c.Snapshot)
if opts.Snapshot != "" {
snapshotID, err := restic.FindSnapshot(repo, opts.Snapshot)
if err != nil {
return fmt.Errorf("invalid id %q: %v", args[1], err)
return errors.Fatalf("invalid id %q: %v", args[1], err)
}

return c.findInSnapshot(repo, snapshotID)
return findInSnapshot(repo, pat, snapshotID)
}

done := make(chan struct{})
defer close(done)
for snapshotID := range repo.List(backend.Snapshot, done) {
for snapshotID := range repo.List(restic.SnapshotFile, done) {
err := c.findInSnapshot(repo, snapshotID)
err := findInSnapshot(repo, pat, snapshotID)

if err != nil {
return err
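findInTree is a straightforward recursive walk: match the entry name with filepath.Match, drop entries outside the [oldest, newest] window, and recurse into subtrees. The standalone sketch below mirrors that control flow on a toy in-memory tree; the Node type here is a stand-in, not restic's.

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

// Node is a drastically simplified stand-in for restic's tree nodes.
type Node struct {
	Name    string
	Type    string // "file" or "dir"
	ModTime time.Time
	Nodes   []*Node // children, when Type == "dir"
}

type findPattern struct {
	oldest, newest time.Time
	pattern        string
}

// findInTree matches each entry against the pattern, filters by
// modification time, and recurses into subdirectories, accumulating
// the matching paths.
func findInTree(pat findPattern, tree *Node, prefix string) ([]string, error) {
	var results []string
	for _, node := range tree.Nodes {
		m, err := filepath.Match(pat.pattern, node.Name)
		if err != nil {
			return nil, err // invalid pattern
		}

		older := !pat.oldest.IsZero() && node.ModTime.Before(pat.oldest)
		newer := !pat.newest.IsZero() && node.ModTime.After(pat.newest)
		if m && !older && !newer {
			results = append(results, filepath.Join(prefix, node.Name))
		}

		if node.Type == "dir" {
			sub, err := findInTree(pat, node, filepath.Join(prefix, node.Name))
			if err != nil {
				return nil, err
			}
			results = append(results, sub...)
		}
	}
	return results, nil
}

func main() {
	root := &Node{Type: "dir", Nodes: []*Node{
		{Name: "notes.txt", Type: "file", ModTime: time.Now()},
		{Name: "src", Type: "dir", Nodes: []*Node{
			{Name: "main.go", Type: "file", ModTime: time.Now()},
		}},
	}}
	hits, _ := findInTree(findPattern{pattern: "*.go"}, root, "/")
	fmt.Println(hits) // prints [/src/main.go]
}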
src/cmds/restic/cmd_forget.go (new file, 204 lines)
@@ -0,0 +1,204 @@
package main

import (
"fmt"
"io"
"restic"
"strings"

"github.com/spf13/cobra"
)

var cmdForget = &cobra.Command{
Use: "forget [flags] [snapshot ID] [...]",
Short: "forget removes snapshots from the repository",
Long: `
The "forget" command removes snapshots according to a policy. Please note that
this command really only deletes the snapshot object in the repository, which
is a reference to data stored there. In order to remove this (now unreferenced)
data after 'forget' was run successfully, see the 'prune' command. `,
RunE: func(cmd *cobra.Command, args []string) error {
return runForget(forgetOptions, globalOptions, args)
},
}

// ForgetOptions collects all options for the forget command.
type ForgetOptions struct {
Last int
Hourly int
Daily int
Weekly int
Monthly int
Yearly int

KeepTags []string

Hostname string
Tags []string

DryRun bool
}

var forgetOptions ForgetOptions

func init() {
cmdRoot.AddCommand(cmdForget)

f := cmdForget.Flags()
f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")

f.StringSliceVar(&forgetOptions.KeepTags, "keep-tag", []string{}, "always keep snapshots with this `tag` (can be specified multiple times)")
f.StringVar(&forgetOptions.Hostname, "hostname", "", "only forget snapshots for the given hostname")
f.StringSliceVar(&forgetOptions.Tags, "tag", []string{}, "only forget snapshots with the `tag` (can be specified multiple times)")

f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
}

func printSnapshots(w io.Writer, snapshots restic.Snapshots) {
tab := NewTable()
tab.Header = fmt.Sprintf("%-8s %-19s %-10s %-10s %s", "ID", "Date", "Host", "Tags", "Directory")
tab.RowFormat = "%-8s %-19s %-10s %-10s %s"

for _, sn := range snapshots {
if len(sn.Paths) == 0 {
continue
}

firstTag := ""
if len(sn.Tags) > 0 {
firstTag = sn.Tags[0]
}

tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, sn.Paths[0]})

rows := len(sn.Paths)
if len(sn.Tags) > rows {
rows = len(sn.Tags)
}

for i := 1; i < rows; i++ {
path := ""
if len(sn.Paths) > i {
path = sn.Paths[i]
}

tag := ""
if len(sn.Tags) > i {
tag = sn.Tags[i]
}

tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, path})
}
}

tab.Write(w)
}

func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}

lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}

err = repo.LoadIndex()
if err != nil {
return err
}

// first, process all snapshot IDs given as arguments
for _, s := range args {
id, err := restic.FindSnapshot(repo, s)
if err != nil {
return err
}

if !opts.DryRun {
err = repo.Backend().Remove(restic.SnapshotFile, id.String())
if err != nil {
return err
}

Verbosef("removed snapshot %v\n", id.Str())
} else {
Verbosef("would removed snapshot %v\n", id.Str())
}
}

policy := restic.ExpirePolicy{
Last: opts.Last,
Hourly: opts.Hourly,
Daily: opts.Daily,
Weekly: opts.Weekly,
Monthly: opts.Monthly,
Yearly: opts.Yearly,
Tags: opts.KeepTags,
}

if policy.Empty() {
return nil
}

// then, load all remaining snapshots
snapshots, err := restic.LoadAllSnapshots(repo)
if err != nil {
return err
}

// group by hostname and dirs
type key struct {
Hostname string
Dirs string
}

snapshotGroups := make(map[key]restic.Snapshots)

for _, sn := range snapshots {
if opts.Hostname != "" && sn.Hostname != opts.Hostname {
continue
}

if !sn.HasTags(opts.Tags) {
continue
}

k := key{Hostname: sn.Hostname, Dirs: strings.Join(sn.Paths, ":")}
list := snapshotGroups[k]
list = append(list, sn)
snapshotGroups[k] = list
}

for key, snapshotGroup := range snapshotGroups {
Printf("snapshots for host %v, directories %v:\n\n", key.Hostname, key.Dirs)
keep, remove := restic.ApplyPolicy(snapshotGroup, policy)

Printf("keep %d snapshots:\n", len(keep))
printSnapshots(globalOptions.stdout, keep)
Printf("\n")

Printf("remove %d snapshots:\n", len(remove))
printSnapshots(globalOptions.stdout, remove)
Printf("\n")

if !opts.DryRun {
for _, sn := range remove {
err = repo.Backend().Remove(restic.SnapshotFile, sn.ID().String())
if err != nil {
return err
}
}
}
}

return nil
}
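Note how runForget applies the expiry policy per group rather than globally: snapshots are bucketed by hostname plus the colon-joined path list, so backups of different directory sets are kept or expired independently. The sketch below isolates just that grouping step with a pared-down Snapshot type; the names are illustrative, not restic's.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Snapshot is a pared-down stand-in for restic.Snapshot.
type Snapshot struct {
	Hostname string
	Paths    []string
}

// groupKey identifies one policy group: same host, same directory set.
type groupKey struct {
	Hostname string
	Dirs     string
}

// groupSnapshots buckets snapshots by (hostname, joined paths), the
// same shape as the map built in runForget above.
func groupSnapshots(snaps []Snapshot) map[groupKey][]Snapshot {
	groups := make(map[groupKey][]Snapshot)
	for _, sn := range snaps {
		k := groupKey{Hostname: sn.Hostname, Dirs: strings.Join(sn.Paths, ":")}
		groups[k] = append(groups[k], sn)
	}
	return groups
}

func main() {
	snaps := []Snapshot{
		{Hostname: "alpha", Paths: []string{"/home"}},
		{Hostname: "alpha", Paths: []string{"/home"}},
		{Hostname: "beta", Paths: []string{"/srv", "/etc"}},
	}
	groups := groupSnapshots(snaps)

	// Sort the keys so the output is deterministic.
	var keys []groupKey
	for k := range groups {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i].Hostname < keys[j].Hostname })
	for _, k := range keys {
		fmt.Printf("host %s, dirs %s: %d snapshot(s)\n", k.Hostname, k.Dirs, len(groups[k]))
	}
}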
@@ -1,53 +1,58 @@
package main

import (
"errors"

"restic/errors"
"restic/repository"

"github.com/spf13/cobra"
)

type CmdInit struct {
global *GlobalOptions
var cmdInit = &cobra.Command{
Use: "init",
Short: "initialize a new repository",
Long: `
The "init" command initializes a new repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runInit(globalOptions, args)
},
}

func (cmd CmdInit) Execute(args []string) error {
if cmd.global.Repo == "" {
return errors.New("Please specify repository location (-r)")
func init() {
cmdRoot.AddCommand(cmdInit)
}

func runInit(gopts GlobalOptions, args []string) error {
if gopts.Repo == "" {
return errors.Fatal("Please specify repository location (-r)")
}

be, err := create(cmd.global.Repo)
be, err := create(gopts.Repo)
if err != nil {
cmd.global.Exitf(1, "creating backend at %s failed: %v\n", cmd.global.Repo, err)
return errors.Fatalf("create backend at %s failed: %v\n", gopts.Repo, err)
}

if cmd.global.password == "" {
cmd.global.password = cmd.global.ReadPasswordTwice(
if gopts.password == "" {
gopts.password, err = ReadPasswordTwice(gopts,
"enter password for new backend: ",
"enter password again: ")
if err != nil {
return err
}
}

s := repository.New(be)

err = s.Init(cmd.global.password)
err = s.Init(gopts.password)
if err != nil {
cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err)
return errors.Fatalf("create key in backend at %s failed: %v\n", gopts.Repo, err)
}

cmd.global.Verbosef("created restic backend %v at %s\n", s.Config.ID[:10], cmd.global.Repo)
cmd.global.Verbosef("\n")
cmd.global.Verbosef("Please note that knowledge of your password is required to access\n")
cmd.global.Verbosef("the repository. Losing your password means that your data is\n")
cmd.global.Verbosef("irrecoverably lost.\n")
Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], gopts.Repo)
Verbosef("\n")
Verbosef("Please note that knowledge of your password is required to access\n")
Verbosef("the repository. Losing your password means that your data is\n")
Verbosef("irrecoverably lost.\n")

return nil
}

func init() {
_, err := parser.AddCommand("init",
"create repository",
"The init command creates a new repository",
&CmdInit{global: &globalOpts})
if err != nil {
panic(err)
}
}
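Both init and key now obtain new passwords through a shared ReadPasswordTwice helper. A minimal sketch of such a double-entry check is shown below; it reads plain lines from an io.Reader, whereas a real implementation would also disable terminal echo (for instance via golang.org/x/term), which is omitted here.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
)

// readPasswordTwice prompts twice and only succeeds when both entries
// match, mirroring the confirmation step the migrated commands rely on.
func readPasswordTwice(r io.Reader, w io.Writer, prompt1, prompt2 string) (string, error) {
	br := bufio.NewReader(r)

	read := func(prompt string) (string, error) {
		fmt.Fprint(w, prompt)
		line, err := br.ReadString('\n')
		if err != nil {
			return "", err
		}
		return strings.TrimRight(line, "\r\n"), nil
	}

	p1, err := read(prompt1)
	if err != nil {
		return "", err
	}
	p2, err := read(prompt2)
	if err != nil {
		return "", err
	}
	if p1 != p2 {
		return "", errors.New("passwords do not match")
	}
	return p1, nil
}

func main() {
	// Simulated interactive input for the example.
	input := strings.NewReader("geheim\ngeheim\n")
	pw, err := readPasswordTwice(input, os.Stdout,
		"enter password for new backend: ", "enter password again: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("\npassword accepted (%d characters)\n", len(pw))
}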
@@ -1,45 +1,42 @@
package main

import (
"errors"
"fmt"
"restic"

"restic/backend"
"github.com/spf13/cobra"

"restic/errors"
"restic/repository"
)

type CmdKey struct {
global *GlobalOptions
newPassword string
var cmdKey = &cobra.Command{
Use: "key [list|add|rm|passwd] [ID]",
Short: "manage keys (passwords)",
Long: `
The "key" command manages keys (passwords) for accessing a repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runKey(globalOptions, args)
},
}

func init() {
_, err := parser.AddCommand("key",
"manage keys",
"The key command manages keys (passwords) of a repository",
&CmdKey{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdKey)
}

func (cmd CmdKey) listKeys(s *repository.Repository) error {
func listKeys(s *repository.Repository) error {
tab := NewTable()
tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
tab.RowFormat = "%s%-10s %-10s %-10s %s"

plen, err := s.PrefixLength(backend.Key)
if err != nil {
return err
}

done := make(chan struct{})
defer close(done)

for id := range s.List(backend.Key, done) {
for id := range s.List(restic.KeyFile, done) {
k, err := repository.LoadKey(s, id.String())
if err != nil {
cmd.global.Warnf("LoadKey() failed: %v\n", err)
Warnf("LoadKey() failed: %v\n", err)
continue
}

@@ -49,74 +46,83 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error {
} else {
current = " "
}
tab.Rows = append(tab.Rows, []interface{}{current, id.String()[:plen],
tab.Rows = append(tab.Rows, []interface{}{current, id.Str(),
k.Username, k.Hostname, k.Created.Format(TimeFormat)})
}

return tab.Write(cmd.global.stdout)
return tab.Write(globalOptions.stdout)
}

func (cmd CmdKey) getNewPassword() string {
if cmd.newPassword != "" {
return cmd.newPassword
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string

func getNewPassword(gopts GlobalOptions) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}

return cmd.global.ReadPasswordTwice(
return ReadPasswordTwice(gopts,
"enter password for new key: ",
"enter password again: ")
}

func (cmd CmdKey) addKey(repo *repository.Repository) error {
id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
func addKey(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
return err
}

cmd.global.Verbosef("saved new key as %s\n", id)
id, err := repository.AddKey(repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}

Verbosef("saved new key as %s\n", id)

return nil
}

func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error {
func deleteKey(repo *repository.Repository, name string) error {
if name == repo.KeyName() {
return errors.New("refusing to remove key currently used to access repository")
return errors.Fatal("refusing to remove key currently used to access repository")
}

err := repo.Backend().Remove(backend.Key, name)
err := repo.Backend().Remove(restic.KeyFile, name)
if err != nil {
return err
}

cmd.global.Verbosef("removed key %v\n", name)
Verbosef("removed key %v\n", name)
return nil
}

func (cmd CmdKey) changePassword(repo *repository.Repository) error {
id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
}

err = repo.Backend().Remove(backend.Key, repo.KeyName())
func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}

cmd.global.Verbosef("saved new key as %s\n", id)
id, err := repository.AddKey(repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}

err = repo.Backend().Remove(restic.KeyFile, repo.KeyName())
if err != nil {
return err
}

Verbosef("saved new key as %s\n", id)

return nil
}

func (cmd CmdKey) Usage() string {
return "[list|add|rm|passwd] [ID]"
}

func (cmd CmdKey) Execute(args []string) error {
func runKey(gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] == "rm" && len(args) != 2) {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
return errors.Fatalf("wrong number of arguments")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -129,7 +135,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}

return cmd.listKeys(repo)
return listKeys(repo)
case "add":
lock, err := lockRepo(repo)
defer unlockRepo(lock)
@@ -137,7 +143,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}

return cmd.addKey(repo)
return addKey(gopts, repo)
case "rm":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
@@ -145,12 +151,12 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}

id, err := backend.Find(repo.Backend(), backend.Key, args[1])
id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1])
if err != nil {
return err
}

return cmd.deleteKey(repo, id)
return deleteKey(repo, id)
case "passwd":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
@@ -158,7 +164,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}

return cmd.changePassword(repo)
return changePassword(gopts, repo)
}

return nil
@@ -1,41 +1,38 @@
package main

import (
"errors"
"fmt"
"restic"
"restic/errors"

"restic/backend"
"github.com/spf13/cobra"
)

type CmdList struct {
global *GlobalOptions
var cmdList = &cobra.Command{
Use: "list [blobs|packs|index|snapshots|keys|locks]",
Short: "list items in the repository",
Long: `

`,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(globalOptions, args)
},
}

func init() {
_, err := parser.AddCommand("list",
"lists data",
"The list command lists structures or data of a repository",
&CmdList{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdList)
}

func (cmd CmdList) Usage() string {
return "[blobs|packs|index|snapshots|keys|locks]"
}

func (cmd CmdList) Execute(args []string) error {
func runList(opts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
return errors.Fatalf("type not specified")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(opts)
if err != nil {
return err
}

if !cmd.global.NoLock {
if !opts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
@@ -43,37 +40,24 @@ func (cmd CmdList) Execute(args []string) error {
}
}

var t backend.Type
var t restic.FileType
switch args[0] {
case "blobs":
err = repo.LoadIndex()
if err != nil {
return err
}

for _, idx := range repo.Index().All() {
for blob := range idx.Each(nil) {
cmd.global.Printf("%s\n", blob.ID)
}
}

return nil
case "packs":
t = backend.Data
t = restic.DataFile
case "index":
t = backend.Index
t = restic.IndexFile
case "snapshots":
t = backend.Snapshot
t = restic.SnapshotFile
case "keys":
t = backend.Key
t = restic.KeyFile
case "locks":
t = backend.Lock
t = restic.LockFile
default:
return errors.New("invalid type")
return errors.Fatal("invalid type")
}

for id := range repo.List(t, nil) {
cmd.global.Printf("%s\n", id)
Printf("%s\n", id)
}

return nil
@@ -5,29 +5,34 @@ import (
"os"
"path/filepath"

"github.com/spf13/cobra"

"restic"
"restic/backend"
"restic/errors"
"restic/repository"
)

type CmdLs struct {
Long bool `short:"l" long:"long" description:"Use a long listing format showing size and mode"`

global *GlobalOptions
var cmdLs = &cobra.Command{
Use: "ls [flags] snapshot-ID",
Short: "list files in a snapshot",
Long: `
The "ls" command allows listing files and directories in a snapshot.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runLs(globalOptions, args)
},
}

var listLong bool

func init() {
_, err := parser.AddCommand("ls",
"list files",
"The ls command lists all files and directories in a snapshot",
&CmdLs{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdLs)

cmdLs.Flags().BoolVarP(&listLong, "long", "l", false, "use a long listing format showing size and mode")
}

func (cmd CmdLs) printNode(prefix string, n *restic.Node) string {
if !cmd.Long {
func printNode(prefix string, n *restic.Node) string {
if !listLong {
return filepath.Join(prefix, n.Name)
}

@@ -46,17 +51,17 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string {
}
}

func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backend.ID) error {
tree, err := restic.LoadTree(repo, id)
func printTree(prefix string, repo *repository.Repository, id restic.ID) error {
tree, err := repo.LoadTree(id)
if err != nil {
return err
}

for _, entry := range tree.Nodes {
cmd.global.Printf(cmd.printNode(prefix, entry) + "\n")
Printf(printNode(prefix, entry) + "\n")

if entry.Type == "dir" && entry.Subtree != nil {
err = cmd.printTree(filepath.Join(prefix, entry.Name), repo, *entry.Subtree)
err = printTree(filepath.Join(prefix, entry.Name), repo, *entry.Subtree)
if err != nil {
return err
}
@@ -66,16 +71,12 @@ func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backen
return nil
}

func (cmd CmdLs) Usage() string {
return "snapshot-ID [DIR]"
}

func (cmd CmdLs) Execute(args []string) error {
func runLs(gopts GlobalOptions, args []string) error {
if len(args) < 1 || len(args) > 2 {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
return errors.Fatalf("no snapshot ID given")
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -95,7 +96,7 @@ func (cmd CmdLs) Execute(args []string) error {
return err
}

cmd.global.Verbosef("snapshot of %v at %s:\n", sn.Paths, sn.Time)
Verbosef("snapshot of %v at %s:\n", sn.Paths, sn.Time)

return cmd.printTree("", repo, *sn.Tree)
return printTree("", repo, *sn.Tree)
}
@@ -4,47 +4,50 @@
package main

import (
"fmt"
"os"

"github.com/spf13/cobra"

"restic/debug"
"restic/errors"

resticfs "restic/fs"
"restic/fuse"

systemFuse "bazil.org/fuse"
"bazil.org/fuse/fs"
)

type CmdMount struct {
Root bool `long:"owner-root" description:"use 'root' as the owner of files and dirs" default:"false"`

global *GlobalOptions
ready chan struct{}
done chan struct{}
var cmdMount = &cobra.Command{
Use: "mount [flags] mountpoint",
Short: "mount the repository",
Long: `
The "mount" command mounts the repository via fuse to a directory. This is a
read-only mount.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runMount(mountOptions, globalOptions, args)
},
}

// MountOptions collects all options for the mount command.
type MountOptions struct {
OwnerRoot bool
}

var mountOptions MountOptions

func init() {
_, err := parser.AddCommand("mount",
"mount a repository",
"The mount command mounts a repository read-only to a given directory",
&CmdMount{
global: &globalOpts,
ready: make(chan struct{}, 1),
done: make(chan struct{}),
})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdMount)

cmdMount.Flags().BoolVar(&mountOptions.OwnerRoot, "owner-root", false, "use 'root' as the owner of files and dirs")
}

func (cmd CmdMount) Usage() string {
return "MOUNTPOINT"
}
func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
debug.Log("start mount")
defer debug.Log("finish mount")

func (cmd CmdMount) Execute(args []string) error {
if len(args) == 0 {
return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
}

repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -54,10 +57,9 @@ func (cmd CmdMount) Execute(args []string) error {
return err
}

mountpoint := args[0]
if _, err := os.Stat(mountpoint); os.IsNotExist(err) {
cmd.global.Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
err = os.Mkdir(mountpoint, os.ModeDir|0700)
if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) {
Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
err = resticfs.Mkdir(mountpoint, os.ModeDir|0700)
if err != nil {
return err
}
@@ -71,37 +73,41 @@ func (cmd CmdMount) Execute(args []string) error {
return err
}

root := fs.Tree{}
root.Add("snapshots", fuse.NewSnapshotsDir(repo, cmd.Root))
Printf("Now serving the repository at %s\n", mountpoint)
Printf("Don't forget to umount after quitting!\n")

cmd.global.Printf("Now serving %s at %s\n", repo.Backend().Location(), mountpoint)
cmd.global.Printf("Don't forget to umount after quitting!\n")
root := fs.Tree{}
root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot))

debug.Log("serving mount at %v", mountpoint)
err = fs.Serve(c, &root)
if err != nil {
return err
}

<-c.Ready
return c.MountError
}

func umount(mountpoint string) error {
return systemFuse.Unmount(mountpoint)
}

func runMount(opts MountOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 {
return errors.Fatalf("wrong number of parameters")
}

mountpoint := args[0]

AddCleanupHandler(func() error {
return systemFuse.Unmount(mountpoint)
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
err := umount(mountpoint)
if err != nil {
Warnf("unable to umount (maybe already umounted?): %v\n", err)
}
return nil
})

cmd.ready <- struct{}{}

errServe := make(chan error)
go func() {
err = fs.Serve(c, &root)
if err != nil {
errServe <- err
}

<-c.Ready
errServe <- c.MountError
}()

select {
case err := <-errServe:
return err
case <-cmd.done:
err := systemFuse.Unmount(mountpoint)
if err != nil {
cmd.global.Printf("Error umounting: %s\n", err)
}
return c.Close()
}
return mount(opts, gopts, mountpoint)
}
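The mount rewrite moves fs.Serve into a goroutine, reports its outcome over an error channel, and registers a cleanup handler so the filesystem is unmounted even on interrupt. The sketch below models that structure with a fake serve function standing in for the FUSE loop; the mountpoint and timings are illustrative only.

package main

import (
	"errors"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// serve stands in for fs.Serve on a FUSE connection: it blocks until
// the mount is torn down or fails. Everything here is illustrative.
func serve(mountpoint string, unmounted <-chan struct{}) error {
	fmt.Printf("Now serving the repository at %s\n", mountpoint)
	select {
	case <-unmounted:
		return nil
	case <-time.After(10 * time.Second):
		return errors.New("simulated mount error")
	}
}

func main() {
	mountpoint := "/mnt/restic" // illustrative path
	unmounted := make(chan struct{})

	// Mirror of runMount's cleanup handler: make sure the filesystem
	// is unmounted even when the process is interrupted.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sig
		fmt.Println("\nsignal received, unmounting")
		close(unmounted) // stands in for systemFuse.Unmount(mountpoint)
	}()

	// Like the migrated code, run the serve loop in a goroutine and
	// wait for its result on an error channel.
	errServe := make(chan error, 1)
	go func() { errServe <- serve(mountpoint, unmounted) }()

	if err := <-errServe; err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}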
src/cmds/restic/cmd_prune.go (new file, 253 lines)
@@ -0,0 +1,253 @@
package main

import (
	"fmt"
	"os"
	"restic"
	"restic/debug"
	"restic/errors"
	"restic/index"
	"restic/repository"
	"time"

	"github.com/spf13/cobra"

	"golang.org/x/crypto/ssh/terminal"
)

var cmdPrune = &cobra.Command{
	Use:   "prune [flags]",
	Short: "remove unneeded data from the repository",
	Long: `
The "prune" command checks the repository and removes data that is not
referenced and therefore not needed any more.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runPrune(globalOptions)
	},
}

func init() {
	cmdRoot.AddCommand(cmdPrune)
}

// newProgressMax returns a progress that counts blobs.
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
	if !show {
		return nil
	}

	p := restic.NewProgress()

	p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
		status := fmt.Sprintf("[%s] %s %d / %d %s",
			formatDuration(d),
			formatPercent(s.Blobs, max),
			s.Blobs, max, description)

		w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
		if err == nil {
			if len(status) > w {
				max := w - len(status) - 4
				status = status[:max] + "... "
			}
		}

		PrintProgress("%s", status)
	}

	p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
		fmt.Printf("\n")
	}

	return p
}

func runPrune(gopts GlobalOptions) error {
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	lock, err := lockRepoExclusive(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	done := make(chan struct{})
	defer close(done)

	var stats struct {
		blobs     int
		packs     int
		snapshots int
		bytes     int64
	}

	Verbosef("counting files in repo\n")
	for _ = range repo.List(restic.DataFile, done) {
		stats.packs++
	}

	Verbosef("building new index for repo\n")

	bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
	idx, err := index.New(repo, bar)
	if err != nil {
		return err
	}

	for _, pack := range idx.Packs {
		stats.bytes += pack.Size
	}
	Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
		len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes)))

	blobCount := make(map[restic.BlobHandle]int)
	duplicateBlobs := 0
	duplicateBytes := 0

	// find duplicate blobs
	for _, p := range idx.Packs {
		for _, entry := range p.Entries {
			stats.blobs++
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			blobCount[h]++

			if blobCount[h] > 1 {
				duplicateBlobs++
				duplicateBytes += int(entry.Length)
			}
		}
	}

	Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate bytes\n",
		stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
	Verbosef("load all snapshots\n")

	// find referenced blobs
	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		return err
	}

	stats.snapshots = len(snapshots)

	Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)

	usedBlobs := restic.NewBlobSet()
	seenBlobs := restic.NewBlobSet()

	bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots")
	bar.Start()
	for _, sn := range snapshots {
		debug.Log("process snapshot %v", sn.ID().Str())

		err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
		if err != nil {
			return err
		}

		debug.Log("found %v blobs for snapshot %v", len(usedBlobs), sn.ID().Str())
		bar.Report(restic.Stat{Blobs: 1})
	}
	bar.Done()

	Verbosef("found %d of %d data blobs still in use, removing %d blobs\n",
		len(usedBlobs), stats.blobs, stats.blobs-len(usedBlobs))

	// find packs that need a rewrite
	rewritePacks := restic.NewIDSet()
	for h, blob := range idx.Blobs {
		if !usedBlobs.Has(h) {
			rewritePacks.Merge(blob.Packs)
			continue
		}

		if blobCount[h] > 1 {
			rewritePacks.Merge(blob.Packs)
		}
	}

	removeBytes := 0

	// find packs that are unneeded
	removePacks := restic.NewIDSet()
	for packID, p := range idx.Packs {

		hasActiveBlob := false
		for _, blob := range p.Entries {
			h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
			if usedBlobs.Has(h) {
				hasActiveBlob = true
				continue
			}

			removeBytes += int(blob.Length)
		}

		if hasActiveBlob {
			continue
		}

		removePacks.Insert(packID)

		if !rewritePacks.Has(packID) {
			return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
		}

		rewritePacks.Delete(packID)
	}

	Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n",
		len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes)))

	err = repository.Repack(repo, rewritePacks, usedBlobs)
	if err != nil {
		return err
	}

	for packID := range removePacks {
		err = repo.Backend().Remove(restic.DataFile, packID.String())
		if err != nil {
			Warnf("unable to remove file %v from the repository\n", packID.Str())
		}
	}

	Verbosef("creating new index\n")

	stats.packs = 0
	for _ = range repo.List(restic.DataFile, done) {
		stats.packs++
	}
	bar = newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
	idx, err = index.New(repo, bar)
	if err != nil {
		return err
	}

	var supersedes restic.IDs
	for idxID := range repo.List(restic.IndexFile, done) {
		err := repo.Backend().Remove(restic.IndexFile, idxID.String())
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err)
		}

		supersedes = append(supersedes, idxID)
	}

	id, err := idx.Save(repo, supersedes)
	if err != nil {
		return err
	}
	Verbosef("saved new index as %v\n", id.Str())

	Verbosef("done\n")
	return nil
}

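The pack classification above follows a simple rule: a pack with no used blobs at all is deleted outright, while a pack that mixes used blobs with unused or duplicated ones is queued for rewriting. A minimal standalone sketch of that decision, illustrative only, with a plain map standing in for restic's blob sets:

	// classifyPack sketches the prune decision for a single pack: delete it
	// wholesale when nothing in it is referenced, rewrite it when it mixes
	// live and dead blobs, and keep it untouched when everything is in use.
	func classifyPack(entries []restic.BlobHandle, used map[restic.BlobHandle]bool) (remove, rewrite bool) {
		hasUsed, hasUnused := false, false
		for _, h := range entries {
			if used[h] {
				hasUsed = true
			} else {
				hasUnused = true
			}
		}
		switch {
		case !hasUsed:
			return true, false // fully dead: remove the pack file
		case hasUnused:
			return false, true // mixed: repack only the used blobs
		default:
			return false, false // fully live: keep as-is
		}
	}
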
@@ -1,125 +1,32 @@
package main

import (
	"fmt"
	"os"
	"restic/backend"
	"restic/debug"
	"restic/pack"
	"restic/repository"
	"restic/worker"

	"github.com/spf13/cobra"
)

type CmdRebuildIndex struct {
	global *GlobalOptions

	repo *repository.Repository
var cmdRebuildIndex = &cobra.Command{
	Use:   "rebuild-index [flags]",
	Short: "build a new index file",
	Long: `
The "rebuild-index" command creates a new index by combining the index files
into a new one.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runRebuildIndex(globalOptions)
	},
}

func init() {
	_, err := parser.AddCommand("rebuild-index",
		"rebuild the index",
		"The rebuild-index command builds a new index",
		&CmdRebuildIndex{global: &globalOpts})
	if err != nil {
		panic(err)
	}
	cmdRoot.AddCommand(cmdRebuildIndex)
}

const rebuildIndexWorkers = 10

func loadBlobsFromPacks(repo *repository.Repository) (packs map[backend.ID][]pack.Blob) {
	done := make(chan struct{})
	defer close(done)

	f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
		return repo.ListPack(job.Data.(backend.ID))
	}

	jobCh := make(chan worker.Job)
	resCh := make(chan worker.Job)
	wp := worker.New(rebuildIndexWorkers, f, jobCh, resCh)

	go func() {
		for id := range repo.List(backend.Data, done) {
			jobCh <- worker.Job{Data: id}
		}
		close(jobCh)
	}()

	packs = make(map[backend.ID][]pack.Blob)
	for job := range resCh {
		id := job.Data.(backend.ID)

		if job.Error != nil {
			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
			continue
		}

		entries := job.Result.([]pack.Blob)
		packs[id] = entries
	}

	wp.Wait()

	return packs
}

func listIndexIDs(repo *repository.Repository) (list backend.IDs) {
	done := make(chan struct{})
	for id := range repo.List(backend.Index, done) {
		list = append(list, id)
	}

	return list
}

func (cmd CmdRebuildIndex) rebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start rebuilding index")

	packs := loadBlobsFromPacks(cmd.repo)
	cmd.global.Verbosef("loaded blobs from %d packs\n", len(packs))

	idx := repository.NewIndex()
	for packID, entries := range packs {
		for _, entry := range entries {
			pb := repository.PackedBlob{
				ID:     entry.ID,
				Type:   entry.Type,
				Length: entry.Length,
				Offset: entry.Offset,
				PackID: packID,
			}
			idx.Store(pb)
		}
	}

	oldIndexes := listIndexIDs(cmd.repo)
	idx.AddToSupersedes(oldIndexes...)
	cmd.global.Printf("  saving new index\n")
	id, err := repository.SaveIndex(cmd.repo, idx)
	if err != nil {
		debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
		return err
	}
	debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())

	for _, indexID := range oldIndexes {
		err := cmd.repo.Backend().Remove(backend.Index, indexID.String())
		if err != nil {
			cmd.global.Warnf("unable to remove index %v: %v\n", indexID.Str(), err)
		}
	}

	return nil
}

func (cmd CmdRebuildIndex) Execute(args []string) error {
	repo, err := cmd.global.OpenRepository()
func runRebuildIndex(gopts GlobalOptions) error {
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}
	cmd.repo = repo

	lock, err := lockRepoExclusive(repo)
	defer unlockRepo(lock)
@@ -127,5 +34,5 @@ func (cmd CmdRebuildIndex) Execute(args []string) error {
		return err
	}

	return cmd.rebuildIndex()
	return repository.RebuildIndex(repo)
}

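The removed loadBlobsFromPacks shows the fan-out/fan-in shape behind restic/worker: one goroutine feeds pack IDs into a job channel, a fixed pool of workers lists each pack concurrently, and results are drained from a second channel. The same shape in plain goroutines and channels, a generic sketch rather than restic code (needs the sync package):

	// listAll runs fn over every item with n concurrent workers and collects
	// the results; errors are reported per item instead of aborting the batch.
	func listAll(items []string, n int, fn func(string) (string, error)) map[string]string {
		type result struct {
			item, value string
			err         error
		}

		jobs := make(chan string)
		results := make(chan result)

		var wg sync.WaitGroup
		for i := 0; i < n; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for item := range jobs {
					v, err := fn(item)
					results <- result{item, v, err}
				}
			}()
		}

		go func() {
			for _, item := range items {
				jobs <- item
			}
			close(jobs)
		}()

		go func() {
			wg.Wait()
			close(results)
		}()

		out := make(map[string]string)
		for r := range results {
			if r.err != nil {
				continue // skip failed items, mirroring the pack-listing loop
			}
			out[r.item] = r.value
		}
		return out
	}
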
@@ -1,62 +1,75 @@
package main

import (
	"errors"
	"fmt"

	"restic"
	"restic/backend"
	"restic/debug"
	"restic/errors"
	"restic/filter"

	"github.com/spf13/cobra"
)

type CmdRestore struct {
	Exclude []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
	Include []string `short:"i" long:"include" description:"Include a pattern, exclude everything else (can be specified multiple times)"`
	Target  string   `short:"t" long:"target" description:"Directory to restore to"`
	Host    string   `short:"h" long:"host" description:"Source Filter (for id=latest)"`
	Paths   []string `short:"p" long:"path" description:"Path Filter (absolute path;for id=latest) (can be specified multiple times)"`
var cmdRestore = &cobra.Command{
	Use:   "restore [flags] snapshotID",
	Short: "extract the data from a snapshot",
	Long: `
The "restore" command extracts the data from a snapshot from the repository to
a directory.

	global *GlobalOptions
The special snapshot "latest" can be used to restore the latest snapshot in the
repository.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runRestore(restoreOptions, globalOptions, args)
	},
}

// RestoreOptions collects all options for the restore command.
type RestoreOptions struct {
	Exclude []string
	Include []string
	Target  string
	Host    string
	Paths   []string
}

var restoreOptions RestoreOptions

func init() {
	_, err := parser.AddCommand("restore",
		"restore a snapshot",
		"The restore command restores a snapshot to a directory",
		&CmdRestore{global: &globalOpts})
	if err != nil {
		panic(err)
	}
	cmdRoot.AddCommand(cmdRestore)

	flags := cmdRestore.Flags()
	flags.StringSliceVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
	flags.StringSliceVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
	flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")

	flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
	flags.StringSliceVar(&restoreOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
}

func (cmd CmdRestore) Usage() string {
	return "snapshot-ID"
}

func (cmd CmdRestore) Execute(args []string) error {
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
	if len(args) != 1 {
		return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
		return errors.Fatalf("no snapshot ID specified")
	}

	if cmd.Target == "" {
		return errors.New("please specify a directory to restore to (--target)")
	if opts.Target == "" {
		return errors.Fatal("please specify a directory to restore to (--target)")
	}

	if len(cmd.Exclude) > 0 && len(cmd.Include) > 0 {
		return errors.New("exclude and include patterns are mutually exclusive")
	if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
		return errors.Fatal("exclude and include patterns are mutually exclusive")
	}

	snapshotIDString := args[0]

	debug.Log("restore", "restore %v to %v", snapshotIDString, cmd.Target)
	debug.Log("restore %v to %v", snapshotIDString, opts.Target)

	repo, err := cmd.global.OpenRepository()
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	if !cmd.global.NoLock {
	if !gopts.NoLock {
		lock, err := lockRepo(repo)
		defer unlockRepo(lock)
		if err != nil {
@@ -69,60 +82,55 @@ func (cmd CmdRestore) Execute(args []string) error {
		return err
	}

	var id backend.ID
	var id restic.ID

	if snapshotIDString == "latest" {
		id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host)
		id, err = restic.FindLatestSnapshot(repo, opts.Paths, opts.Host)
		if err != nil {
			cmd.global.Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, cmd.Paths, cmd.Host)
			Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
		}
	} else {
		id, err = restic.FindSnapshot(repo, snapshotIDString)
		if err != nil {
			cmd.global.Exitf(1, "invalid id %q: %v", snapshotIDString, err)
			Exitf(1, "invalid id %q: %v", snapshotIDString, err)
		}
	}

	res, err := restic.NewRestorer(repo, id)
	if err != nil {
		cmd.global.Exitf(2, "creating restorer failed: %v\n", err)
		Exitf(2, "creating restorer failed: %v\n", err)
	}

	res.Error = func(dir string, node *restic.Node, err error) error {
		cmd.global.Warnf("error for %s: %+v\n", dir, err)
		Warnf("error for %s: %+v\n", dir, err)
		return nil
	}

	selectExcludeFilter := func(item string, dstpath string, node *restic.Node) bool {
		matched, err := filter.List(cmd.Exclude, item)
		matched, err := filter.List(opts.Exclude, item)
		if err != nil {
			cmd.global.Warnf("error for exclude pattern: %v", err)
			Warnf("error for exclude pattern: %v", err)
		}

		return !matched
	}

	selectIncludeFilter := func(item string, dstpath string, node *restic.Node) bool {
		matched, err := filter.List(cmd.Include, item)
		matched, err := filter.List(opts.Include, item)
		if err != nil {
			cmd.global.Warnf("error for include pattern: %v", err)
			Warnf("error for include pattern: %v", err)
		}

		return matched
	}

	if len(cmd.Exclude) > 0 {
	if len(opts.Exclude) > 0 {
		res.SelectFilter = selectExcludeFilter
	} else if len(cmd.Include) > 0 {
	} else if len(opts.Include) > 0 {
		res.SelectFilter = selectIncludeFilter
	}

	cmd.global.Verbosef("restoring %s to %s\n", res.Snapshot(), cmd.Target)
	Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)

	err = res.RestoreTo(cmd.Target)
	if err != nil {
		return err
	}

	return nil
	return res.RestoreTo(opts.Target)
}

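The two select functions above implement opposite policies over the same match primitive: exclude restores everything that does not match, include restores only what does. The same decision in isolation, with filepath.Match standing in for restic/filter (which it only approximates):

	// selectForRestore decides whether an item is restored under either policy.
	// Patterns use filepath.Match syntax here purely for illustration.
	func selectForRestore(patterns []string, exclude bool, item string) bool {
		matched := false
		for _, p := range patterns {
			if ok, err := filepath.Match(p, item); err == nil && ok {
				matched = true
				break
			}
		}
		if exclude {
			return !matched // restore everything that does not match
		}
		return matched // restore only what matches
	}
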
@@ -1,105 +1,77 @@
package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"restic/errors"
	"sort"
	"strings"

	"github.com/spf13/cobra"

	"restic"
	"restic/backend"
)

type Table struct {
	Header string
	Rows   [][]interface{}

	RowFormat string
var cmdSnapshots = &cobra.Command{
	Use:   "snapshots",
	Short: "list all snapshots",
	Long: `
The "snapshots" command lists all snapshots stored in a repository.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runSnapshots(snapshotOptions, globalOptions, args)
	},
}

func NewTable() Table {
	return Table{
		Rows: [][]interface{}{},
	}
// SnapshotOptions bundle all options for the snapshots command.
type SnapshotOptions struct {
	Host  string
	Paths []string
}

func (t Table) Write(w io.Writer) error {
	_, err := fmt.Fprintln(w, t.Header)
	if err != nil {
		return err
var snapshotOptions SnapshotOptions

func init() {
	cmdRoot.AddCommand(cmdSnapshots)

	f := cmdSnapshots.Flags()
	f.StringVar(&snapshotOptions.Host, "host", "", "only print snapshots for this host")
	f.StringSliceVar(&snapshotOptions.Paths, "path", []string{}, "only print snapshots for this `path` (can be specified multiple times)")
}

func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
	if len(args) != 0 {
		return errors.Fatalf("wrong number of arguments")
	}
	_, err = fmt.Fprintln(w, strings.Repeat("-", 70))

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	for _, row := range t.Rows {
		_, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
	if !gopts.NoLock {
		lock, err := lockRepo(repo)
		defer unlockRepo(lock)
		if err != nil {
			return err
		}
	}

	return nil
}

const TimeFormat = "2006-01-02 15:04:05"

type CmdSnapshots struct {
	Host  string   `short:"h" long:"host" description:"Host Filter"`
	Paths []string `short:"p" long:"path" description:"Path Filter (absolute path) (can be specified multiple times)"`

	global *GlobalOptions
}

func init() {
	_, err := parser.AddCommand("snapshots",
		"show snapshots",
		"The snapshots command lists all snapshots stored in a repository",
		&CmdSnapshots{global: &globalOpts})
	if err != nil {
		panic(err)
	}
}

func (cmd CmdSnapshots) Usage() string {
	return ""
}

func (cmd CmdSnapshots) Execute(args []string) error {
	if len(args) != 0 {
		return fmt.Errorf("wrong number of arguments, usage: %s", cmd.Usage())
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tab := NewTable()
	tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Host", "Directory")
	tab.RowFormat = "%-8s %-19s %-10s %s"
	tab.Header = fmt.Sprintf("%-8s %-19s %-10s %-10s %s", "ID", "Date", "Host", "Tags", "Directory")
	tab.RowFormat = "%-8s %-19s %-10s %-10s %s"

	done := make(chan struct{})
	defer close(done)

	list := []*restic.Snapshot{}
	for id := range repo.List(backend.Snapshot, done) {
	for id := range repo.List(restic.SnapshotFile, done) {
		sn, err := restic.LoadSnapshot(repo, id)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
			continue
		}

		if restic.SamePaths(sn.Paths, cmd.Paths) && (cmd.Host == "" || cmd.Host == sn.Hostname) {
		if restic.SamePaths(sn.Paths, opts.Paths) && (opts.Host == "" || opts.Host == sn.Hostname) {
			pos := sort.Search(len(list), func(i int) bool {
				return list[i].Time.After(sn.Time)
			})
@@ -115,22 +87,35 @@ func (cmd CmdSnapshots) Execute(args []string) error {

	}

	plen, err := repo.PrefixLength(backend.Snapshot)
	if err != nil {
		return err
	}

	for _, sn := range list {
		if len(sn.Paths) == 0 {
			continue
		}
		id := sn.ID()
		tab.Rows = append(tab.Rows, []interface{}{hex.EncodeToString(id[:plen/2]), sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]})

		if len(sn.Paths) > 1 {
			for _, path := range sn.Paths[1:] {
				tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
		firstTag := ""
		if len(sn.Tags) > 0 {
			firstTag = sn.Tags[0]
		}

		tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, sn.Paths[0]})

		rows := len(sn.Paths)
		if len(sn.Tags) > rows {
			rows = len(sn.Tags)
		}

		for i := 1; i < rows; i++ {
			path := ""
			if len(sn.Paths) > i {
				path = sn.Paths[i]
			}

			tag := ""
			if len(sn.Tags) > i {
				tag = sn.Tags[i]
			}

			tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, path})
		}
	}

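runSnapshots keeps the list ordered by time while loading: sort.Search finds the insertion point for each snapshot, so no final sort pass is needed. The idiom in isolation, as a generic sketch over time.Time values:

	// insertSorted places v into a time-ordered slice, keeping the order intact.
	func insertSorted(list []time.Time, v time.Time) []time.Time {
		pos := sort.Search(len(list), func(i int) bool {
			return list[i].After(v)
		})
		list = append(list, time.Time{}) // grow by one element
		copy(list[pos+1:], list[pos:])   // shift the tail right
		list[pos] = v
		return list
	}
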
@@ -1,35 +1,43 @@
package main

import "restic"
import (
	"restic"

type CmdUnlock struct {
	RemoveAll bool `long:"remove-all" description:"Remove all locks, even stale ones"`
	"github.com/spf13/cobra"
)

	global *GlobalOptions
var unlockCmd = &cobra.Command{
	Use:   "unlock",
	Short: "remove locks other processes created",
	Long: `
The "unlock" command removes stale locks that have been created by other restic processes.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return runUnlock(unlockOptions, globalOptions)
	},
}

// UnlockOptions collects all options for the unlock command.
type UnlockOptions struct {
	RemoveAll bool
}

var unlockOptions UnlockOptions

func init() {
	_, err := parser.AddCommand("unlock",
		"remove locks",
		"The unlock command checks for stale locks and removes them",
		&CmdUnlock{global: &globalOpts})
	if err != nil {
		panic(err)
	}
	cmdRoot.AddCommand(unlockCmd)

	unlockCmd.Flags().BoolVar(&unlockOptions.RemoveAll, "remove-all", false, "Remove all locks, even non-stale ones")
}

func (cmd CmdUnlock) Usage() string {
	return "[unlock-options]"
}

func (cmd CmdUnlock) Execute(args []string) error {
	repo, err := cmd.global.OpenRepository()
func runUnlock(opts UnlockOptions, gopts GlobalOptions) error {
	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	fn := restic.RemoveStaleLocks
	if cmd.RemoveAll {
	if opts.RemoveAll {
		fn = restic.RemoveAllLocks
	}

@@ -38,6 +46,6 @@ func (cmd CmdUnlock) Execute(args []string) error {
		return err
	}

	cmd.global.Verbosef("successfully removed locks\n")
	Verbosef("successfully removed locks\n")
	return nil
}

@@ -3,23 +3,23 @@ package main
import (
	"fmt"
	"runtime"

	"github.com/spf13/cobra"
)

type CmdVersion struct{}
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Print version information",
	Long: `
The "version" command prints detailed information about the build environment
and the version of this software.
`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("restic %s\ncompiled at %s with %v on %v/%v\n",
			version, compiledAt, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	},
}

func init() {
	_, err := parser.AddCommand("version",
		"display version",
		"The version command displays detailed information about the version",
		&CmdVersion{})
	if err != nil {
		panic(err)
	}
}

func (cmd CmdVersion) Execute(args []string) error {
	fmt.Printf("restic %s\ncompiled at %s with %v\n",
		version, compiledAt, runtime.Version())

	return nil
	cmdRoot.AddCommand(versionCmd)
}

src/cmds/restic/format.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package main

import (
	"fmt"
	"time"
)

func formatBytes(c uint64) string {
	b := float64(c)

	switch {
	case c > 1<<40:
		return fmt.Sprintf("%.3f TiB", b/(1<<40))
	case c > 1<<30:
		return fmt.Sprintf("%.3f GiB", b/(1<<30))
	case c > 1<<20:
		return fmt.Sprintf("%.3f MiB", b/(1<<20))
	case c > 1<<10:
		return fmt.Sprintf("%.3f KiB", b/(1<<10))
	default:
		return fmt.Sprintf("%dB", c)
	}
}

func formatSeconds(sec uint64) string {
	hours := sec / 3600
	sec -= hours * 3600
	min := sec / 60
	sec -= min * 60
	if hours > 0 {
		return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
	}

	return fmt.Sprintf("%d:%02d", min, sec)
}

func formatPercent(numerator uint64, denominator uint64) string {
	if denominator == 0 {
		return ""
	}

	percent := 100.0 * float64(numerator) / float64(denominator)

	if percent > 100 {
		percent = 100
	}

	return fmt.Sprintf("%3.2f%%", percent)
}

func formatRate(bytes uint64, duration time.Duration) string {
	sec := float64(duration) / float64(time.Second)
	rate := float64(bytes) / sec / (1 << 20)
	return fmt.Sprintf("%.2fMiB/s", rate)
}

func formatDuration(d time.Duration) string {
	sec := uint64(d / time.Second)
	return formatSeconds(sec)
}

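The helpers above are deterministic, so their behaviour is easy to pin down with a few sample calls; the expected outputs in the comments follow directly from the code:

	func main() {
		fmt.Println(formatBytes(512))                 // "512B"
		fmt.Println(formatBytes(2048))                // "2.000 KiB"
		fmt.Println(formatSeconds(3725))              // "1:02:05"
		fmt.Println(formatPercent(5, 20))             // "25.00%"
		fmt.Println(formatDuration(90 * time.Second)) // "1:30"
	}
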
@@ -1,14 +1,17 @@
package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"restic"
	"runtime"
	"strings"
	"syscall"

	"restic/backend"
	"github.com/spf13/cobra"

	"restic/backend/local"
	"restic/backend/rest"
	"restic/backend/s3"
@@ -17,27 +20,50 @@ import (
	"restic/location"
	"restic/repository"

	"github.com/jessevdk/go-flags"
	"restic/errors"

	"golang.org/x/crypto/ssh/terminal"
)

var version = "compiled manually"
var compiledAt = "unknown time"

// GlobalOptions holds all those options that can be set for every command.
func parseEnvironment(cmd *cobra.Command, args []string) {
	repo := os.Getenv("RESTIC_REPOSITORY")
	if repo != "" {
		globalOptions.Repo = repo
	}

	pw := os.Getenv("RESTIC_PASSWORD")
	if pw != "" {
		globalOptions.password = pw
	}
}

// GlobalOptions hold all global options for restic.
type GlobalOptions struct {
	Repo     string   `short:"r" long:"repo" description:"Repository directory to backup to/restore from"`
	CacheDir string   ` long:"cache-dir" description:"Directory to use as a local cache"`
	Quiet    bool     `short:"q" long:"quiet" default:"false" description:"Do not output comprehensive progress report"`
	NoLock   bool     ` long:"no-lock" default:"false" description:"Do not lock the repo, this allows some operations on read-only repos."`
	Options  []string `short:"o" long:"option" description:"Specify options in the form 'foo.key=value'"`
	Repo         string
	PasswordFile string
	Quiet        bool
	NoLock       bool

	password string
	stdout   io.Writer
	stderr   io.Writer
}

var globalOptions = GlobalOptions{
	stdout: os.Stdout,
	stderr: os.Stderr,
}

func init() {
	f := cmdRoot.PersistentFlags()
	f.StringVarP(&globalOptions.Repo, "repo", "r", "", "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
	f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", "", "read the repository password from a file")
	f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
	f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")

	restoreTerminal()
}

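parseEnvironment pulls configuration defaults from the process environment, only writing into globalOptions when the variable is non-empty. The same pattern generalises to a small helper; a sketch, not restic's API:

	// envOr returns the value of the environment variable key, or def when
	// the variable is unset or empty.
	func envOr(key, def string) string {
		if v := os.Getenv(key); v != "" {
			return v
		}
		return def
	}

	// Example: seed an option default before flag parsing.
	// globalOptions.Repo = envOr("RESTIC_REPOSITORY", globalOptions.Repo)
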
@@ -56,14 +82,22 @@ func checkErrno(err error) error {
	return err
}

func stdinIsTerminal() bool {
	return terminal.IsTerminal(int(os.Stdin.Fd()))
}

func stdoutIsTerminal() bool {
	return terminal.IsTerminal(int(os.Stdout.Fd()))
}

// restoreTerminal installs a cleanup handler that restores the previous
// terminal state on exit.
func restoreTerminal() {
	fd := int(os.Stdout.Fd())
	if !terminal.IsTerminal(fd) {
	if !stdoutIsTerminal() {
		return
	}

	fd := int(os.Stdout.Fd())
	state, err := terminal.GetState(fd)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
@@ -79,12 +113,23 @@ func restoreTerminal() {
	})
}

var globalOpts = GlobalOptions{stdout: os.Stdout, stderr: os.Stderr}
var parser = flags.NewParser(&globalOpts, flags.HelpFlag|flags.PassDoubleDash)
// ClearLine creates a platform dependent string to clear the current
// line, so it can be overwritten. ANSI sequences are not supported on
// current windows cmd shell.
func ClearLine() string {
	if runtime.GOOS == "windows" {
		w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
		if err == nil {
			return strings.Repeat(" ", w-1) + "\r"
		}
		return ""
	}
	return "\x1b[2K"
}

// Printf writes the message to the configured stdout stream.
func (o GlobalOptions) Printf(format string, args ...interface{}) {
	_, err := fmt.Fprintf(o.stdout, format, args...)
func Printf(format string, args ...interface{}) {
	_, err := fmt.Fprintf(globalOptions.stdout, format, args...)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err)
		os.Exit(100)
@@ -92,31 +137,42 @@ func (o GlobalOptions) Printf(format string, args ...interface{}) {
}

// Verbosef calls Printf to write the message when the verbose flag is set.
func (o GlobalOptions) Verbosef(format string, args ...interface{}) {
	if o.Quiet {
func Verbosef(format string, args ...interface{}) {
	if globalOptions.Quiet {
		return
	}

	o.Printf(format, args...)
	Printf(format, args...)
}

// ShowProgress returns true iff the progress status should be written, i.e.
// the quiet flag is not set and the output is a terminal.
func (o GlobalOptions) ShowProgress() bool {
	if o.Quiet {
		return false
// PrintProgress wraps fmt.Printf to handle the difference in writing progress
// information to terminals and non-terminal stdout
func PrintProgress(format string, args ...interface{}) {
	var (
		message         string
		carriageControl string
	)
	message = fmt.Sprintf(format, args...)

	if !(strings.HasSuffix(message, "\r") || strings.HasSuffix(message, "\n")) {
		if stdoutIsTerminal() {
			carriageControl = "\r"
		} else {
			carriageControl = "\n"
		}
		message = fmt.Sprintf("%s%s", message, carriageControl)
	}

	if !terminal.IsTerminal(int(os.Stdout.Fd())) {
		return false
	if stdoutIsTerminal() {
		message = fmt.Sprintf("%s%s", ClearLine(), message)
	}

	return true
	fmt.Print(message)
}

// Warnf writes the message to the configured stderr stream.
func (o GlobalOptions) Warnf(format string, args ...interface{}) {
	_, err := fmt.Fprintf(o.stderr, format, args...)
func Warnf(format string, args ...interface{}) {
	_, err := fmt.Fprintf(globalOptions.stderr, format, args...)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
		os.Exit(100)
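PrintProgress's two cases matter in practice: on a terminal each update ends with a carriage return and overwrites the previous line (after clearing it), while on a pipe each update becomes its own newline-terminated line so logs stay readable. A stripped-down sketch of the same behaviour:

	// printStatus is a minimal sketch of terminal-aware progress output.
	func printStatus(msg string, isTerminal bool) {
		if isTerminal {
			fmt.Print("\x1b[2K" + msg + "\r") // clear the line, redraw in place
			return
		}
		fmt.Println(msg) // plain line-per-update for pipes and files
	}
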
@@ -124,12 +180,12 @@ func (o GlobalOptions) Warnf(format string, args ...interface{}) {
}

// Exitf uses Warnf to write the message and then calls os.Exit(exitcode).
func (o GlobalOptions) Exitf(exitcode int, format string, args ...interface{}) {
func Exitf(exitcode int, format string, args ...interface{}) {
	if format[len(format)-1] != '\n' {
		format += "\n"
	}

	o.Warnf(format, args...)
	Warnf(format, args...)
	os.Exit(exitcode)
}

@@ -139,8 +195,8 @@ func readPassword(in io.Reader) (password string, err error) {
	n, err := io.ReadFull(in, buf)
	buf = buf[:n]

	if err != nil && err != io.ErrUnexpectedEOF {
		return "", err
	if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
		return "", errors.Wrap(err, "ReadFull")
	}

	return strings.TrimRight(string(buf), "\r\n"), nil
@@ -154,89 +210,113 @@ func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password s
	buf, err := terminal.ReadPassword(int(in.Fd()))
	fmt.Fprintln(out)
	if err != nil {
		return "", err
		return "", errors.Wrap(err, "ReadPassword")
	}

	password = string(buf)
	return password, nil
}

// ReadPassword reads the password from stdin.
func (o GlobalOptions) ReadPassword(prompt string) string {
// ReadPassword reads the password from a password file, the environment
// variable RESTIC_PASSWORD or prompts the user.
func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
	if opts.PasswordFile != "" {
		s, err := ioutil.ReadFile(opts.PasswordFile)
		return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
	}

	if pwd := os.Getenv("RESTIC_PASSWORD"); pwd != "" {
		return pwd, nil
	}

	var (
		password string
		err      error
	)

	if terminal.IsTerminal(int(os.Stdin.Fd())) {
	if stdinIsTerminal() {
		password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
	} else {
		password, err = readPassword(os.Stdin)
	}

	if err != nil {
		o.Exitf(2, "unable to read password: %v", err)
		return "", errors.Wrap(err, "unable to read password")
	}

	if len(password) == 0 {
		o.Exitf(1, "an empty password is not a password")
		return "", errors.Fatal("an empty password is not a password")
	}

	return password
	return password, nil
}

// ReadPasswordTwice calls ReadPassword two times and returns an error when the
// passwords don't match.
func (o GlobalOptions) ReadPasswordTwice(prompt1, prompt2 string) string {
	pw1 := o.ReadPassword(prompt1)
	pw2 := o.ReadPassword(prompt2)
	if pw1 != pw2 {
		o.Exitf(1, "passwords do not match")
func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
	pw1, err := ReadPassword(gopts, prompt1)
	if err != nil {
		return "", err
	}
	pw2, err := ReadPassword(gopts, prompt2)
	if err != nil {
		return "", err
	}

	return pw1
	if pw1 != pw2 {
		return "", errors.Fatal("passwords do not match")
	}

	return pw1, nil
}

const maxKeys = 20

// OpenRepository reads the password and opens the repository.
func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
	if o.Repo == "" {
		return nil, errors.New("Please specify repository location (-r)")
func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
	if opts.Repo == "" {
		return nil, errors.Fatal("Please specify repository location (-r)")
	}

	be, err := open(o.Repo)
	be, err := open(opts.Repo)
	if err != nil {
		return nil, err
	}

	s := repository.New(be)

	if o.password == "" {
		o.password = o.ReadPassword("enter password for repository: ")
	if opts.password == "" {
		opts.password, err = ReadPassword(opts, "enter password for repository: ")
		if err != nil {
			return nil, err
		}
	}

	err = s.SearchKey(o.password)
	err = s.SearchKey(opts.password, maxKeys)
	if err != nil {
		return nil, fmt.Errorf("unable to open repo: %v", err)
		return nil, errors.Fatalf("unable to open repo: %v", err)
	}

	return s, nil
}

// Open the backend specified by a location config.
func open(s string) (backend.Backend, error) {
	debug.Log("open", "parsing location %v", s)
func open(s string) (restic.Backend, error) {
	debug.Log("parsing location %v", s)
	loc, err := location.Parse(s)
	if err != nil {
		return nil, err
		return nil, errors.Fatalf("parsing repository location failed: %v", err)
	}

	var be restic.Backend

	switch loc.Scheme {
	case "local":
		debug.Log("open", "opening local repository at %#v", loc.Config)
		return local.Open(loc.Config.(string))
		debug.Log("opening local repository at %#v", loc.Config)
		be, err = local.Open(loc.Config.(string))
	case "sftp":
		debug.Log("open", "opening sftp repository at %#v", loc.Config)
		return sftp.OpenWithConfig(loc.Config.(sftp.Config))
		debug.Log("opening sftp repository at %#v", loc.Config)
		be, err = sftp.OpenWithConfig(loc.Config.(sftp.Config))
	case "s3":
		cfg := loc.Config.(s3.Config)
		if cfg.KeyID == "" {
@@ -247,19 +327,24 @@ func open(s string) (backend.Backend, error) {
			cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
		}

		debug.Log("open", "opening s3 repository at %#v", cfg)
		return s3.Open(cfg)
		debug.Log("opening s3 repository at %#v", cfg)
		be, err = s3.Open(cfg)
	case "rest":
		return rest.Open(loc.Config.(rest.Config))
		be, err = rest.Open(loc.Config.(rest.Config))
	default:
		return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
	}

	debug.Log("open", "invalid repository location: %v", s)
	return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
	if err != nil {
		return nil, errors.Fatalf("unable to open repo at %v: %v", s, err)
	}

	return be, nil
}

// Create the backend specified by URI.
func create(s string) (backend.Backend, error) {
	debug.Log("open", "parsing location %v", s)
func create(s string) (restic.Backend, error) {
	debug.Log("parsing location %v", s)
	loc, err := location.Parse(s)
	if err != nil {
		return nil, err
@@ -267,10 +352,10 @@ func create(s string) (backend.Backend, error) {

	switch loc.Scheme {
	case "local":
		debug.Log("open", "create local repository at %#v", loc.Config)
		debug.Log("create local repository at %#v", loc.Config)
		return local.Create(loc.Config.(string))
	case "sftp":
		debug.Log("open", "create sftp repository at %#v", loc.Config)
		debug.Log("create sftp repository at %#v", loc.Config)
		return sftp.CreateWithConfig(loc.Config.(sftp.Config))
	case "s3":
		cfg := loc.Config.(s3.Config)
@@ -282,12 +367,12 @@ func create(s string) (backend.Backend, error) {
		cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
	}

		debug.Log("open", "create s3 repository at %#v", loc.Config)
		debug.Log("create s3 repository at %#v", loc.Config)
		return s3.Open(cfg)
	case "rest":
		return rest.Open(loc.Config.(rest.Config))
	}

	debug.Log("open", "invalid repository scheme: %v", s)
	return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
	debug.Log("invalid repository scheme: %v", s)
	return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
}

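ReadPassword resolves the password from three sources in a fixed order: the --password-file if given, then the RESTIC_PASSWORD environment variable, then an interactive or piped prompt. The same chain written as an explicit sketch:

	// resolvePassword mirrors the lookup order above: file, environment, prompt.
	func resolvePassword(passwordFile string, prompt func() (string, error)) (string, error) {
		if passwordFile != "" {
			b, err := ioutil.ReadFile(passwordFile)
			if err != nil {
				return "", err
			}
			return strings.TrimSpace(string(b)), nil
		}
		if pw := os.Getenv("RESTIC_PASSWORD"); pw != "" {
			return pw, nil
		}
		return prompt()
	}
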
@@ -1,3 +1,4 @@
// +build ignore
// +build !openbsd
// +build !windows

@@ -12,7 +13,6 @@ import (
	"time"

	"restic"
	"restic/backend"
	"restic/repository"
	. "restic/test"
)
@@ -23,45 +23,119 @@
	mountTestSubdir = "snapshots"
)

func snapshotsDirExists(t testing.TB, dir string) bool {
	f, err := os.Open(filepath.Join(dir, mountTestSubdir))
	if err != nil && os.IsNotExist(err) {
		return false
	}

	if err != nil {
		t.Error(err)
	}

	if err := f.Close(); err != nil {
		t.Error(err)
	}

	return true
}

// waitForMount blocks (max mountWait * mountSleep) until the subdir
// "snapshots" appears in the dir.
func waitForMount(dir string) error {
func waitForMount(t testing.TB, dir string) {
	for i := 0; i < mountWait; i++ {
		f, err := os.Open(dir)
		if err != nil {
			return err
		}

		names, err := f.Readdirnames(-1)
		if err != nil {
			return err
		}

		if err = f.Close(); err != nil {
			return err
		}

		for _, name := range names {
			if name == mountTestSubdir {
				return nil
			}
		if snapshotsDirExists(t, dir) {
			t.Log("mounted directory is ready")
			return
		}

		time.Sleep(mountSleep)
	}

	return fmt.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
	t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
}

func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) {
	defer func() {
		ready <- struct{}{}
	}()
func mount(t testing.TB, global GlobalOptions, dir string) {
	cmd := &CmdMount{global: &global}
	OK(t, cmd.Mount(dir))
}

	cmd := &CmdMount{global: &global, ready: ready, done: done}
	OK(t, cmd.Execute([]string{dir}))
	if TestCleanupTempDirs {
		RemoveAll(t, dir)
func umount(t testing.TB, global GlobalOptions, dir string) {
	cmd := &CmdMount{global: &global}

	var err error
	for i := 0; i < mountWait; i++ {
		if err = cmd.Umount(dir); err == nil {
			t.Logf("directory %v umounted", dir)
			return
		}

		time.Sleep(mountSleep)
	}

	t.Errorf("unable to umount dir %v, last error was: %v", dir, err)
}

func listSnapshots(t testing.TB, dir string) []string {
	snapshotsDir, err := os.Open(filepath.Join(dir, "snapshots"))
	OK(t, err)
	names, err := snapshotsDir.Readdirnames(-1)
	OK(t, err)
	OK(t, snapshotsDir.Close())
	return names
}

func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
	t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
	go mount(t, global, mountpoint)
	waitForMount(t, mountpoint)
	defer umount(t, global, mountpoint)

	if !snapshotsDirExists(t, mountpoint) {
		t.Fatal(`virtual directory "snapshots" doesn't exist`)
	}

	ids := listSnapshots(t, repodir)
	t.Logf("found %v snapshots in repo: %v", len(ids), ids)

	namesInSnapshots := listSnapshots(t, mountpoint)
	t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots)
	Assert(t,
		len(namesInSnapshots) == len(snapshotIDs),
		"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))

	namesMap := make(map[string]bool)
	for _, name := range namesInSnapshots {
		namesMap[name] = false
	}

	for _, id := range snapshotIDs {
		snapshot, err := restic.LoadSnapshot(repo, id)
		OK(t, err)

		ts := snapshot.Time.Format(time.RFC3339)
		present, ok := namesMap[ts]
		if !ok {
			t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
		}

		for i := 1; present; i++ {
			ts = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
			present, ok = namesMap[ts]
			if !ok {
				t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
			}

			if !present {
				break
			}
		}

		namesMap[ts] = true
	}

	for name, present := range namesMap {
		Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
	}
}

@@ -70,34 +144,8 @@ func TestMount(t *testing.T) {
		t.Skip("Skipping fuse tests")
	}

	checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []backend.ID) {
		snapshotsDir, err := os.Open(filepath.Join(mountpoint, "snapshots"))
		OK(t, err)
		namesInSnapshots, err := snapshotsDir.Readdirnames(-1)
		OK(t, err)
		Assert(t,
			len(namesInSnapshots) == len(snapshotIDs),
			"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))

		namesMap := make(map[string]bool)
		for _, name := range namesInSnapshots {
			namesMap[name] = false
		}

		for _, id := range snapshotIDs {
			snapshot, err := restic.LoadSnapshot(repo, id)
			OK(t, err)
			_, ok := namesMap[snapshot.Time.Format(time.RFC3339)]
			Assert(t, ok, "Snapshot %s isn't present in fuse dir", snapshot.Time.Format(time.RFC3339))
			namesMap[snapshot.Time.Format(time.RFC3339)] = true
		}
		for name, present := range namesMap {
			Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
		}
		OK(t, snapshotsDir.Close())
	}

	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {

		cmdInit(t, global)
		repo, err := global.OpenRepository()
		OK(t, err)
@@ -108,32 +156,9 @@ func TestMount(t *testing.T) {
		// We remove the mountpoint now to check that cmdMount creates it
		RemoveAll(t, mountpoint)

		ready := make(chan struct{}, 2)
		done := make(chan struct{})
		go cmdMount(t, global, mountpoint, ready, done)
		<-ready
		defer close(done)
		OK(t, waitForMount(mountpoint))
		checkSnapshots(t, global, repo, mountpoint, env.repo, []restic.ID{})

		mountpointDir, err := os.Open(mountpoint)
		OK(t, err)
		names, err := mountpointDir.Readdirnames(-1)
		OK(t, err)
		Assert(t, len(names) == 1 && names[0] == "snapshots", `The fuse virtual directory "snapshots" doesn't exist`)
		OK(t, mountpointDir.Close())

		checkSnapshots(repo, mountpoint, []backend.ID{})

		datafile := filepath.Join("testdata", "backup-data.tar.gz")
		fd, err := os.Open(datafile)
		if os.IsNotExist(err) {
			t.Skipf("unable to find data file %q, skipping", datafile)
			return
		}
		OK(t, err)
		OK(t, fd.Close())

		SetupTarTestFixture(t, env.testdata, datafile)
		SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))

		// first backup
		cmdBackup(t, global, []string{env.testdata}, nil)
@@ -141,7 +166,7 @@ func TestMount(t *testing.T) {
		Assert(t, len(snapshotIDs) == 1,
			"expected one snapshot, got %v", snapshotIDs)

		checkSnapshots(repo, mountpoint, snapshotIDs)
		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)

		// second backup, implicit incremental
		cmdBackup(t, global, []string{env.testdata}, nil)
@@ -149,7 +174,7 @@ func TestMount(t *testing.T) {
		Assert(t, len(snapshotIDs) == 2,
			"expected two snapshots, got %v", snapshotIDs)

		checkSnapshots(repo, mountpoint, snapshotIDs)
		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)

		// third backup, explicit incremental
		cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
@@ -157,6 +182,30 @@ func TestMount(t *testing.T) {
		Assert(t, len(snapshotIDs) == 3,
			"expected three snapshots, got %v", snapshotIDs)

		checkSnapshots(repo, mountpoint, snapshotIDs)
		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
	})
}

func TestMountSameTimestamps(t *testing.T) {
	if !RunFuseTest {
		t.Skip("Skipping fuse tests")
	}

	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
		SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))

		repo, err := global.OpenRepository()
		OK(t, err)

		mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
		OK(t, err)

		ids := []restic.ID{
			restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
			restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"),
			restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
		}

		checkSnapshots(t, global, repo, mountpoint, env.repo, ids)
	})
}

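checkSnapshots exercises the fuse layer's handling of timestamp collisions: when two snapshots share the same time, the second directory gets a -1 suffix, the third -2, and so on. The naming rule in isolation, as a sketch of the behaviour the test expects rather than the fuse code itself:

	// uniqueName appends -1, -2, ... to base until the name is unused.
	func uniqueName(base string, taken map[string]bool) string {
		name := base
		for i := 1; taken[name]; i++ {
			name = fmt.Sprintf("%s-%d", base, i)
		}
		taken[name] = true
		return name
	}
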
@@ -8,6 +8,7 @@ import (
	"runtime"
	"testing"

	"restic/repository"
	. "restic/test"
)

@@ -165,27 +166,6 @@ type testEnvironment struct {
	base, cache, repo, testdata string
}

func configureRestic(t testing.TB, cache, repo string) GlobalOptions {
	return GlobalOptions{
		CacheDir: cache,
		Repo:     repo,
		Quiet:    true,

		password: TestPassword,
		stdout:   os.Stdout,
		stderr:   os.Stderr,
	}
}

func cleanupTempdir(t testing.TB, tempdir string) {
	if !TestCleanupTempDirs {
		t.Logf("leaving temporary directory %v used for test", tempdir)
		return
	}

	RemoveAll(t, tempdir)
}

// withTestEnvironment creates a test environment and calls f with it. After f has
// returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
@@ -193,6 +173,8 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
		t.Skip("integration tests disabled")
	}

	repository.TestUseLowSecurityKDFParameters(t)

	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
	OK(t, err)

@@ -207,7 +189,18 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
	OK(t, os.MkdirAll(env.cache, 0700))
	OK(t, os.MkdirAll(env.repo, 0700))

	f(&env, configureRestic(t, env.cache, env.repo))
	gopts := GlobalOptions{
		Repo:     env.repo,
		Quiet:    true,
		password: TestPassword,
		stdout:   os.Stdout,
		stderr:   os.Stderr,
	}

	// always overwrite global options
	globalOptions = gopts

	f(&env, gopts)

	if !TestCleanupTempDirs {
		t.Logf("leaving temporary directory %v used for test", tempdir)
@@ -216,13 +209,3 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))

	RemoveAll(t, tempdir)
}

// removeFile resets the read-only flag and then deletes the file.
func removeFile(fn string) error {
	err := os.Chmod(fn, 0666)
	if err != nil {
		return err
	}

	return os.Remove(fn)
}

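withTestEnvironment now also overwrites the package-level globalOptions, so the run* functions under test see the same configuration as the copy handed to the test body. On current Go a similar helper can lean on t.TempDir and t.Cleanup for the lifecycle; a sketch under that assumption, not the code above:

	// withEnv builds a throwaway repo dir, points gopts at it, and restores
	// the previous global options when the test finishes.
	func withEnv(t *testing.T, f func(gopts GlobalOptions)) {
		dir := t.TempDir() // removed automatically after the test

		saved := globalOptions
		t.Cleanup(func() { globalOptions = saved })

		gopts := GlobalOptions{Repo: dir, Quiet: true}
		globalOptions = gopts

		f(gopts)
	}
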
@@ -10,24 +10,26 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"restic"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"restic/backend"
|
||||
"restic/errors"
|
||||
|
||||
"restic/debug"
|
||||
"restic/filter"
|
||||
"restic/repository"
|
||||
. "restic/test"
|
||||
)
|
||||
|
||||
func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
|
||||
IDs := backend.IDs{}
|
||||
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
|
||||
IDs := restic.IDs{}
|
||||
sc := bufio.NewScanner(rd)
|
||||
|
||||
for sc.Scan() {
|
||||
id, err := backend.ParseID(sc.Text())
|
||||
id, err := restic.ParseID(sc.Text())
|
||||
if err != nil {
|
||||
t.Logf("parse id %v: %v", sc.Text(), err)
|
||||
continue
|
||||
@@ -39,129 +41,152 @@ func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
|
||||
return IDs
|
||||
}
|
||||
|
||||
func cmdInit(t testing.TB, global GlobalOptions) {
|
||||
cmd := &CmdInit{global: &global}
|
||||
OK(t, cmd.Execute(nil))
|
||||
func testRunInit(t testing.TB, opts GlobalOptions) {
|
||||
repository.TestUseLowSecurityKDFParameters(t)
|
||||
restic.TestSetLockTimeout(t, 0)
|
||||
|
||||
t.Logf("repository initialized at %v", global.Repo)
|
||||
OK(t, runInit(opts, nil))
|
||||
t.Logf("repository initialized at %v", opts.Repo)
|
||||
}
|
||||
|
||||
func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID) {
|
||||
cmdBackupExcludes(t, global, target, parentID, nil)
|
||||
}
|
||||
|
||||
-func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) {
-    cmd := &CmdBackup{global: &global, Excludes: excludes}
-    if parentID != nil {
-        cmd.Parent = parentID.String()
-    }
+func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) {
     t.Logf("backing up %v", target)
-    OK(t, cmd.Execute(target))
+    OK(t, runBackup(opts, gopts, target))
 }

-func cmdList(t testing.TB, global GlobalOptions, tpe string) backend.IDs {
-    cmd := &CmdList{global: &global}
-    return executeAndParseIDs(t, cmd, tpe)
-}
-
-func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) backend.IDs {
+func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
     buf := bytes.NewBuffer(nil)
-    cmd.global.stdout = buf
-    OK(t, cmd.Execute(args))
+    globalOptions.stdout = buf
+    defer func() {
+        globalOptions.stdout = os.Stdout
+    }()
+
+    OK(t, runList(opts, []string{tpe}))
     return parseIDsFromReader(t, buf)
 }

-func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID) {
-    cmdRestoreExcludes(t, global, dir, snapshotID, nil)
+func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
+    testRunRestoreExcludes(t, opts, dir, snapshotID, nil)
 }

-func cmdRestoreLatest(t testing.TB, global GlobalOptions, dir string, paths []string, host string) {
-    cmd := &CmdRestore{global: &global, Target: dir, Host: host, Paths: paths}
-    OK(t, cmd.Execute([]string{"latest"}))
+func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, host string) {
+    opts := RestoreOptions{
+        Target: dir,
+        Host:   host,
+        Paths:  paths,
+    }
+
+    OK(t, runRestore(opts, gopts, []string{"latest"}))
 }

-func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, excludes []string) {
-    cmd := &CmdRestore{global: &global, Target: dir, Exclude: excludes}
-    OK(t, cmd.Execute([]string{snapshotID.String()}))
+func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
+    opts := RestoreOptions{
+        Target:  dir,
+        Exclude: excludes,
+    }
+
+    OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
 }

-func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, includes []string) {
-    cmd := &CmdRestore{global: &global, Target: dir, Include: includes}
-    OK(t, cmd.Execute([]string{snapshotID.String()}))
+func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
+    opts := RestoreOptions{
+        Target:  dir,
+        Include: includes,
+    }
+
+    OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
 }

-func cmdCheck(t testing.TB, global GlobalOptions) {
-    cmd := &CmdCheck{
-        global:      &global,
+func testRunCheck(t testing.TB, gopts GlobalOptions) {
+    opts := CheckOptions{
         ReadData:    true,
         CheckUnused: true,
     }
-    OK(t, cmd.Execute(nil))
+    OK(t, runCheck(opts, gopts, nil))
 }

-func cmdCheckOutput(t testing.TB, global GlobalOptions) string {
+func testRunCheckOutput(gopts GlobalOptions) (string, error) {
     buf := bytes.NewBuffer(nil)
-    global.stdout = buf
-    cmd := &CmdCheck{global: &global, ReadData: true}
-    OK(t, cmd.Execute(nil))
-    return string(buf.Bytes())
+
+    globalOptions.stdout = buf
+    defer func() {
+        globalOptions.stdout = os.Stdout
+    }()
+
+    opts := CheckOptions{
+        ReadData: true,
+    }
+
+    err := runCheck(opts, gopts, nil)
+    return string(buf.Bytes()), err
 }

-func cmdRebuildIndex(t testing.TB, global GlobalOptions) {
-    global.stdout = ioutil.Discard
-    cmd := &CmdRebuildIndex{global: &global}
-    OK(t, cmd.Execute(nil))
+func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
+    globalOptions.stdout = ioutil.Discard
+    defer func() {
+        globalOptions.stdout = os.Stdout
+    }()
+
+    OK(t, runRebuildIndex(gopts))
 }

-func cmdLs(t testing.TB, global GlobalOptions, snapshotID string) []string {
-    var buf bytes.Buffer
-    global.stdout = &buf
+func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
+    buf := bytes.NewBuffer(nil)
+    globalOptions.stdout = buf
+    quiet := globalOptions.Quiet
+    globalOptions.Quiet = true
+    defer func() {
+        globalOptions.stdout = os.Stdout
+        globalOptions.Quiet = quiet
+    }()

-    cmd := &CmdLs{global: &global}
-    OK(t, cmd.Execute([]string{snapshotID}))
+    OK(t, runLs(gopts, []string{snapshotID}))

     return strings.Split(string(buf.Bytes()), "\n")
 }

-func cmdFind(t testing.TB, global GlobalOptions, pattern string) []string {
-    var buf bytes.Buffer
-    global.stdout = &buf
+func testRunFind(t testing.TB, gopts GlobalOptions, pattern string) []string {
+    buf := bytes.NewBuffer(nil)
+    globalOptions.stdout = buf
+    defer func() {
+        globalOptions.stdout = os.Stdout
+    }()

-    cmd := &CmdFind{global: &global}
-    OK(t, cmd.Execute([]string{pattern}))
+    opts := FindOptions{}
+
+    OK(t, runFind(opts, gopts, []string{pattern}))

     return strings.Split(string(buf.Bytes()), "\n")
 }
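Nearly every helper above repeats one capture pattern: point the package-level globalOptions.stdout at a buffer, run the command function, and restore the writer in a deferred closure. A minimal sketch of that pattern factored into a reusable helper; the captureStdout name and callback signature are illustrative, not part of this change, and the snippet assumes OK, globalOptions and the imports already present in this test file:

// captureStdout is a hypothetical helper that factors out the stdout
// redirection used by testRunList, testRunLs and testRunFind above.
func captureStdout(t testing.TB, fn func() error) string {
    buf := bytes.NewBuffer(nil)
    globalOptions.stdout = buf
    defer func() {
        // always restore the real stdout, even if fn fails
        globalOptions.stdout = os.Stdout
    }()

    OK(t, fn())
    return buf.String()
}

With such a helper, testRunLs would reduce to splitting captureStdout(t, func() error { return runLs(gopts, []string{snapshotID}) }) on newlines.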
 func TestBackup(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
         fd, err := os.Open(datafile)
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             t.Skipf("unable to find data file %q, skipping", datafile)
             return
         }
         OK(t, err)
         OK(t, fd.Close())

-        cmdInit(t, global)
+        testRunInit(t, gopts)

         SetupTarTestFixture(t, env.testdata, datafile)
+        opts := BackupOptions{}

         // first backup
-        cmdBackup(t, global, []string{env.testdata}, nil)
-        snapshotIDs := cmdList(t, global, "snapshots")
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        snapshotIDs := testRunList(t, "snapshots", gopts)
         Assert(t, len(snapshotIDs) == 1,
             "expected one snapshot, got %v", snapshotIDs)

-        cmdCheck(t, global)
+        testRunCheck(t, gopts)
         stat1 := dirStats(env.repo)

         // second backup, implicit incremental
-        cmdBackup(t, global, []string{env.testdata}, nil)
-        snapshotIDs = cmdList(t, global, "snapshots")
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        snapshotIDs = testRunList(t, "snapshots", gopts)
         Assert(t, len(snapshotIDs) == 2,
             "expected two snapshots, got %v", snapshotIDs)

@@ -171,10 +196,11 @@ func TestBackup(t *testing.T) {
         }
         t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

-        cmdCheck(t, global)
+        testRunCheck(t, gopts)
         // third backup, explicit incremental
-        cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
-        snapshotIDs = cmdList(t, global, "snapshots")
+        opts.Parent = snapshotIDs[0].String()
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        snapshotIDs = testRunList(t, "snapshots", gopts)
         Assert(t, len(snapshotIDs) == 3,
             "expected three snapshots, got %v", snapshotIDs)

@@ -188,20 +214,20 @@ func TestBackup(t *testing.T) {
         for i, snapshotID := range snapshotIDs {
             restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
             t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
-            cmdRestore(t, global, restoredir, snapshotIDs[0])
+            testRunRestore(t, gopts, restoredir, snapshotIDs[0])
             Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
                 "directories are not equal")
         }

-        cmdCheck(t, global)
+        testRunCheck(t, gopts)
     })
 }

 func TestBackupNonExistingFile(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
         fd, err := os.Open(datafile)
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             t.Skipf("unable to find data file %q, skipping", datafile)
             return
         }
@@ -210,9 +236,11 @@ func TestBackupNonExistingFile(t *testing.T) {

         SetupTarTestFixture(t, env.testdata, datafile)

-        cmdInit(t, global)
-
-        global.stderr = ioutil.Discard
+        testRunInit(t, gopts)
+        globalOptions.stderr = ioutil.Discard
+        defer func() {
+            globalOptions.stderr = os.Stderr
+        }()

         p := filepath.Join(env.testdata, "0", "0")
         dirs := []string{
@@ -221,15 +249,18 @@ func TestBackupNonExistingFile(t *testing.T) {
             filepath.Join(p, "nonexisting"),
             filepath.Join(p, "5"),
         }
-        cmdBackup(t, global, dirs, nil)
+
+        opts := BackupOptions{}
+
+        testRunBackup(t, dirs, opts, gopts)
     })
 }
 func TestBackupMissingFile1(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
         fd, err := os.Open(datafile)
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             t.Skipf("unable to find data file %q, skipping", datafile)
             return
         }
@@ -238,9 +269,12 @@ func TestBackupMissingFile1(t *testing.T) {

         SetupTarTestFixture(t, env.testdata, datafile)

-        cmdInit(t, global)
+        testRunInit(t, gopts)
+        globalOptions.stderr = ioutil.Discard
+        defer func() {
+            globalOptions.stderr = os.Stderr
+        }()

-        global.stderr = ioutil.Discard
         ranHook := false
         debug.Hook("pipe.walk1", func(context interface{}) {
             pathname := context.(string)
@@ -255,8 +289,10 @@ func TestBackupMissingFile1(t *testing.T) {
             OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
         })

-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)

         Assert(t, ranHook, "hook did not run")
         debug.RemoveHook("pipe.walk1")
@@ -264,10 +300,10 @@ func TestBackupMissingFile1(t *testing.T) {
 }

 func TestBackupMissingFile2(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
         fd, err := os.Open(datafile)
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             t.Skipf("unable to find data file %q, skipping", datafile)
             return
         }
@@ -276,9 +312,13 @@ func TestBackupMissingFile2(t *testing.T) {

         SetupTarTestFixture(t, env.testdata, datafile)

-        cmdInit(t, global)
+        testRunInit(t, gopts)
+
+        globalOptions.stderr = ioutil.Discard
+        defer func() {
+            globalOptions.stderr = os.Stderr
+        }()

-        global.stderr = ioutil.Discard
         ranHook := false
         debug.Hook("pipe.walk2", func(context interface{}) {
             pathname := context.(string)
@@ -293,8 +333,10 @@ func TestBackupMissingFile2(t *testing.T) {
             OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
         })

-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)

         Assert(t, ranHook, "hook did not run")
         debug.RemoveHook("pipe.walk2")
@@ -302,10 +344,10 @@ func TestBackupMissingFile2(t *testing.T) {
 }
 func TestBackupDirectoryError(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
         fd, err := os.Open(datafile)
-        if os.IsNotExist(err) {
+        if os.IsNotExist(errors.Cause(err)) {
             t.Skipf("unable to find data file %q, skipping", datafile)
             return
         }
@@ -314,9 +356,13 @@ func TestBackupDirectoryError(t *testing.T) {

         SetupTarTestFixture(t, env.testdata, datafile)

-        cmdInit(t, global)
+        testRunInit(t, gopts)
+
+        globalOptions.stderr = ioutil.Discard
+        defer func() {
+            globalOptions.stderr = os.Stderr
+        }()

-        global.stderr = ioutil.Discard
         ranHook := false

         testdir := filepath.Join(env.testdata, "0", "0", "9")
@@ -335,17 +381,17 @@ func TestBackupDirectoryError(t *testing.T) {
             OK(t, os.RemoveAll(testdir))
         })

-        cmdBackup(t, global, []string{filepath.Join(env.testdata, "0", "0")}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, gopts)
+        testRunCheck(t, gopts)

         Assert(t, ranHook, "hook did not run")
         debug.RemoveHook("pipe.walk2")

-        snapshots := cmdList(t, global, "snapshots")
+        snapshots := testRunList(t, "snapshots", gopts)
         Assert(t, len(snapshots) > 0,
             "no snapshots found in repo (%v)", datafile)

-        files := cmdLs(t, global, snapshots[0].String())
+        files := testRunLs(t, gopts, snapshots[0].String())

         Assert(t, len(files) > 1, "snapshot is empty")
     })
@@ -361,8 +407,8 @@ func includes(haystack []string, needle string) bool {
     return false
 }

-func loadSnapshotMap(t testing.TB, global GlobalOptions) map[string]struct{} {
-    snapshotIDs := cmdList(t, global, "snapshots")
+func loadSnapshotMap(t testing.TB, gopts GlobalOptions) map[string]struct{} {
+    snapshotIDs := testRunList(t, "snapshots", gopts)

     m := make(map[string]struct{})
     for _, id := range snapshotIDs {
@@ -391,8 +437,8 @@ var backupExcludeFilenames = []string{
 }

 func TestBackupExclude(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

         datadir := filepath.Join(env.base, "testdata")

@@ -409,21 +455,25 @@ func TestBackupExclude(t *testing.T) {

         snapshots := make(map[string]struct{})

-        cmdBackup(t, global, []string{datadir}, nil)
-        snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, global))
-        files := cmdLs(t, global, snapshotID)
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, gopts))
+        files := testRunLs(t, gopts, snapshotID)
         Assert(t, includes(files, filepath.Join("testdata", "foo.tar.gz")),
             "expected file %q in first snapshot, but it's not included", "foo.tar.gz")

-        cmdBackupExcludes(t, global, []string{datadir}, nil, []string{"*.tar.gz"})
-        snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, global))
-        files = cmdLs(t, global, snapshotID)
+        opts.Excludes = []string{"*.tar.gz"}
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, gopts))
+        files = testRunLs(t, gopts, snapshotID)
         Assert(t, !includes(files, filepath.Join("testdata", "foo.tar.gz")),
             "expected file %q not in first snapshot, but it's included", "foo.tar.gz")

-        cmdBackupExcludes(t, global, []string{datadir}, nil, []string{"*.tar.gz", "private/secret"})
-        snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, global))
-        files = cmdLs(t, global, snapshotID)
+        opts.Excludes = []string{"*.tar.gz", "private/secret"}
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, gopts))
+        files = testRunLs(t, gopts, snapshotID)
         Assert(t, !includes(files, filepath.Join("testdata", "foo.tar.gz")),
             "expected file %q not in first snapshot, but it's included", "foo.tar.gz")
         Assert(t, !includes(files, filepath.Join("testdata", "private", "secret", "passwords.txt")),
@@ -460,22 +510,24 @@ func appendRandomData(filename string, bytes uint) error {
 }

 func TestIncrementalBackup(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

         datadir := filepath.Join(env.base, "testdata")
         testfile := filepath.Join(datadir, "testfile")

         OK(t, appendRandomData(testfile, incrementalFirstWrite))

-        cmdBackup(t, global, []string{datadir}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        testRunCheck(t, gopts)
         stat1 := dirStats(env.repo)

         OK(t, appendRandomData(testfile, incrementalSecondWrite))

-        cmdBackup(t, global, []string{datadir}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        testRunCheck(t, gopts)
         stat2 := dirStats(env.repo)
         if stat2.size-stat1.size > incrementalFirstWrite {
             t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
@@ -484,8 +536,8 @@ func TestIncrementalBackup(t *testing.T) {

         OK(t, appendRandomData(testfile, incrementalThirdWrite))

-        cmdBackup(t, global, []string{datadir}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{datadir}, opts, gopts)
+        testRunCheck(t, gopts)
         stat3 := dirStats(env.repo)
         if stat3.size-stat2.size > incrementalFirstWrite {
             t.Errorf("repository size has grown by more than %d bytes", incrementalFirstWrite)
@@ -494,24 +546,17 @@ func TestIncrementalBackup(t *testing.T) {
     })
 }

-func cmdKey(t testing.TB, global GlobalOptions, args ...string) string {
-    var buf bytes.Buffer
+func testRunKeyListOtherIDs(t testing.TB, gopts GlobalOptions) []string {
+    buf := bytes.NewBuffer(nil)

-    global.stdout = &buf
-    cmd := &CmdKey{global: &global}
-    OK(t, cmd.Execute(args))
+    globalOptions.stdout = buf
+    defer func() {
+        globalOptions.stdout = os.Stdout
+    }()

-    return buf.String()
-}
+    OK(t, runKey(gopts, []string{"list"}))

-func cmdKeyListOtherIDs(t testing.TB, global GlobalOptions) []string {
-    var buf bytes.Buffer
-
-    global.stdout = &buf
-    cmd := &CmdKey{global: &global}
-    OK(t, cmd.Execute([]string{"list"}))
-
-    scanner := bufio.NewScanner(&buf)
+    scanner := bufio.NewScanner(buf)
     exp := regexp.MustCompile(`^ ([a-f0-9]+) `)

     IDs := []string{}
@@ -524,21 +569,28 @@ func cmdKeyListOtherIDs(t testing.TB, global GlobalOptions) []string {
     return IDs
 }

-func cmdKeyAddNewKey(t testing.TB, global GlobalOptions, newPassword string) {
-    cmd := &CmdKey{global: &global, newPassword: newPassword}
-    OK(t, cmd.Execute([]string{"add"}))
+func testRunKeyAddNewKey(t testing.TB, newPassword string, gopts GlobalOptions) {
+    testKeyNewPassword = newPassword
+    defer func() {
+        testKeyNewPassword = ""
+    }()
+
+    OK(t, runKey(gopts, []string{"add"}))
 }

-func cmdKeyPasswd(t testing.TB, global GlobalOptions, newPassword string) {
-    cmd := &CmdKey{global: &global, newPassword: newPassword}
-    OK(t, cmd.Execute([]string{"passwd"}))
+func testRunKeyPasswd(t testing.TB, newPassword string, gopts GlobalOptions) {
+    testKeyNewPassword = newPassword
+    defer func() {
+        testKeyNewPassword = ""
+    }()
+
+    OK(t, runKey(gopts, []string{"passwd"}))
 }

-func cmdKeyRemove(t testing.TB, global GlobalOptions, IDs []string) {
-    cmd := &CmdKey{global: &global}
+func testRunKeyRemove(t testing.TB, gopts GlobalOptions, IDs []string) {
     t.Logf("remove %d keys: %q\n", len(IDs), IDs)
     for _, id := range IDs {
-        OK(t, cmd.Execute([]string{"rm", id}))
+        OK(t, runKey(gopts, []string{"rm", id}))
     }
 }
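The add/passwd helpers work by setting the package-level testKeyNewPassword variable before calling runKey; presumably the key command consults that variable instead of prompting interactively when it is non-empty. The command-side half of that hook is not shown in this diff, so the following is only a sketch of the assumed pattern; the getNewPassword and readPasswordTwice names are hypothetical:

// testKeyNewPassword lets the integration tests inject the password that
// `restic key add` / `restic key passwd` would otherwise read from the
// terminal. Empty means: prompt as usual. (Assumed counterpart to the
// helpers above, not quoted from the diff.)
var testKeyNewPassword string

func getNewPassword(gopts GlobalOptions) (string, error) {
    if testKeyNewPassword != "" {
        return testKeyNewPassword, nil
    }
    // fall back to the interactive prompt (hypothetical helper)
    return readPasswordTwice(gopts, "enter new password: ", "repeat: ")
}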
@@ -548,25 +600,24 @@ func TestKeyAddRemove(t *testing.T) {
         "raicneirvOjEfEigonOmLasOd",
     }

-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

-        cmdKeyPasswd(t, global, "geheim2")
-        global.password = "geheim2"
-        t.Logf("changed password to %q", global.password)
+        testRunKeyPasswd(t, "geheim2", gopts)
+        gopts.password = "geheim2"
+        t.Logf("changed password to %q", gopts.password)

         for _, newPassword := range passwordList {
-            cmdKeyAddNewKey(t, global, newPassword)
+            testRunKeyAddNewKey(t, newPassword, gopts)
             t.Logf("added new password %q", newPassword)
-            global.password = newPassword
-            cmdKeyRemove(t, global, cmdKeyListOtherIDs(t, global))
+            gopts.password = newPassword
+            testRunKeyRemove(t, gopts, testRunKeyListOtherIDs(t, gopts))
         }

-        global.password = passwordList[len(passwordList)-1]
-        t.Logf("testing access with last password %q\n", global.password)
-        cmdKey(t, global, "list")
-
-        cmdCheck(t, global)
+        gopts.password = passwordList[len(passwordList)-1]
+        t.Logf("testing access with last password %q\n", gopts.password)
+        OK(t, runKey(gopts, []string{"list"}))
+        testRunCheck(t, gopts)
     })
 }

@@ -577,7 +628,7 @@ func testFileSize(filename string, size int64) error {
     }

     if fi.Size() != size {
-        return fmt.Errorf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
+        return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
     }

     return nil
@@ -594,8 +645,8 @@ func TestRestoreFilter(t *testing.T) {
         {"subdir1/subdir2/testfile4.c", 102},
     }

-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

         for _, test := range testfiles {
             p := filepath.Join(env.testdata, test.name)
@@ -603,26 +654,28 @@ func TestRestoreFilter(t *testing.T) {
             OK(t, appendRandomData(p, test.size))
         }

-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}

-        snapshotID := cmdList(t, global, "snapshots")[0]
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)
+
+        snapshotID := testRunList(t, "snapshots", gopts)[0]

         // no restore filter should restore all files
-        cmdRestore(t, global, filepath.Join(env.base, "restore0"), snapshotID)
+        testRunRestore(t, gopts, filepath.Join(env.base, "restore0"), snapshotID)
         for _, test := range testfiles {
             OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", test.name), int64(test.size)))
         }

         for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
             base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
-            cmdRestoreExcludes(t, global, base, snapshotID, []string{pat})
+            testRunRestoreExcludes(t, gopts, base, snapshotID, []string{pat})
             for _, test := range testfiles {
                 err := testFileSize(filepath.Join(base, "testdata", test.name), int64(test.size))
                 if ok, _ := filter.Match(pat, filepath.Base(test.name)); !ok {
                     OK(t, err)
                 } else {
-                    Assert(t, os.IsNotExist(err),
+                    Assert(t, os.IsNotExist(errors.Cause(err)),
                         "expected %v to not exist in restore step %v, but it exists, err %v", test.name, i+1, err)
                 }
             }
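TestRestoreFilter leans on restic's filter.Match to predict which files an exclude pattern should have removed from a restore. A short, hedged illustration of that predicate; only the Match(pattern, name) call itself is taken from the test above, the file names are invented:

// filter.Match reports whether a shell-style pattern matches a name.
patterns := []string{"*.c", "*.exe", "*file3*"}
for _, pat := range patterns {
    for _, name := range []string{"testfile1.c", "testfile2.exe", "testfile3.txt"} {
        ok, err := filter.Match(pat, name)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("pattern %-10q matches %-15q: %v\n", pat, name, ok)
    }
}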
@@ -633,52 +686,54 @@ func TestRestoreFilter(t *testing.T) {

 func TestRestoreLatest(t *testing.T) {

-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

         p := filepath.Join(env.testdata, "testfile.c")
         OK(t, os.MkdirAll(filepath.Dir(p), 0755))
         OK(t, appendRandomData(p, 100))

-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)

         os.Remove(p)
         OK(t, appendRandomData(p, 101))
-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)

         // Restore latest without any filters
-        cmdRestoreLatest(t, global, filepath.Join(env.base, "restore0"), nil, "")
+        testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore0"), nil, "")
         OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))

         // Setup test files in different directories backed up in different snapshots
         p1 := filepath.Join(env.testdata, "p1/testfile.c")
         OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
         OK(t, appendRandomData(p1, 102))
-        cmdBackup(t, global, []string{filepath.Dir(p1)}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{filepath.Dir(p1)}, opts, gopts)
+        testRunCheck(t, gopts)

         p2 := filepath.Join(env.testdata, "p2/testfile.c")
         OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
         OK(t, appendRandomData(p2, 103))
-        cmdBackup(t, global, []string{filepath.Dir(p2)}, nil)
-        cmdCheck(t, global)
+        testRunBackup(t, []string{filepath.Dir(p2)}, opts, gopts)
+        testRunCheck(t, gopts)

         p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
         p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")

-        cmdRestoreLatest(t, global, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
+        testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
         OK(t, testFileSize(p1rAbs, int64(102)))
-        if _, err := os.Stat(p2rAbs); os.IsNotExist(err) {
-            Assert(t, os.IsNotExist(err),
+        if _, err := os.Stat(p2rAbs); os.IsNotExist(errors.Cause(err)) {
+            Assert(t, os.IsNotExist(errors.Cause(err)),
                 "expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)
         }

-        cmdRestoreLatest(t, global, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
+        testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
         OK(t, testFileSize(p2rAbs, int64(103)))
-        if _, err := os.Stat(p1rAbs); os.IsNotExist(err) {
-            Assert(t, os.IsNotExist(err),
+        if _, err := os.Stat(p1rAbs); os.IsNotExist(errors.Cause(err)) {
+            Assert(t, os.IsNotExist(errors.Cause(err)),
                 "expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)
         }

@@ -686,20 +741,24 @@ func TestRestoreLatest(t *testing.T) {
 }

 func TestRestoreWithPermissionFailure(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "repo-restore-permissions-test.tar.gz")
         SetupTarTestFixture(t, env.base, datafile)

-        snapshots := cmdList(t, global, "snapshots")
+        snapshots := testRunList(t, "snapshots", gopts)
         Assert(t, len(snapshots) > 0,
             "no snapshots found in repo (%v)", datafile)

-        global.stderr = ioutil.Discard
-        cmdRestore(t, global, filepath.Join(env.base, "restore"), snapshots[0])
+        globalOptions.stderr = ioutil.Discard
+        defer func() {
+            globalOptions.stderr = os.Stderr
+        }()
+
+        testRunRestore(t, gopts, filepath.Join(env.base, "restore"), snapshots[0])

         // make sure that all files have been restored, regardless of any
         // permission errors
-        files := cmdLs(t, global, snapshots[0].String())
+        files := testRunLs(t, gopts, snapshots[0].String())
         for _, filename := range files {
             fi, err := os.Lstat(filepath.Join(env.base, "restore", filename))
             OK(t, err)
@@ -720,23 +779,25 @@ func setZeroModTime(filename string) error {
 }

 func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-        cmdInit(t, global)
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+        testRunInit(t, gopts)

         p := filepath.Join(env.testdata, "subdir1", "subdir2", "subdir3", "file.ext")
         OK(t, os.MkdirAll(filepath.Dir(p), 0755))
         OK(t, appendRandomData(p, 200))
         OK(t, setZeroModTime(filepath.Join(env.testdata, "subdir1", "subdir2")))

-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)
+        opts := BackupOptions{}

-        snapshotID := cmdList(t, global, "snapshots")[0]
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)
+
+        snapshotID := testRunList(t, "snapshots", gopts)[0]

         // restore with filter "*.ext", this should restore "file.ext", but
         // since the directories are ignored and only created because of
         // "file.ext", no meta data should be restored for them.
-        cmdRestoreIncludes(t, global, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
+        testRunRestoreIncludes(t, gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})

         f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
         fi, err := os.Stat(f1)
@@ -746,7 +807,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
             "meta data of intermediate directory has been restored although it was ignored")

         // restore with filter "*", this should restore meta data on everything.
-        cmdRestoreIncludes(t, global, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
+        testRunRestoreIncludes(t, gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})

         f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
         fi, err = os.Stat(f2)
@@ -758,44 +819,55 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
 }

 func TestFind(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "backup-data.tar.gz")
-        cmdInit(t, global)
+        testRunInit(t, gopts)
         SetupTarTestFixture(t, env.testdata, datafile)
-        cmdBackup(t, global, []string{env.testdata}, nil)
-        cmdCheck(t, global)

-        results := cmdFind(t, global, "unexistingfile")
+        opts := BackupOptions{}
+
+        testRunBackup(t, []string{env.testdata}, opts, gopts)
+        testRunCheck(t, gopts)
+
+        results := testRunFind(t, gopts, "unexistingfile")
         Assert(t, len(results) != 0, "unexisting file found in repo (%v)", datafile)

-        results = cmdFind(t, global, "testfile")
+        results = testRunFind(t, gopts, "testfile")
         Assert(t, len(results) != 1, "file not found in repo (%v)", datafile)

-        results = cmdFind(t, global, "test")
+        results = testRunFind(t, gopts, "test")
         Assert(t, len(results) < 2, "less than two file found in repo (%v)", datafile)
     })
 }

 func TestRebuildIndex(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("..", "..", "restic", "checker", "testdata", "duplicate-packs-in-index-test-repo.tar.gz")
         SetupTarTestFixture(t, env.base, datafile)

-        out := cmdCheckOutput(t, global)
+        out, err := testRunCheckOutput(gopts)
         if !strings.Contains(out, "contained in several indexes") {
             t.Fatalf("did not find checker hint for packs in several indexes")
         }

+        if err != nil {
+            t.Fatalf("expected no error from checker for test repository, got %v", err)
+        }
+
         if !strings.Contains(out, "restic rebuild-index") {
             t.Fatalf("did not find hint for rebuild-index command")
         }

-        cmdRebuildIndex(t, global)
+        testRunRebuildIndex(t, gopts)

-        out = cmdCheckOutput(t, global)
+        out, err = testRunCheckOutput(gopts)
         if len(out) != 0 {
             t.Fatalf("expected no output from the checker, got: %v", out)
         }

+        if err != nil {
+            t.Fatalf("expected no error from checker after rebuild-index, got: %v", err)
+        }
     })
 }

@@ -804,29 +876,8 @@ func TestRebuildIndexAlwaysFull(t *testing.T) {
     TestRebuildIndex(t)
 }

-var optimizeTests = []struct {
-    testFilename string
-    snapshots    backend.IDSet
-}{
-    {
-        filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"),
-        backend.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")),
-    },
-    {
-        filepath.Join("testdata", "old-index-repo.tar.gz"),
-        nil,
-    },
-    {
-        filepath.Join("testdata", "old-index-repo.tar.gz"),
-        backend.NewIDSet(
-            ParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"),
-            ParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"),
-        ),
-    },
-}
-
 func TestCheckRestoreNoLock(t *testing.T) {
-    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
         datafile := filepath.Join("testdata", "small-repo.tar.gz")
         SetupTarTestFixture(t, env.base, datafile)

@@ -838,14 +889,15 @@ func TestCheckRestoreNoLock(t *testing.T) {
         })
         OK(t, err)

-        global.NoLock = true
-        cmdCheck(t, global)
+        gopts.NoLock = true

-        snapshotIDs := cmdList(t, global, "snapshots")
+        testRunCheck(t, gopts)
+
+        snapshotIDs := testRunList(t, "snapshots", gopts)
         if len(snapshotIDs) == 0 {
             t.Fatalf("found no snapshots")
         }

-        cmdRestore(t, global, filepath.Join(env.base, "restore"), snapshotIDs[0])
+        testRunRestore(t, gopts, filepath.Join(env.base, "restore"), snapshotIDs[0])
     })
 }

@@ -39,7 +39,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,

     globalLocks.Lock()
     if globalLocks.cancelRefresh == nil {
-        debug.Log("main.lockRepository", "start goroutine for lock refresh")
+        debug.Log("start goroutine for lock refresh")
         globalLocks.cancelRefresh = make(chan struct{})
         globalLocks.refreshWG = sync.WaitGroup{}
         globalLocks.refreshWG.Add(1)
@@ -55,7 +55,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
 var refreshInterval = 5 * time.Minute

 func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
-    debug.Log("main.refreshLocks", "start")
+    debug.Log("start")
     defer func() {
         wg.Done()
         globalLocks.Lock()
@@ -68,10 +68,10 @@ func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
     for {
         select {
         case <-done:
-            debug.Log("main.refreshLocks", "terminate")
+            debug.Log("terminate")
             return
         case <-ticker.C:
-            debug.Log("main.refreshLocks", "refreshing locks")
+            debug.Log("refreshing locks")
             globalLocks.Lock()
             for _, lock := range globalLocks.locks {
                 err := lock.Refresh()
@@ -88,9 +88,9 @@ func unlockRepo(lock *restic.Lock) error {
     globalLocks.Lock()
     defer globalLocks.Unlock()

-    debug.Log("unlockRepo", "unlocking repository")
+    debug.Log("unlocking repository")
     if err := lock.Unlock(); err != nil {
-        debug.Log("unlockRepo", "error while unlocking: %v", err)
+        debug.Log("error while unlocking: %v", err)
         return err
     }

@@ -108,13 +108,13 @@ func unlockAll() error {
     globalLocks.Lock()
     defer globalLocks.Unlock()

-    debug.Log("unlockAll", "unlocking %d locks", len(globalLocks.locks))
+    debug.Log("unlocking %d locks", len(globalLocks.locks))
     for _, lock := range globalLocks.locks {
         if err := lock.Unlock(); err != nil {
-            debug.Log("unlockAll", "error while unlocking: %v", err)
+            debug.Log("error while unlocking: %v", err)
             return err
         }
-        debug.Log("unlockAll", "successfully removed lock")
+        debug.Log("successfully removed lock")
     }

     return nil

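The hunks above only change debug.Log call sites, but the machinery they sit in is a classic pattern: one goroutine wakes on a ticker and refreshes every registered repository lock so other clients never mistake them for stale locks. A minimal, standalone sketch of that ticker pattern, with illustrative names rather than restic's own types:

package main

import (
    "fmt"
    "sync"
    "time"
)

// refresher periodically calls refresh() until stop is closed,
// mirroring the shape of refreshLocks above.
func refresher(wg *sync.WaitGroup, stop <-chan struct{}, interval time.Duration, refresh func() error) {
    defer wg.Done()
    ticker := time.NewTicker(interval)
    defer ticker.Stop()

    for {
        select {
        case <-stop:
            return
        case <-ticker.C:
            if err := refresh(); err != nil {
                fmt.Println("refresh failed:", err)
            }
        }
    }
}

func main() {
    var wg sync.WaitGroup
    stop := make(chan struct{})
    wg.Add(1)
    go refresher(&wg, stop, 50*time.Millisecond, func() error {
        fmt.Println("lock refreshed")
        return nil
    })

    time.Sleep(120 * time.Millisecond)
    close(stop) // equivalent of closing cancelRefresh
    wg.Wait()
}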
@@ -3,38 +3,50 @@ package main

 import (
     "fmt"
     "os"
-    "runtime"
-
-    "github.com/jessevdk/go-flags"
     "restic"
     "restic/debug"
+    "runtime"
+
+    "github.com/spf13/cobra"
+
+    "restic/errors"
 )

+// cmdRoot is the base command when no other command has been specified.
+var cmdRoot = &cobra.Command{
+    Use:   "restic",
+    Short: "backup and restore files",
+    Long: `
+restic is a backup program which allows saving multiple revisions of files and
+directories in an encrypted repository stored on different backends.
+`,
+    SilenceErrors:    true,
+    SilenceUsage:     true,
+    PersistentPreRun: parseEnvironment,
+}
+
 func init() {
     // set GOMAXPROCS to number of CPUs
-    runtime.GOMAXPROCS(runtime.NumCPU())
+    if runtime.Version() < "go1.5" {
+        gomaxprocs := os.Getenv("GOMAXPROCS")
+        debug.Log("read GOMAXPROCS from env variable, value: %s", gomaxprocs)
+        if gomaxprocs == "" {
+            runtime.GOMAXPROCS(runtime.NumCPU())
+        }
+    }
 }

 func main() {
     // defer profile.Start(profile.MemProfileRate(100000), profile.ProfilePath(".")).Stop()
     // defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
-    globalOpts.Repo = os.Getenv("RESTIC_REPOSITORY")
-    globalOpts.password = os.Getenv("RESTIC_PASSWORD")
+    debug.Log("main %#v", os.Args)
+    err := cmdRoot.Execute()

-    debug.Log("restic", "main %#v", os.Args)
-
-    _, err := parser.Parse()
-    if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
-        parser.WriteHelp(os.Stdout)
-        os.Exit(0)
-    }
-
-    if err != nil {
+    switch {
+    case restic.IsAlreadyLocked(errors.Cause(err)):
+        fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
+    case errors.IsFatal(errors.Cause(err)):
         fmt.Fprintf(os.Stderr, "%v\n", err)
-    }
-
-    if restic.IsAlreadyLocked(err) {
-        fmt.Fprintf(os.Stderr, "\nthe `unlock` command can be used to remove stale locks\n")
+    case err != nil:
+        fmt.Fprintf(os.Stderr, "%+v\n", err)
     }

     RunCleanupHandlers()

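With cmdRoot in place, each subcommand presumably attaches itself to the root via cobra's AddCommand from its own init(). The command files are not part of this diff, so the following registration sketch is an assumption about the pattern rather than a quote of restic's sources; cmdVersion and its output line are invented for illustration:

// hypothetical cmd_version.go in the same package
var cmdVersion = &cobra.Command{
    Use:   "version",
    Short: "print version information",
    RunE: func(cmd *cobra.Command, args []string) error {
        fmt.Printf("restic compiled with %v on %v/%v\n",
            runtime.Version(), runtime.GOOS, runtime.GOARCH)
        return nil
    },
}

func init() {
    // registration happens as a side effect of importing the package,
    // so main() only needs to call cmdRoot.Execute()
    cmdRoot.AddCommand(cmdVersion)
}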
src/cmds/restic/table.go (new file, 46 lines)
@@ -0,0 +1,46 @@
+package main
+
+import (
+    "fmt"
+    "io"
+    "strings"
+)
+
+// Table contains data for a table to be printed.
+type Table struct {
+    Header string
+    Rows   [][]interface{}
+
+    RowFormat string
+}
+
+// NewTable initializes a new Table.
+func NewTable() Table {
+    return Table{
+        Rows: [][]interface{}{},
+    }
+}
+
+// Write prints the table to w.
+func (t Table) Write(w io.Writer) error {
+    _, err := fmt.Fprintln(w, t.Header)
+    if err != nil {
+        return err
+    }
+    _, err = fmt.Fprintln(w, strings.Repeat("-", 70))
+    if err != nil {
+        return err
+    }
+
+    for _, row := range t.Rows {
+        _, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+// TimeFormat is the format used for all timestamps printed by restic.
+const TimeFormat = "2006-01-02 15:04:05"
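The new Table type is append-driven: a caller sets Header and RowFormat, pushes rows whose elements match the format verbs, then calls Write to render everything. A hedged usage sketch that would live in the same package; the snapshot-style columns and values are illustrative only:

tab := NewTable()
tab.Header = fmt.Sprintf("%-8s  %-19s  %s", "ID", "Date", "Directory")
tab.RowFormat = "%-8s  %-19s  %s"

// each row must line up with RowFormat's verbs
tab.Rows = append(tab.Rows, []interface{}{"a13c11e5", "2016-08-31 09:41:00", "/home/user"})

if err := tab.Write(os.Stdout); err != nil {
    fmt.Fprintf(os.Stderr, "print table: %v\n", err)
}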
src/cmds/restic/testdata/repo-same-timestamps.tar.gz (new binary file, vendored, not shown)
@@ -1,122 +0,0 @@
-package restic
-
-import (
-    "encoding/json"
-    "io"
-    "restic/backend"
-    "restic/debug"
-    "restic/pack"
-    "restic/repository"
-    "time"
-
-    "github.com/restic/chunker"
-)
-
-// saveTreeJSON stores a tree in the repository.
-func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
-    data, err := json.Marshal(item)
-    if err != nil {
-        return backend.ID{}, err
-    }
-    data = append(data, '\n')
-
-    // check if tree has been saved before
-    id := backend.Hash(data)
-    if repo.Index().Has(id) {
-        return id, nil
-    }
-
-    return repo.SaveJSON(pack.Tree, item)
-}
-
-// ArchiveReader reads from the reader and archives the data. Returned is the
-// resulting snapshot and its ID.
-func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
-    debug.Log("ArchiveReader", "start archiving %s", name)
-    sn, err := NewSnapshot([]string{name})
-    if err != nil {
-        return nil, backend.ID{}, err
-    }
-
-    p.Start()
-    defer p.Done()
-
-    chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)
-
-    var ids backend.IDs
-    var fileSize uint64
-
-    for {
-        chunk, err := chnker.Next(getBuf())
-        if err == io.EOF {
-            break
-        }
-
-        if err != nil {
-            return nil, backend.ID{}, err
-        }
-
-        id := backend.Hash(chunk.Data)
-
-        if !repo.Index().Has(id) {
-            _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
-            if err != nil {
-                return nil, backend.ID{}, err
-            }
-            debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
-        } else {
-            debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
-        }
-
-        freeBuf(chunk.Data)
-
-        ids = append(ids, id)
-
-        p.Report(Stat{Bytes: uint64(chunk.Length)})
-        fileSize += uint64(chunk.Length)
-    }
-
-    tree := &Tree{
-        Nodes: []*Node{
-            &Node{
-                Name:       name,
-                AccessTime: time.Now(),
-                ModTime:    time.Now(),
-                Type:       "file",
-                Mode:       0644,
-                Size:       fileSize,
-                UID:        sn.UID,
-                GID:        sn.GID,
-                User:       sn.Username,
-                Content:    ids,
-            },
-        },
-    }
-
-    treeID, err := saveTreeJSON(repo, tree)
-    if err != nil {
-        return nil, backend.ID{}, err
-    }
-    sn.Tree = &treeID
-    debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
-
-    id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
-    if err != nil {
-        return nil, backend.ID{}, err
-    }
-
-    sn.id = &id
-    debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
-
-    err = repo.Flush()
-    if err != nil {
-        return nil, backend.ID{}, err
-    }
-
-    err = repo.SaveIndex()
-    if err != nil {
-        return nil, backend.ID{}, err
-    }
-
-    return sn, id, nil
-}
src/restic/archiver/archive_reader.go (new file, 103 lines)
@@ -0,0 +1,103 @@
+package archiver
+
+import (
+    "io"
+    "restic"
+    "restic/debug"
+    "time"
+
+    "restic/errors"
+
+    "github.com/restic/chunker"
+)
+
+// ArchiveReader reads from the reader and archives the data. Returned is the
+// resulting snapshot and its ID.
+func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string, tags []string) (*restic.Snapshot, restic.ID, error) {
+    debug.Log("start archiving %s", name)
+    sn, err := restic.NewSnapshot([]string{name}, tags)
+    if err != nil {
+        return nil, restic.ID{}, err
+    }
+
+    p.Start()
+    defer p.Done()
+
+    chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
+
+    var ids restic.IDs
+    var fileSize uint64
+
+    for {
+        chunk, err := chnker.Next(getBuf())
+        if errors.Cause(err) == io.EOF {
+            break
+        }
+
+        if err != nil {
+            return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
+        }
+
+        id := restic.Hash(chunk.Data)
+
+        if !repo.Index().Has(id, restic.DataBlob) {
+            _, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id)
+            if err != nil {
+                return nil, restic.ID{}, err
+            }
+            debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
+        } else {
+            debug.Log("blob %v already saved in the repo\n", id.Str())
+        }
+
+        freeBuf(chunk.Data)
+
+        ids = append(ids, id)
+
+        p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
+        fileSize += uint64(chunk.Length)
+    }
+
+    tree := &restic.Tree{
+        Nodes: []*restic.Node{
+            &restic.Node{
+                Name:       name,
+                AccessTime: time.Now(),
+                ModTime:    time.Now(),
+                Type:       "file",
+                Mode:       0644,
+                Size:       fileSize,
+                UID:        sn.UID,
+                GID:        sn.GID,
+                User:       sn.Username,
+                Content:    ids,
+            },
+        },
+    }
+
+    treeID, err := repo.SaveTree(tree)
+    if err != nil {
+        return nil, restic.ID{}, err
+    }
+    sn.Tree = &treeID
+    debug.Log("tree saved as %v", treeID.Str())
+
+    id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
+    if err != nil {
+        return nil, restic.ID{}, err
+    }
+
+    debug.Log("snapshot saved as %v", id.Str())
+
+    err = repo.Flush()
+    if err != nil {
+        return nil, restic.ID{}, err
+    }
+
+    err = repo.SaveIndex()
+    if err != nil {
+        return nil, restic.ID{}, err
+    }
+
+    return sn, id, nil
+}
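ArchiveReader, now moved into the archiver package and given a tag-list parameter, chunks an arbitrary stream, stores any data blobs the index does not already know, and wraps the result in a single-file tree plus snapshot; this is what a stdin-style backup rests on. A hedged calling sketch; repository setup is elided, and the file name and tag are invented:

f, err := os.Open("dump.sql")
if err != nil {
    log.Fatal(err)
}
defer f.Close()

// repo is a restic.Repository opened elsewhere; the nil progress is
// accepted because restic's Progress methods tolerate a nil receiver.
sn, id, err := archiver.ArchiveReader(repo, nil, f, "dump.sql", []string{"db"})
if err != nil {
    log.Fatal(err)
}
fmt.Printf("created snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())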
@@ -1,28 +1,25 @@
-package restic
+package archiver

 import (
     "bytes"
     "io"
     "math/rand"
-    "restic/backend"
-    "restic/pack"
+    "restic"
     "restic/repository"
     "testing"

     "github.com/restic/chunker"
 )

-func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
-    buf, err := repo.LoadBlob(pack.Data, id, buf)
+func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
+    n, err := repo.LoadBlob(restic.DataBlob, id, buf)
     if err != nil {
         t.Fatalf("LoadBlob(%v) returned error %v", id, err)
     }

-    return buf
+    return n
 }

-func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) {
-    tree, err := LoadTree(repo, treeID)
+func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
+    tree, err := repo.LoadTree(treeID)
     if err != nil {
         t.Fatalf("LoadTree() returned error %v", err)
     }
@@ -41,13 +38,23 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
     }

     // check blobs
-    buf := make([]byte, chunker.MaxSize)
-    buf2 := make([]byte, chunker.MaxSize)
     for i, id := range node.Content {
-        buf = loadBlob(t, repo, id, buf)
+        size, err := repo.LookupBlobSize(id, restic.DataBlob)
+        if err != nil {
+            t.Fatal(err)
+        }

-        buf2 = buf2[:len(buf)]
+        buf := make([]byte, int(size))
+        n := loadBlob(t, repo, id, buf)
+        if n != len(buf) {
+            t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
+        }
+
+        buf2 := make([]byte, int(size))
         _, err = io.ReadFull(rd, buf2)
         if err != nil {
             t.Fatal(err)
         }

         if !bytes.Equal(buf, buf2) {
             t.Fatalf("blob %d (%v) is wrong", i, id.Str())
@@ -55,6 +62,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
     }
 }

+// fakeFile returns a reader which yields deterministic pseudo-random data.
+func fakeFile(t testing.TB, seed, size int64) io.Reader {
+    return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
+}
+
 func TestArchiveReader(t *testing.T) {
     repo, cleanup := repository.TestRepository(t)
     defer cleanup()
@@ -65,7 +77,7 @@ func TestArchiveReader(t *testing.T) {

     f := fakeFile(t, seed, size)

-    sn, id, err := ArchiveReader(repo, nil, f, "fakefile")
+    sn, id, err := ArchiveReader(repo, nil, f, "fakefile", []string{"test"})
     if err != nil {
         t.Fatalf("ArchiveReader() returned error %v", err)
     }
@@ -95,7 +107,7 @@ func BenchmarkArchiveReader(t *testing.B) {
     t.ResetTimer()

     for i := 0; i < t.N; i++ {
-        _, _, err := ArchiveReader(repo, nil, bytes.NewReader(buf), "fakefile")
+        _, _, err := ArchiveReader(repo, nil, bytes.NewReader(buf), "fakefile", []string{"test"})
         if err != nil {
             t.Fatal(err)
         }
@@ -1,4 +1,4 @@
-package restic
+package archiver

 import (
     "encoding/json"
@@ -6,20 +6,19 @@ import (
     "io"
     "os"
     "path/filepath"
+    "restic"
     "sort"
     "sync"
     "time"

-    "restic/backend"
+    "restic/errors"
+    "restic/walk"

     "restic/debug"
     "restic/fs"
-    "restic/pack"
     "restic/pipe"
-    "restic/repository"

     "github.com/restic/chunker"
-
-    "github.com/juju/errors"
 )

 const (
@@ -32,9 +31,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }

 // Archiver is used to backup a set of directories.
 type Archiver struct {
-    repo *repository.Repository
+    repo restic.Repository
     knownBlobs struct {
-        backend.IDSet
+        restic.IDSet
         sync.Mutex
     }

@@ -45,16 +44,16 @@ type Archiver struct {
     Excludes []string
 }

-// NewArchiver returns a new archiver.
-func NewArchiver(repo *repository.Repository) *Archiver {
+// New returns a new archiver.
+func New(repo restic.Repository) *Archiver {
     arch := &Archiver{
         repo:      repo,
         blobToken: make(chan struct{}, maxConcurrentBlobs),
         knownBlobs: struct {
-            backend.IDSet
+            restic.IDSet
             sync.Mutex
         }{
-            IDSet: backend.NewIDSet(),
+            IDSet: restic.NewIDSet(),
         },
     }

@@ -72,7 +71,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
 // When the blob is not known, false is returned and the blob is added to the
 // list. This means that the caller false is returned to is responsible to save
 // the blob to the backend.
-func (arch *Archiver) isKnownBlob(id backend.ID) bool {
+func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
     arch.knownBlobs.Lock()
     defer arch.knownBlobs.Unlock()

@@ -82,7 +81,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {

     arch.knownBlobs.Insert(id)

-    _, err := arch.repo.Index().Lookup(id)
+    _, err := arch.repo.Index().Lookup(id, t)
     if err == nil {
         return true
     }
@@ -91,59 +90,59 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {
 }

 // Save stores a blob read from rd in the repository.
-func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
-    debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
+func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
+    debug.Log("Save(%v, %v)\n", t, id.Str())

-    if arch.isKnownBlob(id) {
-        debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
+    if arch.isKnownBlob(id, restic.DataBlob) {
+        debug.Log("blob %v is known\n", id.Str())
         return nil
     }

-    _, err := arch.repo.SaveAndEncrypt(t, data, &id)
+    _, err := arch.repo.SaveBlob(t, data, id)
     if err != nil {
-        debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
+        debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
         return err
     }

-    debug.Log("Archiver.Save", "Save(%v, %v): new blob\n", t, id.Str())
+    debug.Log("Save(%v, %v): new blob\n", t, id.Str())
     return nil
 }

 // SaveTreeJSON stores a tree in the repository.
-func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
-    data, err := json.Marshal(item)
+func (arch *Archiver) SaveTreeJSON(tree *restic.Tree) (restic.ID, error) {
+    data, err := json.Marshal(tree)
     if err != nil {
-        return backend.ID{}, err
+        return restic.ID{}, errors.Wrap(err, "Marshal")
     }
     data = append(data, '\n')

     // check if tree has been saved before
-    id := backend.Hash(data)
-    if arch.isKnownBlob(id) {
+    id := restic.Hash(data)
+    if arch.isKnownBlob(id, restic.TreeBlob) {
         return id, nil
     }

-    return arch.repo.SaveJSON(pack.Tree, item)
+    return arch.repo.SaveBlob(restic.TreeBlob, data, id)
 }

-func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) {
+func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
     fi, err := file.Stat()
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "restic.Stat")
     }

     if fi.ModTime() == node.ModTime {
         return node, nil
     }

-    err = arch.Error(node.path, fi, errors.New("file has changed"))
+    err = arch.Error(node.Path, fi, errors.New("file has changed"))
     if err != nil {
         return nil, err
     }

-    node, err = NodeFromFileInfo(node.path, fi)
+    node, err = restic.NodeFromFileInfo(node.Path, fi)
     if err != nil {
-        debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
+        debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
         return nil, err
     }

@@ -151,21 +150,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
 }

 type saveResult struct {
-    id    backend.ID
+    id    restic.ID
     bytes uint64
 }

-func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
+func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
     defer freeBuf(chunk.Data)

-    id := backend.Hash(chunk.Data)
-    err := arch.Save(pack.Data, chunk.Data, id)
+    id := restic.Hash(chunk.Data)
+    err := arch.Save(restic.DataBlob, chunk.Data, id)
     // TODO handle error
     if err != nil {
         panic(err)
     }

-    p.Report(Stat{Bytes: uint64(chunk.Length)})
+    p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
     arch.blobToken <- token
     resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
 }
@@ -178,41 +177,41 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
 }

     if len(results) != len(resultChannels) {
-        return nil, fmt.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
+        return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
     }

     return results, nil
 }

-func updateNodeContent(node *Node, results []saveResult) error {
-    debug.Log("Archiver.Save", "checking size for file %s", node.path)
+func updateNodeContent(node *restic.Node, results []saveResult) error {
+    debug.Log("checking size for file %s", node.Path)

     var bytes uint64
-    node.Content = make([]backend.ID, len(results))
+    node.Content = make([]restic.ID, len(results))

     for i, b := range results {
         node.Content[i] = b.id
         bytes += b.bytes

-        debug.Log("Archiver.Save", "  adding blob %s, %d bytes", b.id.Str(), b.bytes)
+        debug.Log("  adding blob %s, %d bytes", b.id.Str(), b.bytes)
     }

     if bytes != node.Size {
-        return fmt.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size)
+        return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size)
     }

-    debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results))
+    debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results))

     return nil
 }

 // SaveFile stores the content of the file on the backend as a Blob by calling
 // Save for each chunk.
-func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
-    file, err := fs.Open(node.path)
+func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error {
+    file, err := fs.Open(node.Path)
     defer file.Close()
     if err != nil {
-        return err
+        return errors.Wrap(err, "Open")
     }

     node, err = arch.reloadFileIfChanged(node, file)
@@ -220,17 +219,17 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
         return err
     }

-    chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial)
+    chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
     resultChannels := [](<-chan saveResult){}

     for {
         chunk, err := chnker.Next(getBuf())
-        if err == io.EOF {
+        if errors.Cause(err) == io.EOF {
             break
         }

         if err != nil {
-            return errors.Annotate(err, "SaveFile() chunker.Next()")
+            return errors.Wrap(err, "chunker.Next")
         }

         resCh := make(chan saveResult, 1)
@@ -247,9 +246,9 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
         return err
     }

-func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
+func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
     defer func() {
-        debug.Log("Archiver.fileWorker", "done")
+        debug.Log("done")
         wg.Done()
     }()
     for {
@@ -260,38 +259,38 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
             return
         }

-        debug.Log("Archiver.fileWorker", "got job %v", e)
+        debug.Log("got job %v", e)

         // check for errors
         if e.Error() != nil {
-            debug.Log("Archiver.fileWorker", "job %v has errors: %v", e.Path(), e.Error())
+            debug.Log("job %v has errors: %v", e.Path(), e.Error())
             // TODO: integrate error reporting
             fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
             // ignore this file
             e.Result() <- nil
-            p.Report(Stat{Errors: 1})
+            p.Report(restic.Stat{Errors: 1})
             continue
         }

-        node, err := NodeFromFileInfo(e.Fullpath(), e.Info())
+        node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
         if err != nil {
             // TODO: integrate error reporting
-            debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err)
+            debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
             e.Result() <- nil
-            p.Report(Stat{Errors: 1})
+            p.Report(restic.Stat{Errors: 1})
             continue
         }

         // try to use old node, if present
         if e.Node != nil {
-            debug.Log("Archiver.fileWorker", "  %v use old data", e.Path())
+            debug.Log("  %v use old data", e.Path())

-            oldNode := e.Node.(*Node)
+            oldNode := e.Node.(*restic.Node)
             // check if all content is still available in the repository
             contentMissing := false
-            for _, blob := range oldNode.blobs {
-                if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil {
-                    debug.Log("Archiver.fileWorker", "  %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
+            for _, blob := range oldNode.Content {
+                if !arch.repo.Index().Has(blob, restic.DataBlob) {
+                    debug.Log("  %v not using old data, %v is missing", e.Path(), blob.Str())
                     contentMissing = true
                     break
                 }
@@ -299,33 +298,32 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st

             if !contentMissing {
                 node.Content = oldNode.Content
-                node.blobs = oldNode.blobs
-                debug.Log("Archiver.fileWorker", "  %v content is complete", e.Path())
+                debug.Log("  %v content is complete", e.Path())
             }
} else {
|
||||
debug.Log("Archiver.fileWorker", " %v no old data", e.Path())
|
||||
debug.Log(" %v no old data", e.Path())
|
||||
}
|
||||
|
||||
// otherwise read file normally
|
||||
if node.Type == "file" && len(node.Content) == 0 {
|
||||
debug.Log("Archiver.fileWorker", " read and save %v, content: %v", e.Path(), node.Content)
|
||||
debug.Log(" read and save %v, content: %v", e.Path(), node.Content)
|
||||
err = arch.SaveFile(p, node)
|
||||
if err != nil {
|
||||
// TODO: integrate error reporting
|
||||
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err)
|
||||
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
|
||||
// ignore this file
|
||||
e.Result() <- nil
|
||||
p.Report(Stat{Errors: 1})
|
||||
p.Report(restic.Stat{Errors: 1})
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// report old data size
|
||||
p.Report(Stat{Bytes: node.Size})
|
||||
p.Report(restic.Stat{Bytes: node.Size})
|
||||
}
|
||||
|
||||
debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs))
|
||||
debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content))
|
||||
e.Result() <- node
|
||||
p.Report(Stat{Files: 1})
|
||||
p.Report(restic.Stat{Files: 1})
|
||||
case <-done:
|
||||
// pipeline was cancelled
|
||||
return
|
||||
@@ -333,10 +331,10 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
|
||||
}
|
||||
}
|
||||
|
||||
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
|
||||
debug.Log("Archiver.dirWorker", "start")
|
||||
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
|
||||
debug.Log("start")
|
||||
defer func() {
|
||||
debug.Log("Archiver.dirWorker", "done")
|
||||
debug.Log("done")
|
||||
wg.Done()
|
||||
}()
|
||||
for {
|
||||
@@ -346,47 +344,47 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
|
||||
// channel is closed
|
||||
return
|
||||
}
|
||||
debug.Log("Archiver.dirWorker", "save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
|
||||
debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
|
||||
|
||||
// ignore dir nodes with errors
|
||||
if dir.Error() != nil {
|
||||
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
|
||||
dir.Result() <- nil
|
||||
p.Report(Stat{Errors: 1})
|
||||
p.Report(restic.Stat{Errors: 1})
|
||||
continue
|
||||
}
|
||||
|
||||
tree := NewTree()
|
||||
tree := restic.NewTree()
|
||||
|
||||
// wait for all content
|
||||
for _, ch := range dir.Entries {
|
||||
debug.Log("Archiver.dirWorker", "receiving result from %v", ch)
|
||||
debug.Log("receiving result from %v", ch)
|
||||
res := <-ch
|
||||
|
||||
// if we get a nil pointer here, an error has happened while
|
||||
// processing this entry. Ignore it for now.
|
||||
if res == nil {
|
||||
debug.Log("Archiver.dirWorker", "got nil result?")
|
||||
debug.Log("got nil result?")
|
||||
continue
|
||||
}
|
||||
|
||||
// else insert node
|
||||
node := res.(*Node)
|
||||
node := res.(*restic.Node)
|
||||
tree.Insert(node)
|
||||
|
||||
if node.Type == "dir" {
|
||||
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree)
|
||||
debug.Log("got tree node for %s: %v", node.Path, node.Subtree)
|
||||
|
||||
if node.Subtree.IsNull() {
|
||||
panic("invalid null subtree ID")
|
||||
panic("invalid null subtree restic.ID")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
node := &Node{}
|
||||
node := &restic.Node{}
|
||||
|
||||
if dir.Path() != "" && dir.Info() != nil {
|
||||
n, err := NodeFromFileInfo(dir.Path(), dir.Info())
|
||||
n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info())
|
||||
if err != nil {
|
||||
n.Error = err.Error()
|
||||
dir.Result() <- n
|
||||
@@ -403,18 +401,18 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
|
||||
debug.Log("save tree for %s: %v", dir.Path(), id.Str())
|
||||
if id.IsNull() {
|
||||
panic("invalid null subtree ID return from SaveTreeJSON()")
|
||||
panic("invalid null subtree restic.ID return from SaveTreeJSON()")
|
||||
}
|
||||
|
||||
node.Subtree = &id
|
||||
|
||||
debug.Log("Archiver.dirWorker", "sending result to %v", dir.Result())
|
||||
debug.Log("sending result to %v", dir.Result())
|
||||
|
||||
dir.Result() <- node
|
||||
if dir.Path() != "" {
|
||||
p.Report(Stat{Dirs: 1})
|
||||
p.Report(restic.Stat{Dirs: 1})
|
||||
}
|
||||
case <-done:
|
||||
// pipeline was cancelled
|
||||
@@ -424,7 +422,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
|
||||
}
|
||||
|
||||
type archivePipe struct {
|
||||
Old <-chan WalkTreeJob
|
||||
Old <-chan walk.TreeJob
|
||||
New <-chan pipe.Job
|
||||
}
|
||||
|
||||
@@ -445,7 +443,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
|
||||
case job, ok = <-inCh:
|
||||
if !ok {
|
||||
// input channel closed, we're done
|
||||
debug.Log("copyJobs", "input channel closed, we're done")
|
||||
debug.Log("input channel closed, we're done")
|
||||
return
|
||||
}
|
||||
inCh = nil
|
||||
@@ -459,21 +457,21 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
|
||||
|
||||
type archiveJob struct {
|
||||
hasOld bool
|
||||
old WalkTreeJob
|
||||
old walk.TreeJob
|
||||
new pipe.Job
|
||||
}
|
||||
|
||||
func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
defer func() {
|
||||
close(out)
|
||||
debug.Log("ArchivePipe.compare", "done")
|
||||
debug.Log("done")
|
||||
}()
|
||||
|
||||
debug.Log("ArchivePipe.compare", "start")
|
||||
debug.Log("start")
|
||||
var (
|
||||
loadOld, loadNew bool = true, true
|
||||
ok bool
|
||||
oldJob WalkTreeJob
|
||||
oldJob walk.TreeJob
|
||||
newJob pipe.Job
|
||||
)
|
||||
|
||||
@@ -482,7 +480,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
oldJob, ok = <-a.Old
|
||||
// if the old channel is closed, just pass through the new jobs
|
||||
if !ok {
|
||||
debug.Log("ArchivePipe.compare", "old channel is closed, copy from new channel")
|
||||
debug.Log("old channel is closed, copy from new channel")
|
||||
|
||||
// handle remaining newJob
|
||||
if !loadNew {
|
||||
@@ -500,15 +498,15 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
newJob, ok = <-a.New
|
||||
// if the new channel is closed, there are no more files in the current snapshot, return
|
||||
if !ok {
|
||||
debug.Log("ArchivePipe.compare", "new channel is closed, we're done")
|
||||
debug.Log("new channel is closed, we're done")
|
||||
return
|
||||
}
|
||||
|
||||
loadNew = false
|
||||
}
|
||||
|
||||
debug.Log("ArchivePipe.compare", "old job: %v", oldJob.Path)
|
||||
debug.Log("ArchivePipe.compare", "new job: %v", newJob.Path())
|
||||
debug.Log("old job: %v", oldJob.Path)
|
||||
debug.Log("new job: %v", newJob.Path())
|
||||
|
||||
// at this point we have received an old job as well as a new job, compare paths
|
||||
file1 := oldJob.Path
|
||||
@@ -518,7 +516,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
dir2 := filepath.Dir(file2)
|
||||
|
||||
if file1 == file2 {
|
||||
debug.Log("ArchivePipe.compare", " same filename %q", file1)
|
||||
debug.Log(" same filename %q", file1)
|
||||
|
||||
// send job
|
||||
out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy()
|
||||
@@ -526,19 +524,19 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
loadNew = true
|
||||
continue
|
||||
} else if dir1 < dir2 {
|
||||
debug.Log("ArchivePipe.compare", " %q < %q, file %q added", dir1, dir2, file2)
|
||||
debug.Log(" %q < %q, file %q added", dir1, dir2, file2)
|
||||
// file is new, send new job and load new
|
||||
loadNew = true
|
||||
out <- archiveJob{new: newJob}.Copy()
|
||||
continue
|
||||
} else if dir1 == dir2 {
|
||||
if file1 < file2 {
|
||||
debug.Log("ArchivePipe.compare", " %q < %q, file %q removed", file1, file2, file1)
|
||||
debug.Log(" %q < %q, file %q removed", file1, file2, file1)
|
||||
// file has been removed, load new old
|
||||
loadOld = true
|
||||
continue
|
||||
} else {
|
||||
debug.Log("ArchivePipe.compare", " %q > %q, file %q added", file1, file2, file2)
|
||||
debug.Log(" %q > %q, file %q added", file1, file2, file2)
|
||||
// file is new, send new job and load new
|
||||
loadNew = true
|
||||
out <- archiveJob{new: newJob}.Copy()
|
||||
@@ -546,7 +544,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
|
||||
}
|
||||
}
|
||||
|
||||
debug.Log("ArchivePipe.compare", " %q > %q, file %q removed", file1, file2, file1)
|
||||
debug.Log(" %q > %q, file %q removed", file1, file2, file1)
|
||||
// file has been removed, throw away old job and load new
|
||||
loadOld = true
|
||||
}
|
||||
@@ -559,7 +557,7 @@ func (j archiveJob) Copy() pipe.Job {
|
||||
|
||||
// handle files
|
||||
if isRegularFile(j.new.Info()) {
|
||||
debug.Log("archiveJob.Copy", " job %v is file", j.new.Path())
|
||||
debug.Log(" job %v is file", j.new.Path())
|
||||
|
||||
// if type has changed, return new job directly
|
||||
if j.old.Node == nil {
|
||||
@@ -567,12 +565,12 @@ func (j archiveJob) Copy() pipe.Job {
|
||||
}
|
||||
|
||||
// if file is newer, return the new job
|
||||
if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
|
||||
debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
|
||||
if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
|
||||
debug.Log(" job %v is newer", j.new.Path())
|
||||
return j.new
|
||||
}
|
||||
|
||||
debug.Log("archiveJob.Copy", " job %v add old data", j.new.Path())
|
||||
debug.Log(" job %v add old data", j.new.Path())
|
||||
// otherwise annotate job with old data
|
||||
e := j.new.(pipe.Entry)
|
||||
e.Node = j.old.Node
|
||||
@@ -597,10 +595,10 @@ func (arch *Archiver) saveIndexes(wg *sync.WaitGroup, done <-chan struct{}) {
|
||||
case <-done:
|
||||
return
|
||||
case <-ticker.C:
|
||||
debug.Log("Archiver.saveIndexes", "saving full indexes")
|
||||
debug.Log("saving full indexes")
|
||||
err := arch.repo.SaveFullIndex()
|
||||
if err != nil {
|
||||
debug.Log("Archiver.saveIndexes", "save indexes returned an error: %v", err)
|
||||
debug.Log("save indexes returned an error: %v", err)
|
||||
fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err)
|
||||
}
|
||||
}
|
||||
@@ -632,14 +630,14 @@ func (p baseNameSlice) Len() int { return len(p) }
|
||||
func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
|
||||
func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
|
||||
// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is
|
||||
// used to compare the files to the ones archived at the time this snapshot was
|
||||
// taken.
|
||||
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) {
|
||||
func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
|
||||
paths = unique(paths)
|
||||
sort.Sort(baseNameSlice(paths))
|
||||
|
||||
debug.Log("Archiver.Snapshot", "start for %v", paths)
|
||||
debug.Log("start for %v", paths)
|
||||
|
||||
debug.RunHook("Archiver.Snapshot", nil)
|
||||
|
||||
@@ -651,9 +649,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
|
||||
defer p.Done()
|
||||
|
||||
// create new snapshot
|
||||
sn, err := NewSnapshot(paths)
|
||||
sn, err := restic.NewSnapshot(paths, tags)
|
||||
if err != nil {
|
||||
return nil, backend.ID{}, err
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
sn.Excludes = arch.Excludes
|
||||
|
||||
@@ -664,18 +662,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
|
||||
sn.Parent = parentID
|
||||
|
||||
// load parent snapshot
|
||||
parent, err := LoadSnapshot(arch.repo, *parentID)
|
||||
parent, err := restic.LoadSnapshot(arch.repo, *parentID)
|
||||
if err != nil {
|
||||
return nil, backend.ID{}, err
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
// start walker on old tree
|
||||
ch := make(chan WalkTreeJob)
|
||||
go WalkTree(arch.repo, *parent.Tree, done, ch)
|
||||
ch := make(chan walk.TreeJob)
|
||||
go walk.Tree(arch.repo, *parent.Tree, done, ch)
|
||||
jobs.Old = ch
|
||||
} else {
|
||||
// use closed channel
|
||||
ch := make(chan WalkTreeJob)
|
||||
ch := make(chan walk.TreeJob)
|
||||
close(ch)
|
||||
jobs.Old = ch
|
||||
}
|
||||
@@ -685,7 +683,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
|
||||
resCh := make(chan pipe.Result, 1)
|
||||
go func() {
|
||||
pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
|
||||
debug.Log("Archiver.Snapshot", "pipe.Walk done")
|
||||
debug.Log("pipe.Walk done")
|
||||
}()
|
||||
jobs.New = pipeCh
|
||||
|
||||
@@ -700,7 +698,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
pipe.Split(ch, dirCh, entCh)
|
||||
debug.Log("Archiver.Snapshot", "split done")
|
||||
debug.Log("split done")
|
||||
close(dirCh)
|
||||
close(entCh)
|
||||
wg.Done()
|
||||
@@ -720,44 +718,42 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
|
||||
go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)
|
||||
|
||||
// wait for all workers to terminate
|
||||
debug.Log("Archiver.Snapshot", "wait for workers")
|
||||
debug.Log("wait for workers")
|
||||
wg.Wait()
|
||||
|
||||
// stop index saver
|
||||
close(stopIndexSaver)
|
||||
wgIndexSaver.Wait()
|
||||
|
||||
debug.Log("Archiver.Snapshot", "workers terminated")
|
||||
debug.Log("workers terminated")
|
||||
|
||||
// receive the top-level tree
|
||||
root := (<-resCh).(*Node)
|
||||
debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
|
||||
root := (<-resCh).(*restic.Node)
|
||||
debug.Log("root node received: %v", root.Subtree.Str())
|
||||
sn.Tree = root.Subtree
|
||||
|
||||
// save snapshot
|
||||
id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
|
||||
id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
|
||||
if err != nil {
|
||||
return nil, backend.ID{}, err
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
// store ID in snapshot struct
|
||||
sn.id = &id
|
||||
debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
|
||||
debug.Log("saved snapshot %v", id.Str())
|
||||
|
||||
// flush repository
|
||||
err = arch.repo.Flush()
|
||||
if err != nil {
|
||||
return nil, backend.ID{}, err
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
// save index
|
||||
err = arch.repo.SaveIndex()
|
||||
if err != nil {
|
||||
debug.Log("Archiver.Snapshot", "error saving index: %v", err)
|
||||
return nil, backend.ID{}, err
|
||||
debug.Log("error saving index: %v", err)
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
debug.Log("Archiver.Snapshot", "saved indexes")
|
||||
debug.Log("saved indexes")
|
||||
|
||||
return sn, id, nil
|
||||
}
|
||||
@@ -770,17 +766,17 @@ func isRegularFile(fi os.FileInfo) bool {
|
||||
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
|
||||
}
|
||||
|
||||
// Scan traverses the dirs to collect Stat information while emitting progress
|
||||
// Scan traverses the dirs to collect restic.Stat information while emitting progress
|
||||
// information with p.
|
||||
func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
|
||||
func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
|
||||
p.Start()
|
||||
defer p.Done()
|
||||
|
||||
var stat Stat
|
||||
var stat restic.Stat
|
||||
|
||||
for _, dir := range dirs {
|
||||
debug.Log("Scan", "Start for %v", dir)
|
||||
err := filepath.Walk(dir, func(str string, fi os.FileInfo, err error) error {
|
||||
debug.Log("Start for %v", dir)
|
||||
err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error {
|
||||
// TODO: integrate error reporting
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err)
|
||||
@@ -792,14 +788,14 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
|
||||
}
|
||||
|
||||
if !filter(str, fi) {
|
||||
debug.Log("Scan.Walk", "path %v excluded", str)
|
||||
debug.Log("path %v excluded", str)
|
||||
if fi.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
s := Stat{}
|
||||
s := restic.Stat{}
|
||||
if fi.IsDir() {
|
||||
s.Dirs++
|
||||
} else {
|
||||
@@ -817,9 +813,9 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
|
||||
return nil
|
||||
})
|
||||
|
||||
debug.Log("Scan", "Done for %v, err: %v", dir, err)
|
||||
debug.Log("Done for %v, err: %v", dir, err)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
return restic.Stat{}, errors.Wrap(err, "fs.Walk")
|
||||
}
|
||||
}
|
||||
|
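The hunks above consistently replace direct comparisons like "err == io.EOF" with "errors.Cause(err) == io.EOF". Once a callee wraps an error with errors.Wrap, the wrapper is a new value and no longer compares equal to the sentinel, so the caller has to unwrap before comparing. A minimal standalone sketch, assuming github.com/pkg/errors (which the restic/errors package used in this diff appears to re-export):

package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

// nextChunk annotates the sentinel before returning it, as chunker callers do.
func nextChunk() error {
	return errors.Wrap(io.EOF, "chunker.Next")
}

func main() {
	err := nextChunk()
	fmt.Println(err == io.EOF)               // false: the wrapper is a distinct value
	fmt.Println(errors.Cause(err) == io.EOF) // true: Cause unwraps to the sentinel
}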
@@ -1,17 +1,18 @@
- package restic_test
+ package archiver_test

import (
	"crypto/rand"
-	"errors"
	"io"
	mrand "math/rand"
	"sync"
	"testing"
	"time"

+	"restic/errors"
+
	"restic"
-	"restic/backend"
-	"restic/pack"
+	"restic/archiver"
+	"restic/mock"
	"restic/repository"
)

@@ -19,14 +20,14 @@ const parallelSaves = 50
const testSaveIndexTime = 100 * time.Millisecond
const testTimeout = 2 * time.Second

- var DupID backend.ID
+ var DupID restic.ID

- func randomID() backend.ID {
+ func randomID() restic.ID {
	if mrand.Float32() < 0.5 {
		return DupID
	}

-	id := backend.ID{}
+	id := restic.ID{}
	_, err := io.ReadFull(rand.Reader, id[:])
	if err != nil {
		panic(err)
@@ -35,30 +36,30 @@ func randomID() backend.ID {
}

// forgetfulBackend returns a backend that forgets everything.
- func forgetfulBackend() backend.Backend {
-	be := &backend.MockBackend{}
+ func forgetfulBackend() restic.Backend {
+	be := &mock.Backend{}

-	be.TestFn = func(t backend.Type, name string) (bool, error) {
+	be.TestFn = func(t restic.FileType, name string) (bool, error) {
		return false, nil
	}

-	be.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) {
+	be.LoadFn = func(h restic.Handle, p []byte, off int64) (int, error) {
		return 0, errors.New("not found")
	}

-	be.SaveFn = func(h backend.Handle, p []byte) error {
+	be.SaveFn = func(h restic.Handle, p []byte) error {
		return nil
	}

-	be.StatFn = func(h backend.Handle) (backend.BlobInfo, error) {
-		return backend.BlobInfo{}, errors.New("not found")
+	be.StatFn = func(h restic.Handle) (restic.FileInfo, error) {
+		return restic.FileInfo{}, errors.New("not found")
	}

-	be.RemoveFn = func(t backend.Type, name string) error {
+	be.RemoveFn = func(t restic.FileType, name string) error {
		return nil
	}

-	be.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string {
+	be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string {
		ch := make(chan string)
		close(ch)
		return ch
@@ -84,7 +85,7 @@ func testArchiverDuplication(t *testing.T) {
		t.Fatal(err)
	}

-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)

	wg := &sync.WaitGroup{}
	done := make(chan struct{})
@@ -101,13 +102,13 @@ func testArchiverDuplication(t *testing.T) {

	id := randomID()

-	if repo.Index().Has(id) {
+	if repo.Index().Has(id, restic.DataBlob) {
		continue
	}

	buf := make([]byte, 50)

-	err := arch.Save(pack.Data, buf, id)
+	err := arch.Save(restic.DataBlob, buf, id)
	if err != nil {
		t.Fatal(err)
	}
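forgetfulBackend above builds a backend out of overridable function fields (TestFn, SaveFn, and so on), so a test can fake exactly the behaviour it needs; here, a store that accepts every write and remembers nothing, which forces the archiver to handle duplicate chunk saves. A standalone sketch of the same pattern with hypothetical, trimmed-down types (the real ones live in restic/mock):

package main

import "fmt"

// Backend is a trimmed-down stand-in for the function-field mock used above.
type Backend struct {
	SaveFn func(name string, p []byte) error
	TestFn func(name string) (bool, error)
}

func (b *Backend) Save(name string, p []byte) error { return b.SaveFn(name, p) }
func (b *Backend) Test(name string) (bool, error)   { return b.TestFn(name) }

func main() {
	// a backend that accepts every save and forgets it immediately
	be := &Backend{
		SaveFn: func(name string, p []byte) error { return nil },
		TestFn: func(name string) (bool, error) { return false, nil },
	}

	_ = be.Save("blob1", []byte("data"))
	ok, _ := be.Test("blob1")
	fmt.Println(ok) // false: nothing was retained, so the blob looks new again
}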
@@ -1,10 +1,11 @@
- package restic
+ package archiver

import (
	"os"
	"testing"

	"restic/pipe"
+	"restic/walk"
)

var treeJobs = []string{
@@ -82,12 +83,12 @@ func (j testPipeJob) Error() error { return j.err }
func (j testPipeJob) Info() os.FileInfo { return j.fi }
func (j testPipeJob) Result() chan<- pipe.Result { return j.res }

- func testTreeWalker(done <-chan struct{}, out chan<- WalkTreeJob) {
+ func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) {
	for _, e := range treeJobs {
		select {
		case <-done:
			return
-		case out <- WalkTreeJob{Path: e}:
+		case out <- walk.TreeJob{Path: e}:
		}
	}

@@ -109,7 +110,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) {
func TestArchivePipe(t *testing.T) {
	done := make(chan struct{})

-	treeCh := make(chan WalkTreeJob)
+	treeCh := make(chan walk.TreeJob)
	pipeCh := make(chan pipe.Job)

	go testTreeWalker(done, treeCh)
@@ -1,4 +1,4 @@
- package restic_test
+ package archiver_test

import (
	"bytes"
@@ -7,13 +7,14 @@ import (
	"time"

	"restic"
-	"restic/backend"
+	"restic/archiver"
	"restic/checker"
	"restic/crypto"
-	"restic/pack"
	"restic/repository"
	. "restic/test"

+	"restic/errors"
+
	"github.com/restic/chunker"
)

@@ -31,7 +32,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
	for {
		chunk, err := ch.Next(buf)

-		if err == io.EOF {
+		if errors.Cause(err) == io.EOF {
			break
		}

@@ -47,8 +48,8 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
}

func BenchmarkChunkEncrypt(b *testing.B) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
+	repo, cleanup := repository.TestRepository(b)
+	defer cleanup()

	data := Random(23, 10<<20) // 10MiB
	rd := bytes.NewReader(data)
@@ -69,7 +70,7 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)

	for {
		chunk, err := ch.Next(buf)
-		if err == io.EOF {
+		if errors.Cause(err) == io.EOF {
			break
		}

@@ -79,8 +80,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
}

func BenchmarkChunkEncryptParallel(b *testing.B) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
+	repo, cleanup := repository.TestRepository(b)
+	defer cleanup()

	data := Random(23, 10<<20) // 10MiB

@@ -98,12 +99,12 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
}

func archiveDirectory(b testing.TB) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
+	repo, cleanup := repository.TestRepository(b)
+	defer cleanup()

-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)

-	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
+	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, nil)
	OK(b, err)

	b.Logf("snapshot archived as %v", id)
@@ -127,9 +128,17 @@ func BenchmarkArchiveDirectory(b *testing.B) {
	}
}

+ func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
+	for _ = range repo.Backend().List(t, nil) {
+		n++
+	}
+
+	return n
+ }
+
func archiveWithDedup(t testing.TB) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
+	repo, cleanup := repository.TestRepository(t)
+	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping TestArchiverDedup")
@@ -142,24 +151,24 @@ func archiveWithDedup(t testing.TB) {
	}

	// archive a few files
-	sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
+	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn.ID().Str())

	// get archive stats
-	cnt.before.packs = repo.Count(backend.Data)
-	cnt.before.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.before.packs = countPacks(repo, restic.DataFile)
+	cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)

	// archive the same files again, without parent snapshot
-	sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
+	sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn2.ID().Str())

	// get archive stats again
-	cnt.after.packs = repo.Count(backend.Data)
-	cnt.after.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.after.packs = countPacks(repo, restic.DataFile)
+	cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)

@@ -170,13 +179,13 @@ func archiveWithDedup(t testing.TB) {
	}

	// archive the same files again, with a parent snapshot
-	sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
+	sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())

	// get archive stats again
-	cnt.after2.packs = repo.Count(backend.Data)
-	cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
-	cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
+	cnt.after2.packs = countPacks(repo, restic.DataFile)
+	cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
+	cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)

@@ -191,48 +200,6 @@ func TestArchiveDedup(t *testing.T) {
	archiveWithDedup(t)
}

- func BenchmarkLoadTree(t *testing.B) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
-
-	if BenchArchiveDirectory == "" {
-		t.Skip("benchdir not set, skipping TestArchiverDedup")
-	}
-
-	// archive a few files
-	arch := restic.NewArchiver(repo)
-	sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
-	OK(t, err)
-	t.Logf("archived snapshot %v", sn.ID())
-
-	list := make([]backend.ID, 0, 10)
-	done := make(chan struct{})
-
-	for _, idx := range repo.Index().All() {
-		for blob := range idx.Each(done) {
-			if blob.Type != pack.Tree {
-				continue
-			}
-
-			list = append(list, blob.ID)
-			if len(list) == cap(list) {
-				close(done)
-				break
-			}
-		}
-	}
-
-	// start benchmark
-	t.ResetTimer()
-
-	for i := 0; i < t.N; i++ {
-		for _, id := range list {
-			_, err := restic.LoadTree(repo, id)
-			OK(t, err)
-		}
-	}
- }

// Saves several identical chunks concurrently and later checks that there are no
// unreferenced packs in the repository. See also #292 and #358.
func TestParallelSaveWithDuplication(t *testing.T) {
@@ -242,13 +209,13 @@ func TestParallelSaveWithDuplication(t *testing.T) {
}

func testParallelSaveWithDuplication(t *testing.T, seed int) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
+	repo, cleanup := repository.TestRepository(t)
+	defer cleanup()

	dataSizeMb := 128
	duplication := 7

-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
	chunks := getRandomData(seed, dataSizeMb*1024*1024)

	errChannels := [](<-chan error){}
@@ -265,9 +232,9 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
	go func(c chunker.Chunk, errChan chan<- error) {
		barrier <- struct{}{}

-		id := backend.Hash(c.Data)
+		id := restic.Hash(c.Data)
		time.Sleep(time.Duration(id[0]))
-		err := arch.Save(pack.Data, c.Data, id)
+		err := arch.Save(restic.DataBlob, c.Data, id)
		<-barrier
		errChan <- err
	}(c, errChan)
@@ -292,7 +259,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {

	for {
		c, err := chunker.Next(nil)
-		if err == io.EOF {
+		if errors.Cause(err) == io.EOF {
			break
		}
		chunks = append(chunks, c)
@@ -301,7 +268,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
	return chunks
}

- func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker {
+ func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
	chkr := checker.New(repo)

	hints, errs := chkr.LoadIndex()
src/restic/archiver/buffer_pool.go (new file, 21 lines)
@@ -0,0 +1,21 @@
+ package archiver
+
+ import (
+	"sync"
+
+	"github.com/restic/chunker"
+ )
+
+ var bufPool = sync.Pool{
+	New: func() interface{} {
+		return make([]byte, chunker.MinSize)
+	},
+ }
+
+ func getBuf() []byte {
+	return bufPool.Get().([]byte)
+ }
+
+ func freeBuf(data []byte) {
+	bufPool.Put(data)
+ }
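The new buffer_pool.go funnels chunk buffers through a sync.Pool, so the chunker loop in SaveFile reuses allocations instead of growing the heap with every chunk. A standalone sketch of the same get/process/free cycle; bufferSize is a placeholder standing in for chunker.MinSize:

package main

import (
	"fmt"
	"sync"
)

const bufferSize = 512 * 1024 // stands in for chunker.MinSize

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, bufferSize) },
}

func getBuf() []byte      { return bufPool.Get().([]byte) }
func freeBuf(data []byte) { bufPool.Put(data) }

func main() {
	for i := 0; i < 3; i++ {
		buf := getBuf() // likely reused from the previous iteration
		fmt.Println("chunk buffer of", len(buf), "bytes")
		freeBuf(buf) // return it once the chunk has been saved
	}
}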
src/restic/archiver/testing.go (new file, 16 lines)
@@ -0,0 +1,16 @@
+ package archiver
+
+ import (
+	"restic"
+	"testing"
+ )
+
+ // TestSnapshot creates a new snapshot of path.
+ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
+	arch := New(repo)
+	sn, _, err := arch.Snapshot(nil, []string{path}, []string{"test"}, parent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return sn
+ }
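A usage sketch for the new archiver.TestSnapshot helper, mirroring how the dedup test above calls it; the test name and the "testdata" path are placeholders, everything else follows the signatures shown in this diff:

package archiver_test

import (
	"testing"

	"restic/archiver"
	"restic/repository"
)

func TestSnapshotHelper(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// first snapshot without a parent ...
	sn := archiver.TestSnapshot(t, repo, "testdata", nil)

	// ... then a second one using the first as parent
	sn2 := archiver.TestSnapshot(t, repo, "testdata", sn.ID())
	t.Logf("snapshots %v and %v", sn.ID().Str(), sn2.ID().Str())
}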
src/restic/backend.go (new file, 38 lines)
@@ -0,0 +1,38 @@
+ package restic
+
+ // Backend is used to store and access data.
+ type Backend interface {
+	// Location returns a string that describes the type and location of the
+	// repository.
+	Location() string
+
+	// Test a boolean value whether a File with the name and type exists.
+	Test(t FileType, name string) (bool, error)
+
+	// Remove removes a File with type t and name.
+	Remove(t FileType, name string) error
+
+	// Close the backend
+	Close() error
+
+	// Load returns the data stored in the backend for h at the given offset
+	// and saves it in p. Load has the same semantics as io.ReaderAt, except
+	// that a negative offset is also allowed. In this case it references a
+	// position relative to the end of the file (similar to Seek()).
+	Load(h Handle, p []byte, off int64) (int, error)
+
+	// Save stores the data in the backend under the given handle.
+	Save(h Handle, p []byte) error
+
+	// Stat returns information about the File identified by h.
+	Stat(h Handle) (FileInfo, error)
+
+	// List returns a channel that yields all names of files of type t in an
+	// arbitrary order. A goroutine is started for this. If the channel done is
+	// closed, sending stops.
+	List(t FileType, done <-chan struct{}) <-chan string
+ }
+
+ // FileInfo is returned by Stat() and contains information about a file in the
+ // backend.
+ type FileInfo struct{ Size int64 }
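List hands back a receive-only channel fed by a goroutine that stops sending once done is closed, so a consumer that leaves the loop early must close done or the sender blocks forever. A sketch of a well-behaved consumer, assuming only the interface defined above; the function name and the limit of ten are illustrative:

package example

import "restic"

// listSomeDataFiles drains at most ten names from be.List; closing done on
// return lets the listing goroutine exit even when we break out early.
func listSomeDataFiles(be restic.Backend) []string {
	done := make(chan struct{})
	defer close(done)

	var names []string
	for name := range be.List(restic.DataFile, done) {
		names = append(names, name)
		if len(names) == 10 {
			break // the deferred close(done) unblocks the sender
		}
	}
	return names
}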
@@ -1,5 +1,4 @@
|
||||
// Package backend provides local and remote storage for restic repositories.
|
||||
// All backends need to implement the Backend interface. There is a
|
||||
// MockBackend, which can be used for mocking in tests, and a MemBackend, which
|
||||
// stores all data in a hash internally.
|
||||
// All backends need to implement the Backend interface. There is a MemBackend,
|
||||
// which stores all data in a map internally and can be used for testing.
|
||||
package backend
|
||||
|
@@ -1,61 +0,0 @@
- package backend_test
-
- import (
-	"testing"
-
-	"restic/backend"
-	. "restic/test"
- )
-
- type mockBackend struct {
-	list func(backend.Type, <-chan struct{}) <-chan string
- }
-
- func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
-	return m.list(t, done)
- }
-
- var samples = backend.IDs{
-	ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
-	ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
-	ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
-	ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
-	ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
-	ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
-	ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
-	ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
- }
-
- func TestPrefixLength(t *testing.T) {
-	list := samples
-
-	m := mockBackend{}
-	m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
-		ch := make(chan string)
-		go func() {
-			defer close(ch)
-			for _, id := range list {
-				select {
-				case ch <- id.String():
-				case <-done:
-					return
-				}
-			}
-		}()
-		return ch
-	}
-
-	l, err := backend.PrefixLength(m, backend.Snapshot)
-	OK(t, err)
-	Equals(t, 19, l)
-
-	list = samples[:3]
-	l, err = backend.PrefixLength(m, backend.Snapshot)
-	OK(t, err)
-	Equals(t, 19, l)
-
-	list = samples[3:]
-	l, err = backend.PrefixLength(m, backend.Snapshot)
-	OK(t, err)
-	Equals(t, 8, l)
- }
@@ -1,48 +0,0 @@
- package backend
-
- import (
-	"errors"
-	"fmt"
- )
-
- // Handle is used to store and access data in a backend.
- type Handle struct {
-	Type Type
-	Name string
- }
-
- func (h Handle) String() string {
-	name := h.Name
-	if len(name) > 10 {
-		name = name[:10]
-	}
-	return fmt.Sprintf("<%s/%s>", h.Type, name)
- }
-
- // Valid returns an error if h is not valid.
- func (h Handle) Valid() error {
-	if h.Type == "" {
-		return errors.New("type is empty")
-	}
-
-	switch h.Type {
-	case Data:
-	case Key:
-	case Lock:
-	case Snapshot:
-	case Index:
-	case Config:
-	default:
-		return fmt.Errorf("invalid Type %q", h.Type)
-	}
-
-	if h.Type == Config {
-		return nil
-	}
-
-	if h.Name == "" {
-		return errors.New("invalid Name")
-	}
-
-	return nil
- }
@@ -1,58 +0,0 @@
- package backend_test
-
- import (
-	"reflect"
-	"testing"
-
-	"restic/backend"
-	. "restic/test"
- )
-
- var uniqTests = []struct {
-	before, after backend.IDs
- }{
-	{
-		backend.IDs{
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-		},
-		backend.IDs{
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-		},
-	},
-	{
-		backend.IDs{
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-		},
-		backend.IDs{
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-		},
-	},
-	{
-		backend.IDs{
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-			ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-		},
-		backend.IDs{
-			ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
-			ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
-			ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
-		},
-	},
- }
-
- func TestUniqIDs(t *testing.T) {
-	for i, test := range uniqTests {
-		uniq := test.before.Uniq()
-		if !reflect.DeepEqual(uniq, test.after) {
-			t.Errorf("uniqIDs() test %v failed\n  wanted: %v\n  got: %v", i, test.after, uniq)
-		}
-	}
- }
@@ -1,35 +0,0 @@
- package backend_test
-
- import (
-	"testing"
-
-	"restic/backend"
-	. "restic/test"
- )
-
- var idsetTests = []struct {
-	id backend.ID
-	seen bool
- }{
-	{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
-	{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
-	{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
-	{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
-	{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
-	{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
-	{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
-	{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
-	{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
-	{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
- }
-
- func TestIDSet(t *testing.T) {
-	set := backend.NewIDSet()
-	for i, test := range idsetTests {
-		seen := set.Has(test.id)
-		if seen != test.seen {
-			t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen)
-		}
-		set.Insert(test.id)
-	}
- }
@@ -1,61 +0,0 @@
- package backend
-
- // Type is the type of a Blob.
- type Type string
-
- // These are the different data types a backend can store.
- const (
-	Data Type = "data"
-	Key = "key"
-	Lock = "lock"
-	Snapshot = "snapshot"
-	Index = "index"
-	Config = "config"
- )
-
- // Backend is used to store and access data.
- type Backend interface {
-	// Location returns a string that describes the type and location of the
-	// repository.
-	Location() string
-
-	// Test a boolean value whether a Blob with the name and type exists.
-	Test(t Type, name string) (bool, error)
-
-	// Remove removes a Blob with type t and name.
-	Remove(t Type, name string) error
-
-	// Close the backend
-	Close() error
-
-	Lister
-
-	// Load returns the data stored in the backend for h at the given offset
-	// and saves it in p. Load has the same semantics as io.ReaderAt.
-	Load(h Handle, p []byte, off int64) (int, error)
-
-	// Save stores the data in the backend under the given handle.
-	Save(h Handle, p []byte) error
-
-	// Stat returns information about the blob identified by h.
-	Stat(h Handle) (BlobInfo, error)
- }
-
- // Lister implements listing data items stored in a backend.
- type Lister interface {
-	// List returns a channel that yields all names of blobs of type t in an
-	// arbitrary order. A goroutine is started for this. If the channel done is
-	// closed, sending stops.
-	List(t Type, done <-chan struct{}) <-chan string
- }
-
- // Deleter are backends that allow to self-delete all content stored in them.
- type Deleter interface {
-	// Delete the complete repository.
-	Delete() error
- }
-
- // BlobInfo is returned by Stat() and contains information about a stored blob.
- type BlobInfo struct {
-	Size int64
- }
@@ -51,6 +51,13 @@ func TestLocalBackendLoad(t *testing.T) {
	test.TestLoad(t)
}

+ func TestLocalBackendLoadNegativeOffset(t *testing.T) {
+	if SkipMessage != "" {
+		t.Skip(SkipMessage)
+	}
+	test.TestLoadNegativeOffset(t)
+ }
+
func TestLocalBackendSave(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)

@@ -1,8 +1,9 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"restic/errors"
|
||||
)
|
||||
|
||||
// ParseConfig parses a local backend config.
|
||||
|
@@ -1,12 +1,13 @@
package local

import (
-	"errors"
-	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
+	"restic"

+	"restic/errors"

	"restic/backend"
	"restic/debug"
@@ -18,6 +19,8 @@ type Local struct {
	p string
}

+ var _ restic.Backend = &Local{}
+
func paths(dir string) []string {
	return []string{
		dir,
@@ -34,8 +37,8 @@ func paths(dir string) []string {
func Open(dir string) (*Local, error) {
	// test if all necessary dirs are there
	for _, d := range paths(dir) {
-		if _, err := os.Stat(d); err != nil {
-			return nil, fmt.Errorf("%s does not exist", d)
+		if _, err := fs.Stat(d); err != nil {
+			return nil, errors.Wrap(err, "Open")
		}
	}

@@ -46,16 +49,16 @@ func Open(dir string) (*Local, error) {
// backend at dir. Afterwards a new config blob should be created.
func Create(dir string) (*Local, error) {
	// test if config file already exists
-	_, err := os.Lstat(filepath.Join(dir, backend.Paths.Config))
+	_, err := fs.Lstat(filepath.Join(dir, backend.Paths.Config))
	if err == nil {
		return nil, errors.New("config file already exists")
	}

	// create paths for data, refs and temp
	for _, d := range paths(dir) {
-		err := os.MkdirAll(d, backend.Modes.Dir)
+		err := fs.MkdirAll(d, backend.Modes.Dir)
		if err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "MkdirAll")
		}
	}

@@ -69,8 +72,8 @@ func (b *Local) Location() string {
}

// Construct path for given Type and name.
- func filename(base string, t backend.Type, name string) string {
-	if t == backend.Config {
+ func filename(base string, t restic.FileType, name string) string {
+	if t == restic.ConfigFile {
		return filepath.Join(base, "config")
	}

@@ -78,50 +81,57 @@ func filename(base string, t backend.Type, name string) string {
}

// Construct directory for given Type.
- func dirname(base string, t backend.Type, name string) string {
+ func dirname(base string, t restic.FileType, name string) string {
	var n string
	switch t {
-	case backend.Data:
+	case restic.DataFile:
		n = backend.Paths.Data
		if len(name) > 2 {
			n = filepath.Join(n, name[:2])
		}
-	case backend.Snapshot:
+	case restic.SnapshotFile:
		n = backend.Paths.Snapshots
-	case backend.Index:
+	case restic.IndexFile:
		n = backend.Paths.Index
-	case backend.Lock:
+	case restic.LockFile:
		n = backend.Paths.Locks
-	case backend.Key:
+	case restic.KeyFile:
		n = backend.Paths.Keys
	}
	return filepath.Join(base, n)
}

- // Load returns the data stored in the backend for h at the given offset
- // and saves it in p. Load has the same semantics as io.ReaderAt.
- func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+ // Load returns the data stored in the backend for h at the given offset and
+ // saves it in p. Load has the same semantics as io.ReaderAt, with one
+ // exception: when off is lower than zero, it is treated as an offset relative
+ // to the end of the file.
+ func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
+	debug.Log("Load %v, length %v at %v", h, len(p), off)
	if err := h.Valid(); err != nil {
		return 0, err
	}

-	f, err := os.Open(filename(b.p, h.Type, h.Name))
+	f, err := fs.Open(filename(b.p, h.Type, h.Name))
	if err != nil {
-		return 0, err
+		return 0, errors.Wrap(err, "Open")
	}

	defer func() {
		e := f.Close()
		if err == nil && e != nil {
-			err = e
+			err = errors.Wrap(e, "Close")
		}
	}()

-	if off > 0 {
+	switch {
+	case off > 0:
		_, err = f.Seek(off, 0)
-		if err != nil {
-			return 0, err
-		}
+	case off < 0:
+		_, err = f.Seek(off, 2)
+	}
+
+	if err != nil {
+		return 0, errors.Wrap(err, "Seek")
	}

	return io.ReadFull(f, p)
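The new negative-offset branch lets callers address data relative to the end of a file without a prior Stat, mirroring Seek with whence 2. A sketch of what that enables, assuming only the restic.Backend interface introduced above; the helper name and the 16-byte trailer length are illustrative:

package example

import "restic"

// loadTrailer reads the final 16 bytes of the file behind h without knowing
// its size, relying on the new off < 0 behaviour (seek relative to the end).
func loadTrailer(be restic.Backend, h restic.Handle) ([]byte, error) {
	buf := make([]byte, 16)
	n, err := be.Load(h, buf, -16)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}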
@@ -131,12 +141,12 @@ func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
	tmpfile, err := ioutil.TempFile(tempdir, "temp-")
	if err != nil {
-		return "", err
+		return "", errors.Wrap(err, "TempFile")
	}

	n, err := tmpfile.Write(p)
	if err != nil {
-		return "", err
+		return "", errors.Wrap(err, "Write")
	}

	if n != len(p) {
@@ -144,101 +154,108 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
	}

	if err = tmpfile.Sync(); err != nil {
-		return "", err
+		return "", errors.Wrap(err, "Syncn")
	}

	err = fs.ClearCache(tmpfile)
	if err != nil {
-		return "", err
+		return "", errors.Wrap(err, "ClearCache")
	}

	err = tmpfile.Close()
	if err != nil {
-		return "", err
+		return "", errors.Wrap(err, "Close")
	}

	return tmpfile.Name(), nil
}

// Save stores data in the backend at the handle.
- func (b *Local) Save(h backend.Handle, p []byte) (err error) {
+ func (b *Local) Save(h restic.Handle, p []byte) (err error) {
+	debug.Log("Save %v, length %v", h, len(p))
	if err := h.Valid(); err != nil {
		return err
	}

	tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p)
-	debug.Log("local.Save", "saved %v (%d bytes) to %v", h, len(p), tmpfile)
+	debug.Log("saved %v (%d bytes) to %v", h, len(p), tmpfile)
	if err != nil {
		return err
	}

	filename := filename(b.p, h.Type, h.Name)

	// test if new path already exists
-	if _, err := os.Stat(filename); err == nil {
-		return fmt.Errorf("Rename(): file %v already exists", filename)
+	if _, err := fs.Stat(filename); err == nil {
+		return errors.Errorf("Rename(): file %v already exists", filename)
	}

	// create directories if necessary, ignore errors
-	if h.Type == backend.Data {
-		err = os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
+	if h.Type == restic.DataFile {
+		err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
		if err != nil {
-			return err
+			return errors.Wrap(err, "MkdirAll")
		}
	}

-	err = os.Rename(tmpfile, filename)
-	debug.Log("local.Save", "save %v: rename %v -> %v: %v",
+	err = fs.Rename(tmpfile, filename)
+	debug.Log("save %v: rename %v -> %v: %v",
		h, filepath.Base(tmpfile), filepath.Base(filename), err)

	if err != nil {
-		return err
+		return errors.Wrap(err, "Rename")
	}

	// set mode to read-only
-	fi, err := os.Stat(filename)
+	fi, err := fs.Stat(filename)
	if err != nil {
-		return err
+		return errors.Wrap(err, "Stat")
	}

	return setNewFileMode(filename, fi)
}

// Stat returns information about a blob.
- func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
+ func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
+	debug.Log("Stat %v", h)
	if err := h.Valid(); err != nil {
-		return backend.BlobInfo{}, err
+		return restic.FileInfo{}, err
	}

-	fi, err := os.Stat(filename(b.p, h.Type, h.Name))
+	fi, err := fs.Stat(filename(b.p, h.Type, h.Name))
	if err != nil {
-		return backend.BlobInfo{}, err
+		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

-	return backend.BlobInfo{Size: fi.Size()}, nil
+	return restic.FileInfo{Size: fi.Size()}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
- func (b *Local) Test(t backend.Type, name string) (bool, error) {
-	_, err := os.Stat(filename(b.p, t, name))
+ func (b *Local) Test(t restic.FileType, name string) (bool, error) {
+	debug.Log("Test %v %v", t, name)
+	_, err := fs.Stat(filename(b.p, t, name))
	if err != nil {
-		if os.IsNotExist(err) {
+		if os.IsNotExist(errors.Cause(err)) {
			return false, nil
		}
-		return false, err
+		return false, errors.Wrap(err, "Stat")
	}

	return true, nil
}

// Remove removes the blob with the given name and type.
- func (b *Local) Remove(t backend.Type, name string) error {
+ func (b *Local) Remove(t restic.FileType, name string) error {
+	debug.Log("Remove %v %v", t, name)
	fn := filename(b.p, t, name)

	// reset read-only flag
-	err := os.Chmod(fn, 0666)
+	err := fs.Chmod(fn, 0666)
	if err != nil {
-		return err
+		return errors.Wrap(err, "Chmod")
	}

-	return os.Remove(fn)
+	return fs.Remove(fn)
}

func isFile(fi os.FileInfo) bool {
@@ -246,15 +263,15 @@ func isFile(fi os.FileInfo) bool {
}

func readdir(d string) (fileInfos []os.FileInfo, err error) {
-	f, e := os.Open(d)
+	f, e := fs.Open(d)
	if e != nil {
-		return nil, e
+		return nil, errors.Wrap(e, "Open")
	}

	defer func() {
		e := f.Close()
		if err == nil {
-			err = e
+			err = errors.Wrap(e, "Close")
		}
	}()

@@ -303,9 +320,10 @@ func listDirs(dir string) (filenames []string, err error) {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
- func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
+ func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
+	debug.Log("List %v", t)
	lister := listDir
-	if t == backend.Data {
+	if t == restic.DataFile {
		lister = listDirs
	}

@@ -336,11 +354,13 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {

// Delete removes the repository and all files.
func (b *Local) Delete() error {
-	return os.RemoveAll(b.p)
+	debug.Log("Delete()")
+	return fs.RemoveAll(b.p)
}

// Close closes all open files.
func (b *Local) Close() error {
+	debug.Log("Close()")
	// this does not need to do anything, all open files are closed within the
	// same function.
	return nil

||||
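The pattern above replaces bare `return err` statements with `errors.Wrap(err, "Op")` and guards `os.IsNotExist` checks with `errors.Cause`, so the original error stays reachable through the wrapper. The `restic/errors` package itself is not shown in this diff; the standalone sketch below assumes it re-exports the `Wrap`/`Cause` helpers from github.com/pkg/errors, which is what the call sites suggest:

    package main

    import (
        "fmt"
        "os"

        "github.com/pkg/errors"
    )

    func statFile(name string) (os.FileInfo, error) {
        fi, err := os.Stat(name)
        if err != nil {
            // attach the operation name; the wrapped error keeps the original
            return nil, errors.Wrap(err, "Stat")
        }
        return fi, nil
    }

    func main() {
        _, err := statFile("/does/not/exist")
        // errors.Cause unwraps to the original *os.PathError, so
        // os.IsNotExist still works on wrapped errors.
        fmt.Println(os.IsNotExist(errors.Cause(err)))
    }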
@@ -4,8 +4,8 @@ import (
     "fmt"
     "io/ioutil"
     "os"
+    "restic"

-    "restic/backend"
     "restic/backend/local"
     "restic/backend/test"
 )
@@ -30,7 +30,7 @@ func createTempdir() error {
 }

 func init() {
-    test.CreateFn = func() (backend.Backend, error) {
+    test.CreateFn = func() (restic.Backend, error) {
         err := createTempdir()
         if err != nil {
             return nil, err
@@ -38,7 +38,7 @@ func init() {
         return local.Create(tempBackendDir)
     }

-    test.OpenFn = func() (backend.Backend, error) {
+    test.OpenFn = func() (restic.Backend, error) {
         err := createTempdir()
         if err != nil {
             return nil, err
@@ -4,9 +4,10 @@ package local

 import (
     "os"
+    "restic/fs"
 )

 // set file to readonly
 func setNewFileMode(f string, fi os.FileInfo) error {
-    return os.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
+    return fs.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
 }
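The mask `fi.Mode()&os.FileMode(^uint32(0222))` clears the write bits for owner, group, and other while preserving everything else. A minimal standalone check of the arithmetic:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        mode := os.FileMode(0644)
        // ^uint32(0222) flips all bits of the write mask, so the AND
        // drops exactly the three write permission bits.
        readonly := mode & os.FileMode(^uint32(0222))
        fmt.Printf("%o -> %o\n", mode, readonly) // prints: 644 -> 444
    }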
@@ -51,6 +51,13 @@ func TestMemBackendLoad(t *testing.T) {
     test.TestLoad(t)
 }

+func TestMemBackendLoadNegativeOffset(t *testing.T) {
+    if SkipMessage != "" {
+        t.Skip(SkipMessage)
+    }
+    test.TestLoadNegativeOffset(t)
+}
+
 func TestMemBackendSave(t *testing.T) {
     if SkipMessage != "" {
         t.Skip(SkipMessage)
@@ -1,28 +1,30 @@
 package mem

 import (
-    "errors"
     "io"
+    "restic"
     "sync"

-    "restic/backend"
+    "restic/errors"

     "restic/debug"
 )

 type entry struct {
-    Type backend.Type
+    Type restic.FileType
     Name string
 }

 type memMap map[entry][]byte

+// make sure that MemoryBackend implements backend.Backend
+var _ restic.Backend = &MemoryBackend{}
+
 // MemoryBackend is a mock backend that uses a map for storing all data in
 // memory. This should only be used for tests.
 type MemoryBackend struct {
     data memMap
     m    sync.Mutex
-
-    backend.MockBackend
 }

 // New returns a new backend that saves all data in a map in memory.
@@ -31,64 +33,17 @@ func New() *MemoryBackend {
         data: make(memMap),
     }

-    be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) {
-        return memTest(be, t, name)
-    }
-
-    be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) {
-        return memLoad(be, h, p, off)
-    }
-
-    be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error {
-        return memSave(be, h, p)
-    }
-
-    be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) {
-        return memStat(be, h)
-    }
-
-    be.MockBackend.RemoveFn = func(t backend.Type, name string) error {
-        return memRemove(be, t, name)
-    }
-
-    be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string {
-        return memList(be, t, done)
-    }
-
-    be.MockBackend.DeleteFn = func() error {
-        be.m.Lock()
-        defer be.m.Unlock()
-
-        be.data = make(memMap)
-        return nil
-    }
-
-    be.MockBackend.LocationFn = func() string {
-        return "Memory Backend"
-    }
-
-    debug.Log("MemoryBackend.New", "created new memory backend")
+    debug.Log("created new memory backend")

     return be
 }

-func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error {
+// Test returns whether a file exists.
+func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
     be.m.Lock()
     defer be.m.Unlock()

-    if _, ok := be.data[entry{t, name}]; ok {
-        return errors.New("already present")
-    }
-
-    be.data[entry{t, name}] = data
-    return nil
-}
-
-func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
-    be.m.Lock()
-    defer be.m.Unlock()
-
-    debug.Log("MemoryBackend.Test", "test %v %v", t, name)
+    debug.Log("test %v %v", t, name)

     if _, ok := be.data[entry{t, name}]; ok {
         return true, nil
@@ -97,7 +52,8 @@ func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
     return false, nil
 }

-func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) {
+// Load reads data from the backend.
+func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
     if err := h.Valid(); err != nil {
         return 0, err
     }
@@ -105,19 +61,24 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err
     be.m.Lock()
     defer be.m.Unlock()

-    if h.Type == backend.Config {
+    if h.Type == restic.ConfigFile {
         h.Name = ""
     }

-    debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))
+    debug.Log("get %v offset %v len %v", h, off, len(p))

     if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
         return 0, errors.New("no such data")
     }

     buf := be.data[entry{h.Type, h.Name}]
-    if off > int64(len(buf)) {
+    switch {
+    case off > int64(len(buf)):
         return 0, errors.New("offset beyond end of file")
+    case off < -int64(len(buf)):
+        off = 0
+    case off < 0:
+        off = int64(len(buf)) + off
     }

     buf = buf[off:]
@@ -131,7 +92,8 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err
     return n, nil
 }

-func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
+// Save adds new Data to the backend.
+func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
     if err := h.Valid(); err != nil {
         return err
     }
@@ -139,7 +101,7 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
     be.m.Lock()
     defer be.m.Unlock()

-    if h.Type == backend.Config {
+    if h.Type == restic.ConfigFile {
         h.Name = ""
     }

@@ -147,7 +109,7 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
         return errors.New("file already exists")
     }

-    debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)
+    debug.Log("save %v bytes at %v", len(p), h)
     buf := make([]byte, len(p))
     copy(buf, p)
     be.data[entry{h.Type, h.Name}] = buf
@@ -155,33 +117,35 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
     return nil
 }

-func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) {
+// Stat returns information about a file in the backend.
+func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
     be.m.Lock()
     defer be.m.Unlock()

     if err := h.Valid(); err != nil {
-        return backend.BlobInfo{}, err
+        return restic.FileInfo{}, err
     }

-    if h.Type == backend.Config {
+    if h.Type == restic.ConfigFile {
         h.Name = ""
     }

-    debug.Log("MemoryBackend.Stat", "stat %v", h)
+    debug.Log("stat %v", h)

     e, ok := be.data[entry{h.Type, h.Name}]
     if !ok {
-        return backend.BlobInfo{}, errors.New("no such data")
+        return restic.FileInfo{}, errors.New("no such data")
     }

-    return backend.BlobInfo{Size: int64(len(e))}, nil
+    return restic.FileInfo{Size: int64(len(e))}, nil
 }

-func memRemove(be *MemoryBackend, t backend.Type, name string) error {
+// Remove deletes a file from the backend.
+func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
     be.m.Lock()
     defer be.m.Unlock()

-    debug.Log("MemoryBackend.Remove", "get %v %v", t, name)
+    debug.Log("get %v %v", t, name)

     if _, ok := be.data[entry{t, name}]; !ok {
         return errors.New("no such data")
@@ -192,7 +156,8 @@ func memRemove(be *MemoryBackend, t backend.Type, name string) error {
     return nil
 }

-func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string {
+// List returns a channel which yields entries from the backend.
+func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
     be.m.Lock()
     defer be.m.Unlock()

@@ -206,7 +171,7 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str
         ids = append(ids, entry.Name)
     }

-    debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
+    debug.Log("list %v: %v", t, ids)

     go func() {
         defer close(ch)
@@ -221,3 +186,22 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str

     return ch
 }
+
+// Location returns the location of the backend (RAM).
+func (be *MemoryBackend) Location() string {
+    return "RAM"
+}
+
+// Delete removes all data in the backend.
+func (be *MemoryBackend) Delete() error {
+    be.m.Lock()
+    defer be.m.Unlock()
+
+    be.data = make(memMap)
+    return nil
+}
+
+// Close closes the backend.
+func (be *MemoryBackend) Close() error {
+    return nil
+}
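The new switch in the memory backend's Load gives negative offsets the same meaning across backends: an offset below zero is measured from the end of the data, and an offset more negative than the file is long falls back to the beginning. A standalone sketch of that rule (illustration only, not restic code):

    package main

    import "fmt"

    // effectiveOffset mirrors the three cases of the switch above.
    func effectiveOffset(off, size int64) (int64, error) {
        switch {
        case off > size:
            return 0, fmt.Errorf("offset beyond end of file")
        case off < -size:
            return 0, nil // more negative than the file is long: start at 0
        case off < 0:
            return size + off, nil // relative to the end
        }
        return off, nil
    }

    func main() {
        for _, off := range []int64{0, 5, -3, -100} {
            eff, err := effectiveOffset(off, 10)
            fmt.Println(off, "->", eff, err)
        }
    }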
@@ -1,19 +1,20 @@
 package mem_test

 import (
-    "errors"
+    "restic"

+    "restic/errors"

-    "restic/backend"
     "restic/backend/mem"
     "restic/backend/test"
 )

-var be backend.Backend
+var be restic.Backend

 //go:generate go run ../test/generate_backend_tests.go

 func init() {
-    test.CreateFn = func() (backend.Backend, error) {
+    test.CreateFn = func() (restic.Backend, error) {
         if be != nil {
             return nil, errors.New("temporary memory backend dir already exists")
         }
@@ -23,7 +24,7 @@ func init() {
         return be, nil
     }

-    test.OpenFn = func() (backend.Backend, error) {
+    test.OpenFn = func() (restic.Backend, error) {
         if be == nil {
             return nil, errors.New("repository not initialized")
         }
@@ -1,103 +0,0 @@
-package backend
-
-import "errors"
-
-// MockBackend implements a backend whose functions can be specified. This
-// should only be used for tests.
-type MockBackend struct {
-    CloseFn    func() error
-    LoadFn     func(h Handle, p []byte, off int64) (int, error)
-    SaveFn     func(h Handle, p []byte) error
-    StatFn     func(h Handle) (BlobInfo, error)
-    ListFn     func(Type, <-chan struct{}) <-chan string
-    RemoveFn   func(Type, string) error
-    TestFn     func(Type, string) (bool, error)
-    DeleteFn   func() error
-    LocationFn func() string
-}
-
-// Close the backend.
-func (m *MockBackend) Close() error {
-    if m.CloseFn == nil {
-        return nil
-    }
-
-    return m.CloseFn()
-}
-
-// Location returns a location string.
-func (m *MockBackend) Location() string {
-    if m.LocationFn == nil {
-        return ""
-    }
-
-    return m.LocationFn()
-}
-
-// Load loads data from the backend.
-func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) {
-    if m.LoadFn == nil {
-        return 0, errors.New("not implemented")
-    }
-
-    return m.LoadFn(h, p, off)
-}
-
-// Save data in the backend.
-func (m *MockBackend) Save(h Handle, p []byte) error {
-    if m.SaveFn == nil {
-        return errors.New("not implemented")
-    }
-
-    return m.SaveFn(h, p)
-}
-
-// Stat an object in the backend.
-func (m *MockBackend) Stat(h Handle) (BlobInfo, error) {
-    if m.StatFn == nil {
-        return BlobInfo{}, errors.New("not implemented")
-    }
-
-    return m.StatFn(h)
-}
-
-// List items of type t.
-func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string {
-    if m.ListFn == nil {
-        ch := make(chan string)
-        close(ch)
-        return ch
-    }
-
-    return m.ListFn(t, done)
-}
-
-// Remove data from the backend.
-func (m *MockBackend) Remove(t Type, name string) error {
-    if m.RemoveFn == nil {
-        return errors.New("not implemented")
-    }
-
-    return m.RemoveFn(t, name)
-}
-
-// Test for the existence of a specific item.
-func (m *MockBackend) Test(t Type, name string) (bool, error) {
-    if m.TestFn == nil {
-        return false, errors.New("not implemented")
-    }
-
-    return m.TestFn(t, name)
-}
-
-// Delete all data.
-func (m *MockBackend) Delete() error {
-    if m.DeleteFn == nil {
-        return errors.New("not implemented")
-    }
-
-    return m.DeleteFn()
-}
-
-// Make sure that MockBackend implements the backend interface.
-var _ Backend = &MockBackend{}
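The deleted MockBackend uses a common Go stubbing pattern: behavior is injected through function-typed fields, with a nil field meaning "not implemented". A minimal standalone sketch of the same idea (hypothetical names, for illustration only):

    package main

    import (
        "errors"
        "fmt"
    )

    type MockStore struct {
        SaveFn func(name string, data []byte) error
    }

    func (m *MockStore) Save(name string, data []byte) error {
        if m.SaveFn == nil {
            return errors.New("not implemented")
        }
        return m.SaveFn(name, data)
    }

    func main() {
        m := &MockStore{SaveFn: func(name string, data []byte) error {
            fmt.Printf("saved %q (%d bytes)\n", name, len(data))
            return nil
        }}
        _ = m.Save("config", []byte("hello"))
    }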
@@ -1,63 +0,0 @@
-package backend
-
-import (
-    "errors"
-    "io"
-)
-
-type readSeeker struct {
-    be     Backend
-    h      Handle
-    t      Type
-    name   string
-    offset int64
-    size   int64
-}
-
-// NewReadSeeker returns an io.ReadSeeker for the given object in the backend.
-func NewReadSeeker(be Backend, h Handle) io.ReadSeeker {
-    return &readSeeker{be: be, h: h}
-}
-
-func (rd *readSeeker) Read(p []byte) (int, error) {
-    n, err := rd.be.Load(rd.h, p, rd.offset)
-    rd.offset += int64(n)
-    return n, err
-}
-
-func (rd *readSeeker) Seek(offset int64, whence int) (n int64, err error) {
-    switch whence {
-    case 0:
-        rd.offset = offset
-    case 1:
-        rd.offset += offset
-    case 2:
-        if rd.size == 0 {
-            rd.size, err = rd.getSize()
-            if err != nil {
-                return 0, err
-            }
-        }
-
-        pos := rd.size + offset
-        if pos < 0 {
-            return 0, errors.New("invalid offset, before start of blob")
-        }
-
-        rd.offset = pos
-        return rd.offset, nil
-    default:
-        return 0, errors.New("invalid value for parameter whence")
-    }
-
-    return rd.offset, nil
-}
-
-func (rd *readSeeker) getSize() (int64, error) {
-    stat, err := rd.be.Stat(rd.h)
-    if err != nil {
-        return 0, err
-    }
-
-    return stat.Size, nil
-}
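The deleted readSeeker dispatches on the classic whence values 0, 1, and 2; the named constants io.SeekStart, io.SeekCurrent, and io.SeekEnd express the same cases. A standalone sketch of the offset arithmetic it performed (illustration only):

    package main

    import (
        "fmt"
        "io"
    )

    func newOffset(cur, size, offset int64, whence int) (int64, error) {
        switch whence {
        case io.SeekStart:
            return offset, nil
        case io.SeekCurrent:
            return cur + offset, nil
        case io.SeekEnd:
            pos := size + offset
            if pos < 0 {
                return 0, fmt.Errorf("invalid offset, before start of blob")
            }
            return pos, nil
        }
        return 0, fmt.Errorf("invalid value for parameter whence")
    }

    func main() {
        pos, err := newOffset(4, 100, -10, io.SeekEnd)
        fmt.Println(pos, err) // 90 <nil>
    }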
@@ -1,114 +0,0 @@
-package backend_test
-
-import (
-    "bytes"
-    "io"
-    "math/rand"
-    "restic/backend"
-    "restic/backend/mem"
-    "testing"
-
-    . "restic/test"
-)
-
-func abs(a int) int {
-    if a < 0 {
-        return -a
-    }
-
-    return a
-}
-
-func loadAndCompare(t testing.TB, rd io.ReadSeeker, size int, offset int64, expected []byte) {
-    var (
-        pos int64
-        err error
-    )
-
-    if offset >= 0 {
-        pos, err = rd.Seek(offset, 0)
-    } else {
-        pos, err = rd.Seek(offset, 2)
-    }
-    if err != nil {
-        t.Errorf("Seek(%d, 0) returned error: %v", offset, err)
-        return
-    }
-
-    if offset >= 0 && pos != offset {
-        t.Errorf("pos after seek is wrong, want %d, got %d", offset, pos)
-    } else if offset < 0 && pos != int64(size)+offset {
-        t.Errorf("pos after relative seek is wrong, want %d, got %d", int64(size)+offset, pos)
-    }
-
-    buf := make([]byte, len(expected))
-    n, err := rd.Read(buf)
-
-    // if we requested data beyond the end of the file, ignore
-    // ErrUnexpectedEOF error
-    if offset > 0 && len(buf) > size && err == io.ErrUnexpectedEOF {
-        err = nil
-        buf = buf[:size]
-    }
-
-    if offset < 0 && len(buf) > abs(int(offset)) && err == io.ErrUnexpectedEOF {
-        err = nil
-        buf = buf[:abs(int(offset))]
-    }
-
-    if n != len(buf) {
-        t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
-            len(buf), offset, len(buf), n)
-        return
-    }
-
-    if err != nil {
-        t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), offset, err)
-        return
-    }
-
-    buf = buf[:n]
-    if !bytes.Equal(buf, expected) {
-        t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), offset)
-        return
-    }
-}
-
-func TestReadSeeker(t *testing.T) {
-    b := mem.New()
-
-    length := rand.Intn(1<<24) + 2000
-
-    data := Random(23, length)
-    id := backend.Hash(data)
-
-    handle := backend.Handle{Type: backend.Data, Name: id.String()}
-    err := b.Save(handle, data)
-    if err != nil {
-        t.Fatalf("Save() error: %v", err)
-    }
-
-    for i := 0; i < 50; i++ {
-        l := rand.Intn(length + 2000)
-        o := rand.Intn(length + 2000)
-
-        if rand.Float32() > 0.5 {
-            o = -o
-        }
-
-        d := data
-        if o > 0 && o < len(d) {
-            d = d[o:]
-        } else {
-            o = len(d)
-            d = d[:0]
-        }
-
-        if l > 0 && l < len(d) {
-            d = d[:l]
-        }
-
-        rd := backend.NewReadSeeker(b, handle)
-        loadAndCompare(t, rd, len(data), int64(o), d)
-    }
-}
@@ -51,6 +51,13 @@ func TestRestBackendLoad(t *testing.T) {
     test.TestLoad(t)
 }

+func TestRestBackendLoadNegativeOffset(t *testing.T) {
+    if SkipMessage != "" {
+        t.Skip(SkipMessage)
+    }
+    test.TestLoadNegativeOffset(t)
+}
+
 func TestRestBackendSave(t *testing.T) {
     if SkipMessage != "" {
         t.Skip(SkipMessage)
@@ -1,9 +1,10 @@
 package rest

 import (
-    "errors"
     "net/url"
     "strings"
+
+    "restic/errors"
 )

 // Config contains all configuration necessary to connect to a REST server.
@@ -21,7 +22,7 @@ func ParseConfig(s string) (interface{}, error) {
     u, err := url.Parse(s)

     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "url.Parse")
     }

     cfg := Config{URL: u}
@@ -3,38 +3,40 @@ package rest
 import (
     "bytes"
     "encoding/json"
-    "errors"
     "fmt"
     "io"
     "net/http"
     "net/url"
     "path"
+    "restic"
     "strings"

+    "restic/errors"
+
     "restic/backend"
 )

 const connLimit = 10

 // restPath returns the path to the given resource.
-func restPath(url *url.URL, h backend.Handle) string {
+func restPath(url *url.URL, h restic.Handle) string {
     u := *url

     var dir string

     switch h.Type {
-    case backend.Config:
+    case restic.ConfigFile:
         dir = ""
         h.Name = "config"
-    case backend.Data:
+    case restic.DataFile:
         dir = backend.Paths.Data
-    case backend.Snapshot:
+    case restic.SnapshotFile:
         dir = backend.Paths.Snapshots
-    case backend.Index:
+    case restic.IndexFile:
         dir = backend.Paths.Index
-    case backend.Lock:
+    case restic.LockFile:
         dir = backend.Paths.Locks
-    case backend.Key:
+    case restic.KeyFile:
         dir = backend.Paths.Keys
     default:
         dir = string(h.Type)
@@ -52,7 +54,7 @@ type restBackend struct {
 }

 // Open opens the REST backend with the given config.
-func Open(cfg Config) (backend.Backend, error) {
+func Open(cfg Config) (restic.Backend, error) {
     connChan := make(chan struct{}, connLimit)
     for i := 0; i < connLimit; i++ {
         connChan <- struct{}{}
@@ -70,14 +72,28 @@ func (b *restBackend) Location() string {

 // Load returns the data stored in the backend for h at the given offset
 // and saves it in p. Load has the same semantics as io.ReaderAt.
-func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
     if err := h.Valid(); err != nil {
         return 0, err
     }

+    // invert offset
+    if off < 0 {
+        info, err := b.Stat(h)
+        if err != nil {
+            return 0, errors.Wrap(err, "Stat")
+        }
+
+        if -off > info.Size {
+            off = 0
+        } else {
+            off = info.Size + off
+        }
+    }
+
     req, err := http.NewRequest("GET", restPath(b.url, h), nil)
     if err != nil {
-        return 0, err
+        return 0, errors.Wrap(err, "http.NewRequest")
     }
     req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))))
     <-b.connChan
@@ -89,23 +105,23 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er
             e := resp.Body.Close()

             if err == nil {
-                err = e
+                err = errors.Wrap(e, "Close")
             }
         }()
     }

     if err != nil {
-        return 0, err
+        return 0, errors.Wrap(err, "client.Do")
     }
     if resp.StatusCode != 200 && resp.StatusCode != 206 {
-        return 0, fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
+        return 0, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
     }

     return io.ReadFull(resp.Body, p)
 }

 // Save stores data in the backend at the handle.
-func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
+func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
     if err := h.Valid(); err != nil {
         return err
     }
@@ -119,48 +135,48 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
             e := resp.Body.Close()

             if err == nil {
-                err = e
+                err = errors.Wrap(e, "Close")
             }
         }()
     }

     if err != nil {
-        return err
+        return errors.Wrap(err, "client.Post")
     }

     if resp.StatusCode != 200 {
-        return fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
+        return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
     }

     return nil
 }

 // Stat returns information about a blob.
-func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
+func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
     if err := h.Valid(); err != nil {
-        return backend.BlobInfo{}, err
+        return restic.FileInfo{}, err
     }

     <-b.connChan
     resp, err := b.client.Head(restPath(b.url, h))
     b.connChan <- struct{}{}
     if err != nil {
-        return backend.BlobInfo{}, err
+        return restic.FileInfo{}, errors.Wrap(err, "client.Head")
     }

     if err = resp.Body.Close(); err != nil {
-        return backend.BlobInfo{}, err
+        return restic.FileInfo{}, errors.Wrap(err, "Close")
     }

     if resp.StatusCode != 200 {
-        return backend.BlobInfo{}, fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
+        return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
     }

     if resp.ContentLength < 0 {
-        return backend.BlobInfo{}, errors.New("negative content length")
+        return restic.FileInfo{}, errors.New("negative content length")
     }

-    bi := backend.BlobInfo{
+    bi := restic.FileInfo{
         Size: resp.ContentLength,
     }

@@ -168,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
 }

 // Test returns true if a blob of the given type and name exists in the backend.
-func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
-    _, err := b.Stat(backend.Handle{Type: t, Name: name})
+func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
+    _, err := b.Stat(restic.Handle{Type: t, Name: name})
     if err != nil {
         return false, nil
     }
@@ -178,22 +194,22 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
 }

 // Remove removes the blob with the given name and type.
-func (b *restBackend) Remove(t backend.Type, name string) error {
-    h := backend.Handle{Type: t, Name: name}
+func (b *restBackend) Remove(t restic.FileType, name string) error {
+    h := restic.Handle{Type: t, Name: name}
     if err := h.Valid(); err != nil {
         return err
     }

     req, err := http.NewRequest("DELETE", restPath(b.url, h), nil)
     if err != nil {
-        return err
+        return errors.Wrap(err, "http.NewRequest")
     }
     <-b.connChan
     resp, err := b.client.Do(req)
     b.connChan <- struct{}{}

     if err != nil {
-        return err
+        return errors.Wrap(err, "client.Do")
     }

     if resp.StatusCode != 200 {
@@ -206,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error {
 // List returns a channel that yields all names of blobs of type t. A
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
-func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
     ch := make(chan string)

-    url := restPath(b.url, backend.Handle{Type: t})
+    url := restPath(b.url, restic.Handle{Type: t})
     if !strings.HasSuffix(url, "/") {
         url += "/"
     }
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"restic/backend"
|
||||
"restic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var restPathTests = []struct {
|
||||
Handle backend.Handle
|
||||
Handle restic.Handle
|
||||
URL *url.URL
|
||||
Result string
|
||||
}{
|
||||
{
|
||||
URL: parseURL("https://hostname.foo"),
|
||||
Handle: backend.Handle{
|
||||
Type: backend.Data,
|
||||
Handle: restic.Handle{
|
||||
Type: restic.DataFile,
|
||||
Name: "foobar",
|
||||
},
|
||||
Result: "https://hostname.foo/data/foobar",
|
||||
},
|
||||
{
|
||||
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
|
||||
Handle: backend.Handle{
|
||||
Type: backend.Lock,
|
||||
Handle: restic.Handle{
|
||||
Type: restic.LockFile,
|
||||
Name: "foobar",
|
||||
},
|
||||
Result: "https://hostname.foo:1234/prefix/repo/locks/foobar",
|
||||
},
|
||||
{
|
||||
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
|
||||
Handle: backend.Handle{
|
||||
Type: backend.Config,
|
||||
Handle: restic.Handle{
|
||||
Type: restic.ConfigFile,
|
||||
Name: "foobar",
|
||||
},
|
||||
Result: "https://hostname.foo:1234/prefix/repo/config",
|
||||
|
||||
@@ -1,12 +1,13 @@
 package rest_test

 import (
-    "errors"
     "fmt"
     "net/url"
     "os"
+    "restic"
+
+    "restic/errors"

-    "restic/backend"
     "restic/backend/rest"
     "restic/backend/test"
     . "restic/test"
@@ -30,13 +31,13 @@ func init() {
         URL: url,
     }

-    test.CreateFn = func() (backend.Backend, error) {
+    test.CreateFn = func() (restic.Backend, error) {
         be, err := rest.Open(cfg)
         if err != nil {
             return nil, err
         }

-        exists, err := be.Test(backend.Config, "")
+        exists, err := be.Test(restic.ConfigFile, "")
         if err != nil {
             return nil, err
         }
@@ -48,7 +49,7 @@ func init() {
         return be, nil
     }

-    test.OpenFn = func() (backend.Backend, error) {
+    test.OpenFn = func() (restic.Backend, error) {
         return rest.Open(cfg)
     }
 }
@@ -51,6 +51,13 @@ func TestS3BackendLoad(t *testing.T) {
     test.TestLoad(t)
 }

+func TestS3BackendLoadNegativeOffset(t *testing.T) {
+    if SkipMessage != "" {
+        t.Skip(SkipMessage)
+    }
+    test.TestLoadNegativeOffset(t)
+}
+
 func TestS3BackendSave(t *testing.T) {
     if SkipMessage != "" {
         t.Skip(SkipMessage)
@@ -1,10 +1,11 @@
 package s3

 import (
-    "errors"
     "net/url"
     "path"
     "strings"
+
+    "restic/errors"
 )

 // Config contains all configuration necessary to connect to an s3 compatible
@@ -31,7 +32,7 @@ func ParseConfig(s string) (interface{}, error) {
     // bucket name and prefix
     url, err := url.Parse(s[3:])
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "url.Parse")
     }

     if url.Path == "" {
@@ -2,13 +2,14 @@ package s3

 import (
     "bytes"
-    "errors"
     "io"
+    "restic"
     "strings"

+    "restic/errors"
+
     "github.com/minio/minio-go"

     "restic/backend"
     "restic/debug"
 )
@@ -24,32 +25,35 @@ type s3 struct {

 // Open opens the S3 backend at bucket and region. The bucket is created if it
 // does not exist yet.
-func Open(cfg Config) (backend.Backend, error) {
-    debug.Log("s3.Open", "open, config %#v", cfg)
+func Open(cfg Config) (restic.Backend, error) {
+    debug.Log("open, config %#v", cfg)

     client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "minio.New")
     }

     be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
     be.createConnections()

-    if err := client.BucketExists(cfg.Bucket); err != nil {
-        debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
+    ok, err := client.BucketExists(cfg.Bucket)
+    if err != nil {
+        debug.Log("BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
+        return nil, errors.Wrap(err, "client.BucketExists")
+    }
+
+    if !ok {
         // create new bucket with default ACL in default region
         err = client.MakeBucket(cfg.Bucket, "")

         if err != nil {
-            return nil, err
+            return nil, errors.Wrap(err, "client.MakeBucket")
         }
     }

     return be, nil
 }

-func (be *s3) s3path(t backend.Type, name string) string {
+func (be *s3) s3path(t restic.FileType, name string) string {
     var path string

     if be.prefix != "" {
@@ -57,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string {
     }
     path += string(t)

-    if t == backend.Config {
+    if t == restic.ConfigFile {
         return path
     }
     return path + "/" + name
@@ -77,53 +81,91 @@ func (be *s3) Location() string {

 // Load returns the data stored in the backend for h at the given offset
 // and saves it in p. Load has the same semantics as io.ReaderAt.
-func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
-    debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
-    path := be.s3path(h.Type, h.Name)
-    obj, err := be.client.GetObject(be.bucketname, path)
-    if err != nil {
-        debug.Log("s3.GetReader", " err %v", err)
-        return 0, err
-    }
+func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
+    var obj *minio.Object

-    if off > 0 {
-        _, err = obj.Seek(off, 0)
-        if err != nil {
-            return 0, err
-        }
-    }
+    debug.Log("%v, offset %v, len %v", h, off, len(p))
+    path := be.s3path(h.Type, h.Name)

     <-be.connChan
     defer func() {
         be.connChan <- struct{}{}
     }()

-    // This may not read the whole object, so ensure object
-    // is closed to avoid duplicate connections.
-    n, err := io.ReadFull(obj, p)
+    obj, err = be.client.GetObject(be.bucketname, path)
     if err != nil {
-        obj.Close()
-    } else {
-        err = obj.Close()
+        debug.Log(" err %v", err)
+        return 0, errors.Wrap(err, "client.GetObject")
     }
-    return n, err
+
+    // make sure that the object is closed properly.
+    defer func() {
+        e := obj.Close()
+        if err == nil {
+            err = errors.Wrap(e, "Close")
+        }
+    }()
+
+    info, err := obj.Stat()
+    if err != nil {
+        return 0, errors.Wrap(err, "obj.Stat")
+    }
+
+    // handle negative offsets
+    if off < 0 {
+        // if the negative offset is larger than the object itself, read from
+        // the beginning.
+        if -off > info.Size {
+            off = 0
+        } else {
+            // otherwise compute the offset from the end of the file.
+            off = info.Size + off
+        }
+    }
+
+    // return an error if the offset is beyond the end of the file
+    if off > info.Size {
+        return 0, errors.Wrap(io.EOF, "")
+    }
+
+    var nextError error
+
+    // manually create an io.ErrUnexpectedEOF
+    if off+int64(len(p)) > info.Size {
+        newlen := info.Size - off
+        p = p[:newlen]
+
+        nextError = io.ErrUnexpectedEOF
+
+        debug.Log(" capped buffer to %v byte", len(p))
+    }
+
+    n, err = obj.ReadAt(p, off)
+    if int64(n) == info.Size-off && errors.Cause(err) == io.EOF {
+        err = nil
+    }
+
+    if err == nil {
+        err = nextError
+    }
+
+    return n, err
 }

 // Save stores data in the backend at the handle.
-func (be s3) Save(h backend.Handle, p []byte) (err error) {
+func (be s3) Save(h restic.Handle, p []byte) (err error) {
     if err := h.Valid(); err != nil {
         return err
     }

-    debug.Log("s3.Save", "%v bytes at %d", len(p), h)
+    debug.Log("%v with %d bytes", h, len(p))

     path := be.s3path(h.Type, h.Name)

     // Check key does not already exist
     _, err = be.client.StatObject(be.bucketname, path)
     if err == nil {
-        debug.Log("s3.blob.Finalize()", "%v already exists", h)
+        debug.Log("%v already exists", h)
         return errors.New("key already exists")
     }

@@ -132,35 +174,46 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
         be.connChan <- struct{}{}
     }()

-    debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
+    debug.Log("PutObject(%v, %v, %v, %v)",
         be.bucketname, path, int64(len(p)), "binary/octet-stream")
     n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
-    debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)
+    debug.Log("%v -> %v bytes, err %#v", path, n, err)

-    return err
+    return errors.Wrap(err, "client.PutObject")
 }

 // Stat returns information about a blob.
-func (be s3) Stat(h backend.Handle) (backend.BlobInfo, error) {
-    debug.Log("s3.Stat", "%v", h)
+func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
+    debug.Log("%v", h)

     path := be.s3path(h.Type, h.Name)
-    obj, err := be.client.GetObject(be.bucketname, path)
+    var obj *minio.Object
+
+    obj, err = be.client.GetObject(be.bucketname, path)
     if err != nil {
-        debug.Log("s3.Stat", "GetObject() err %v", err)
-        return backend.BlobInfo{}, err
+        debug.Log("GetObject() err %v", err)
+        return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
     }

+    // make sure that the object is closed properly.
+    defer func() {
+        e := obj.Close()
+        if err == nil {
+            err = errors.Wrap(e, "Close")
+        }
+    }()
+
     fi, err := obj.Stat()
     if err != nil {
-        debug.Log("s3.Stat", "Stat() err %v", err)
-        return backend.BlobInfo{}, err
+        debug.Log("Stat() err %v", err)
+        return restic.FileInfo{}, errors.Wrap(err, "Stat")
     }

-    return backend.BlobInfo{Size: fi.Size}, nil
+    return restic.FileInfo{Size: fi.Size}, nil
 }

 // Test returns true if a blob of the given type and name exists in the backend.
-func (be *s3) Test(t backend.Type, name string) (bool, error) {
+func (be *s3) Test(t restic.FileType, name string) (bool, error) {
     found := false
     path := be.s3path(t, name)
     _, err := be.client.StatObject(be.bucketname, path)
@@ -173,18 +226,18 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) {
 }

 // Remove removes the blob with the given name and type.
-func (be *s3) Remove(t backend.Type, name string) error {
+func (be *s3) Remove(t restic.FileType, name string) error {
     path := be.s3path(t, name)
     err := be.client.RemoveObject(be.bucketname, path)
-    debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
-    return err
+    debug.Log("%v %v -> err %v", t, name, err)
+    return errors.Wrap(err, "client.RemoveObject")
 }

 // List returns a channel that yields all names of blobs of type t. A
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
-func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
-    debug.Log("s3.List", "listing %v", t)
+func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
+    debug.Log("listing %v", t)
     ch := make(chan string)

     prefix := be.s3path(t, "")
@@ -211,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
 }

 // Remove keys for a specified backend type.
-func (be *s3) removeKeys(t backend.Type) error {
+func (be *s3) removeKeys(t restic.FileType) error {
     done := make(chan struct{})
     defer close(done)
-    for key := range be.List(backend.Data, done) {
-        err := be.Remove(backend.Data, key)
+    for key := range be.List(restic.DataFile, done) {
+        err := be.Remove(restic.DataFile, key)
         if err != nil {
             return err
         }
@@ -226,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error {

 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *s3) Delete() error {
-    alltypes := []backend.Type{
-        backend.Data,
-        backend.Key,
-        backend.Lock,
-        backend.Snapshot,
-        backend.Index}
+    alltypes := []restic.FileType{
+        restic.DataFile,
+        restic.KeyFile,
+        restic.LockFile,
+        restic.SnapshotFile,
+        restic.IndexFile}

     for _, t := range alltypes {
         err := be.removeKeys(t)
@@ -240,7 +293,7 @@ func (be *s3) Delete() error {
         }
     }

-    return be.Remove(backend.Config, "")
+    return be.Remove(restic.ConfigFile, "")
 }

 // Close does nothing
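The new S3 Load caps the destination buffer when the requested range runs past the object and then reports io.ErrUnexpectedEOF, mirroring what io.ReadFull would have produced. A standalone sketch of that short-read handling (illustration only, over an in-memory byte slice rather than a minio object):

    package main

    import (
        "fmt"
        "io"
    )

    func readAtCapped(data []byte, p []byte, off int64) (int, error) {
        size := int64(len(data))
        var nextError error
        if off+int64(len(p)) > size {
            // cap the buffer to what is actually there and remember
            // that the caller asked for more
            p = p[:size-off]
            nextError = io.ErrUnexpectedEOF
        }
        n := copy(p, data[off:])
        return n, nextError
    }

    func main() {
        data := []byte("0123456789")
        buf := make([]byte, 8)
        n, err := readAtCapped(data, buf, 5)
        fmt.Println(n, err) // 5 unexpected EOF
    }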
@@ -1,12 +1,13 @@
 package s3_test

 import (
-    "errors"
     "fmt"
     "net/url"
     "os"
+    "restic"
+
+    "restic/errors"

-    "restic/backend"
     "restic/backend/s3"
     "restic/backend/test"
     . "restic/test"
@@ -37,13 +38,13 @@ func init() {
         cfg.UseHTTP = true
     }

-    test.CreateFn = func() (backend.Backend, error) {
+    test.CreateFn = func() (restic.Backend, error) {
         be, err := s3.Open(cfg)
         if err != nil {
             return nil, err
         }

-        exists, err := be.Test(backend.Config, "")
+        exists, err := be.Test(restic.ConfigFile, "")
         if err != nil {
             return nil, err
         }
@@ -55,7 +56,7 @@ func init() {
         return be, nil
     }

-    test.OpenFn = func() (backend.Backend, error) {
+    test.OpenFn = func() (restic.Backend, error) {
         return s3.Open(cfg)
     }
@@ -51,6 +51,13 @@ func TestSftpBackendLoad(t *testing.T) {
     test.TestLoad(t)
 }

+func TestSftpBackendLoadNegativeOffset(t *testing.T) {
+    if SkipMessage != "" {
+        t.Skip(SkipMessage)
+    }
+    test.TestLoadNegativeOffset(t)
+}
+
 func TestSftpBackendSave(t *testing.T) {
     if SkipMessage != "" {
         t.Skip(SkipMessage)
@@ -1,10 +1,11 @@
 package sftp

 import (
-    "errors"
     "net/url"
     "path"
     "strings"
+
+    "restic/errors"
 )

 // Config collects all information required to connect to an sftp server.
@@ -25,13 +26,18 @@ func ParseConfig(s string) (interface{}, error) {
         // parse the "sftp://user@host/path" url format
         url, err := url.Parse(s)
         if err != nil {
-            return nil, err
+            return nil, errors.Wrap(err, "url.Parse")
         }
         if url.User != nil {
             user = url.User.Username()
         }
         host = url.Host
-        dir = url.Path[1:]
+        dir = url.Path
+        if dir == "" {
+            return nil, errors.Errorf("invalid backend %q, no directory specified", s)
+        }
+
+        dir = dir[1:]
     case strings.HasPrefix(s, "sftp:"):
         // parse the sftp:user@host:path format, which means we'll get
         // "user@host:path" in s
@@ -74,3 +74,17 @@ func TestParseConfig(t *testing.T) {
         }
     }
 }
+
+var configTestsInvalid = []string{
+    "sftp://host:dir",
+}
+
+func TestParseConfigInvalid(t *testing.T) {
+    for i, test := range configTestsInvalid {
+        _, err := ParseConfig(test)
+        if err == nil {
+            t.Errorf("test %d: invalid config %s did not return an error", i, test)
+            continue
+        }
+    }
+}
@@ -1,20 +1,24 @@
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"restic"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"restic/errors"
|
||||
|
||||
"github.com/juju/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"restic/backend"
|
||||
"restic/debug"
|
||||
|
||||
"github.com/pkg/sftp"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -26,16 +30,29 @@ type SFTP struct {
|
||||
c *sftp.Client
|
||||
p string
|
||||
|
||||
cmd *exec.Cmd
|
||||
cmd *exec.Cmd
|
||||
result <-chan error
|
||||
}
|
||||
|
||||
var _ restic.Backend = &SFTP{}
|
||||
|
||||
func startClient(program string, args ...string) (*SFTP, error) {
|
||||
// Connect to a remote host and request the sftp subsystem via the 'ssh'
|
||||
// command. This assumes that passwordless login is correctly configured.
|
||||
cmd := exec.Command(program, args...)
|
||||
|
||||
// send errors from ssh to stderr
|
||||
cmd.Stderr = os.Stderr
|
||||
// prefix the errors with the program name
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cmd.StderrPipe")
|
||||
}
|
||||
|
||||
go func() {
|
||||
sc := bufio.NewScanner(stderr)
|
||||
for sc.Scan() {
|
||||
fmt.Fprintf(os.Stderr, "subprocess %v: %v\n", program, sc.Text())
|
||||
}
|
||||
}()
|
||||
|
||||
// ignore signals sent to the parent (e.g. SIGINT)
|
||||
cmd.SysProcAttr = ignoreSigIntProcAttr()
|
||||
@@ -43,25 +60,33 @@ func startClient(program string, args ...string) (*SFTP, error) {
|
||||
// get stdin and stdout
|
||||
wr, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return nil, errors.Wrap(err, "cmd.StdinPipe")
|
||||
}
|
||||
rd, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return nil, errors.Wrap(err, "cmd.StdoutPipe")
|
||||
}
|
||||
|
||||
// start the process
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatal(err)
|
||||
return nil, errors.Wrap(err, "cmd.Start")
|
||||
}
|
||||
|
||||
// wait in a different goroutine
|
||||
ch := make(chan error, 1)
|
||||
go func() {
|
||||
err := cmd.Wait()
|
||||
debug.Log("ssh command exited, err %v", err)
|
||||
ch <- errors.Wrap(err, "cmd.Wait")
|
||||
}()
|
||||
|
||||
// open the SFTP session
|
||||
client, err := sftp.NewClientPipe(rd, wr)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
|
||||
}
|
||||
|
||||
return &SFTP{c: client, cmd: cmd}, nil
|
||||
return &SFTP{c: client, cmd: cmd, result: ch}, nil
|
||||
}
|
||||
|
||||
func paths(dir string) []string {
|
||||
@@ -76,19 +101,35 @@ func paths(dir string) []string {
|
||||
}
|
||||
}
|
||||
|
||||
// clientError returns an error if the client has exited. Otherwise, nil is
|
||||
// returned immediately.
|
||||
func (r *SFTP) clientError() error {
|
||||
select {
|
||||
case err := <-r.result:
|
||||
debug.Log("client has exited with err %v", err)
|
||||
return err
|
||||
default:
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens an sftp backend. When the command is started via
|
||||
// exec.Command, it is expected to speak sftp on stdin/stdout. The backend
|
||||
// is expected at the given path.
|
||||
// is expected at the given path. `dir` must be delimited by forward slashes
|
||||
// ("/"), which is required by sftp.
|
||||
func Open(dir string, program string, args ...string) (*SFTP, error) {
|
||||
debug.Log("open backend with program %v, %v at %v", program, args, dir)
|
||||
sftp, err := startClient(program, args...)
|
||||
if err != nil {
|
||||
debug.Log("unable to start program: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// test if all necessary dirs and files are there
|
||||
for _, d := range paths(dir) {
|
||||
if _, err := sftp.c.Lstat(d); err != nil {
|
||||
return nil, fmt.Errorf("%s does not exist", d)
|
||||
return nil, errors.Errorf("%s does not exist", d)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,12 +155,15 @@ func buildSSHCommand(cfg Config) []string {
|
||||
// OpenWithConfig opens an sftp backend as described by the config by running
|
||||
// "ssh" with the appropiate arguments.
|
||||
func OpenWithConfig(cfg Config) (*SFTP, error) {
|
||||
debug.Log("open with config %v", cfg)
|
||||
return Open(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
|
||||
}
|
||||
|
||||
// Create creates all the necessary files and directories for a new sftp
|
||||
// backend at dir. Afterwards a new config blob should be created.
|
||||
// backend at dir. Afterwards a new config blob should be created. `dir` must
|
||||
// be delimited by forward slashes ("/"), which is required by sftp.
|
||||
func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
debug.Log("%v %v", program, args)
|
||||
sftp, err := startClient(program, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -139,14 +183,9 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
}
|
||||
}
|
||||
|
||||
err = sftp.c.Close()
|
||||
err = sftp.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = sftp.cmd.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "Close")
|
||||
}
|
||||
|
||||
// open backend
|
||||
@@ -156,6 +195,7 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
// CreateWithConfig creates an sftp backend as described by the config by running
|
||||
// "ssh" with the appropiate arguments.
|
||||
func CreateWithConfig(cfg Config) (*SFTP, error) {
|
||||
debug.Log("config %v", cfg)
|
||||
return Create(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
|
||||
}
|
||||
|
||||
@@ -170,9 +210,8 @@ func (r *SFTP) tempFile() (string, *sftp.File, error) {
|
||||
buf := make([]byte, tempfileRandomSuffixLength)
|
||||
_, err := io.ReadFull(rand.Reader, buf)
|
||||
if err != nil {
|
||||
return "", nil, errors.Annotatef(err,
|
||||
"unable to read %d random bytes for tempfile name",
|
||||
tempfileRandomSuffixLength)
|
||||
return "", nil, errors.Errorf("unable to read %d random bytes for tempfile name: %v",
|
||||
tempfileRandomSuffixLength, err)
|
||||
}
|
||||
|
||||
// construct tempfile name
|
||||
@@ -181,7 +220,7 @@ func (r *SFTP) tempFile() (string, *sftp.File, error) {
|
||||
// create file in temp dir
|
||||
f, err := r.c.Create(name)
|
||||
if err != nil {
|
||||
return "", nil, errors.Annotatef(err, "creating tempfile %q failed", name)
|
||||
return "", nil, errors.Errorf("creating tempfile %q failed: %v", name, err)
|
||||
}
|
||||
|
||||
return name, f, nil
|
||||
@@ -195,11 +234,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
|
||||
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
|
||||
}
|
||||
|
||||
// create parent directories
|
||||
errMkdirAll := r.mkdirAll(filepath.Dir(dir), backend.Modes.Dir)
|
||||
errMkdirAll := r.mkdirAll(path.Dir(dir), backend.Modes.Dir)
|
||||
|
||||
// create directory
|
||||
errMkdir := r.c.Mkdir(dir)
|
||||
@@ -208,11 +247,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
|
||||
fi, err = r.c.Lstat(dir)
|
||||
if err != nil {
|
||||
// return previous errors
|
||||
return fmt.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir)
|
||||
return errors.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
return fmt.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
|
||||
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
|
||||
}
|
||||
|
||||
// set mode
|
||||
@@ -220,12 +259,12 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
|
||||
}
|
||||
|
||||
// Rename temp file to final name according to type and name.
|
||||
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
|
||||
func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
|
||||
filename := r.filename(t, name)
|
||||
|
||||
// create directories if necessary
|
||||
if t == backend.Data {
|
||||
err := r.mkdirAll(filepath.Dir(filename), backend.Modes.Dir)
|
||||
if t == restic.DataFile {
|
||||
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -233,31 +272,33 @@ func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
|
||||
|
||||
// test if new file exists
|
||||
if _, err := r.c.Lstat(filename); err == nil {
|
||||
return fmt.Errorf("Close(): file %v already exists", filename)
|
||||
return errors.Errorf("Close(): file %v already exists", filename)
|
||||
}
|
||||
|
||||
err := r.c.Rename(oldname, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "Rename")
|
||||
}
|
||||
|
||||
// set mode to read-only
|
||||
fi, err := r.c.Lstat(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "Lstat")
|
||||
}
|
||||
|
||||
return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
|
||||
err = r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
|
||||
return errors.Wrap(err, "Chmod")
|
||||
}
|
||||
|
||||
// Join joins the given paths and cleans them afterwards.
|
||||
// Join joins the given paths and cleans them afterwards. This always uses
|
||||
// forward slashes, which is required by sftp.
|
||||
func Join(parts ...string) string {
|
||||
return filepath.Clean(strings.Join(parts, "/"))
|
||||
return path.Clean(path.Join(parts...))
|
||||
}
|
||||
|
||||
// Construct path for given backend.Type and name.
|
||||
func (r *SFTP) filename(t backend.Type, name string) string {
|
||||
if t == backend.Config {
|
||||
// Construct path for given restic.Type and name.
|
||||
func (r *SFTP) filename(t restic.FileType, name string) string {
|
||||
if t == restic.ConfigFile {
|
||||
return Join(r.p, "config")
|
||||
}
|
||||
|
||||
@@ -265,21 +306,21 @@ func (r *SFTP) filename(t backend.Type, name string) string {
|
||||
}
|
||||
|
||||
// Construct directory for given backend.Type.
|
||||
func (r *SFTP) dirname(t backend.Type, name string) string {
|
||||
func (r *SFTP) dirname(t restic.FileType, name string) string {
|
||||
var n string
|
||||
switch t {
|
||||
case backend.Data:
|
||||
case restic.DataFile:
|
||||
n = backend.Paths.Data
|
||||
if len(name) > 2 {
|
||||
n = Join(n, name[:2])
|
||||
}
|
||||
case backend.Snapshot:
|
||||
case restic.SnapshotFile:
|
||||
n = backend.Paths.Snapshots
|
||||
case backend.Index:
|
||||
case restic.IndexFile:
|
||||
n = backend.Paths.Index
|
||||
case backend.Lock:
|
||||
case restic.LockFile:
|
||||
n = backend.Paths.Locks
|
||||
case backend.Key:
|
||||
case restic.KeyFile:
|
||||
n = backend.Paths.Keys
|
||||
}
|
||||
return Join(r.p, n)
|
||||
@@ -287,45 +328,63 @@ func (r *SFTP) dirname(t backend.Type, name string) string {
|
||||
|
||||
// Load returns the data stored in the backend for h at the given offset
|
||||
// and saves it in p. Load has the same semantics as io.ReaderAt.
|
||||
func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
|
||||
func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
|
||||
debug.Log("load %v, %d bytes, offset %v", h, len(p), off)
|
||||
if err := r.clientError(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := h.Valid(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f, err := r.c.Open(r.filename(h.Type, h.Name))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, errors.Wrap(err, "Open")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := f.Close()
|
||||
if err == nil && e != nil {
|
||||
err = e
|
||||
err = errors.Wrap(e, "Close")
|
||||
}
|
||||
}()
|
||||
|
||||
if off > 0 {
|
||||
switch {
|
||||
case off > 0:
|
||||
_, err = f.Seek(off, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case off < 0:
|
||||
_, err = f.Seek(off, 2)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "Seek")
|
||||
}
|
||||
|
||||
return io.ReadFull(f, p)
|
||||
}
|
||||
|
||||
// Save stores data in the backend at the handle.
|
||||
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
|
||||
func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
|
||||
debug.Log("save %v bytes to %v", h, len(p))
|
||||
if err := r.clientError(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := h.Valid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filename, tmpfile, err := r.tempFile()
|
||||
debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
debug.Log("save %v (%d bytes) to %v", h, len(p), filename)
|
||||
|
||||
n, err := tmpfile.Write(p)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "Write")
|
||||
}
|
||||
|
||||
if n != len(p) {
|
||||
@@ -334,62 +393,74 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
|
||||
|
||||
err = tmpfile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "Close")
|
||||
}
|
||||
|
||||
err = r.renameFile(filename, h.Type, h.Name)
|
||||
debug.Log("sftp.Save", "save %v: rename %v: %v",
|
||||
h, filepath.Base(filename), err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sftp: renameFile: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
debug.Log("save %v: rename %v: %v",
|
||||
h, path.Base(filename), err)
|
||||
return err
|
||||
}

// Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("stat %v", h)
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}

if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, err
}

fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
if err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, errors.Wrap(err, "Lstat")
}

return backend.BlobInfo{Size: fi.Size()}, nil
return restic.FileInfo{Size: fi.Size()}, nil
}

// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
debug.Log("type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return false, err
}

_, err := r.c.Lstat(r.filename(t, name))
if os.IsNotExist(err) {
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}

if err != nil {
return false, err
return false, errors.Wrap(err, "Lstat")
}

return true, nil
}
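
A recurring change in this diff: raw errors are now wrapped via restic/errors (a github.com/pkg/errors-style package), so sentinel checks such as os.IsNotExist must unwrap with errors.Cause first. A minimal sketch of why, assuming pkg/errors semantics:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	_, statErr := os.Stat("/does/not/exist")
	wrapped := errors.Wrap(statErr, "Lstat")

	fmt.Println(os.IsNotExist(wrapped))               // false: the wrapper hides the cause
	fmt.Println(os.IsNotExist(errors.Cause(wrapped))) // true: Cause unwraps to the original
}
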

// Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error {
func (r *SFTP) Remove(t restic.FileType, name string) error {
debug.Log("type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return err
}

return r.c.Remove(r.filename(t, name))
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("list all %v", t)
ch := make(chan string)

go func() {
defer close(ch)

if t == backend.Data {
if t == restic.DataFile {
// read first level
basedir := r.dirname(t, "")

@@ -448,18 +519,30 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {

}

var closeTimeout = 2 * time.Second

// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
debug.Log("")
if r == nil {
return nil
}

err := r.c.Close()
debug.Log("sftp.Close", "Close returned error %v", err)
debug.Log("Close returned error %v", err)

// wait for closeTimeout before killing the process
select {
case err := <-r.result:
return err
case <-time.After(closeTimeout):
}

if err := r.cmd.Process.Kill(); err != nil {
return err
}

return r.cmd.Wait()
// get the error, but ignore it
<-r.result
return nil
}
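
Close now gives the child process a grace period before killing it, rather than blocking in Wait forever. The shape of that logic in isolation, a sketch with result standing in for the goroutine that runs cmd.Wait:

package main

import (
	"os/exec"
	"time"
)

const closeTimeout = 2 * time.Second

// closeCmd mirrors the shutdown logic above: wait up to closeTimeout
// for the process to exit, then kill it and drain the pending Wait.
func closeCmd(cmd *exec.Cmd, result <-chan error) error {
	select {
	case err := <-result: // the process exited within the grace period
		return err
	case <-time.After(closeTimeout):
	}
	if err := cmd.Process.Kill(); err != nil {
		return err
	}
	<-result // collect the Wait error, but ignore it
	return nil
}
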

@@ -1,13 +1,14 @@
package sftp_test

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"restic"
"strings"

"restic/backend"
"restic/errors"

"restic/backend/sftp"
"restic/backend/test"

@@ -28,7 +29,6 @@ func createTempdir() error {
return err
}

fmt.Printf("created new test backend at %v\n", tempdir)
tempBackendDir = tempdir
return nil
}
@@ -39,7 +39,7 @@ func init() {
for _, dir := range strings.Split(TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
_, err := os.Stat(testpath)
if !os.IsNotExist(err) {
if !os.IsNotExist(errors.Cause(err)) {
sftpserver = testpath
break
}
@@ -50,21 +50,23 @@ func init() {
return
}

test.CreateFn = func() (backend.Backend, error) {
args := []string{"-e"}

test.CreateFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err
}

return sftp.Create(tempBackendDir, sftpserver)
return sftp.Create(tempBackendDir, sftpserver, args...)
}

test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err
}
return sftp.Open(tempBackendDir, sftpserver)
return sftp.Open(tempBackendDir, sftpserver, args...)
}

test.CleanupFn = func() error {
@@ -72,7 +74,6 @@ func init() {
return nil
}

fmt.Printf("removing test backend at %v\n", tempBackendDir)
err := os.RemoveAll(tempBackendDir)
tempBackendDir = ""
return err

@@ -51,6 +51,13 @@ func TestTestBackendLoad(t *testing.T) {
test.TestLoad(t)
}

func TestTestBackendLoadNegativeOffset(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoadNegativeOffset(t)
}

func TestTestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
@@ -4,29 +4,31 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"reflect"
"restic"
"sort"
"testing"

"restic/errors"
"restic/test"

"restic/backend"
. "restic/test"
)

// CreateFn is a function that creates a temporary repository for the tests.
var CreateFn func() (backend.Backend, error)
var CreateFn func() (restic.Backend, error)

// OpenFn is a function that opens a previously created temporary repository.
var OpenFn func() (backend.Backend, error)
var OpenFn func() (restic.Backend, error)

// CleanupFn removes temporary files and directories created during the tests.
var CleanupFn func() error

var but backend.Backend // backendUnderTest
var but restic.Backend // backendUnderTest
var butInitialized bool

func open(t testing.TB) backend.Backend {
func open(t testing.TB) restic.Backend {
if OpenFn == nil {
t.Fatal("OpenFn not set")
}
@@ -116,7 +118,7 @@ func TestCreateWithConfig(t testing.TB) {
defer close(t)

// save a config
store(t, b, backend.Config, []byte("test config"))
store(t, b, restic.ConfigFile, []byte("test config"))

// now create the backend again, this must fail
_, err := CreateFn()
@@ -125,7 +127,7 @@ func TestCreateWithConfig(t testing.TB) {
}

// remove config
err = b.Remove(backend.Config, "")
err = b.Remove(restic.ConfigFile, "")
if err != nil {
t.Fatalf("unexpected error removing config: %v", err)
}
@@ -150,12 +152,12 @@ func TestConfig(t testing.TB) {
var testString = "Config"

// create config and read it back
_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil)
_, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil)
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}

err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString))
err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
@@ -163,7 +165,7 @@ func TestConfig(t testing.TB) {
// try accessing the config with different names, should all return the
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.Config, Name: name}
h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(b, h, nil)
if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err)
@@ -180,22 +182,22 @@ func TestLoad(t testing.TB) {
b := open(t)
defer close(t)

_, err := b.Load(backend.Handle{}, nil, 0)
_, err := b.Load(restic.Handle{}, nil, 0)
if err == nil {
t.Fatalf("Load() did not return an error for invalid handle")
}

_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0)
_, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}

length := rand.Intn(1<<24) + 2000

data := Random(23, length)
id := backend.Hash(data)
data := test.Random(23, length)
id := restic.Hash(data)

handle := backend.Handle{Type: backend.Data, Name: id.String()}
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
@@ -220,9 +222,60 @@ func TestLoad(t testing.TB) {
buf := make([]byte, l)
n, err := b.Load(handle, buf, int64(o))

// if we requested data beyond the end of the file, ignore
// if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
if l > len(d) && err == io.ErrUnexpectedEOF {
if l > len(d) {
if errors.Cause(err) != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
}
err = nil
buf = buf[:len(d)]
}

if err != nil {
t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
continue
}

if n != len(buf) {
t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
len(buf), int64(o), len(buf), n)
continue
}

buf = buf[:n]
if !bytes.Equal(buf, d) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
continue
}
}

// test with negative offset
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)

d := data
if o < len(d) {
d = d[len(d)-o:]
} else {
o = 0
}

if l > 0 && l < len(d) {
d = d[:l]
}

buf := make([]byte, l)
n, err := b.Load(handle, buf, -int64(o))

// if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
if l > len(d) {
if errors.Cause(err) != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
continue
}
err = nil
buf = buf[:len(d)]
}
@@ -252,34 +305,90 @@ func TestLoad(t testing.TB) {
t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
}

if err != io.ErrUnexpectedEOF {
if errors.Cause(err) != io.ErrUnexpectedEOF {
t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
}

OK(t, b.Remove(backend.Data, id.String()))
test.OK(t, b.Remove(restic.DataFile, id.String()))
}

// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
func TestLoadNegativeOffset(t testing.TB) {
b := open(t)
defer close(t)

length := rand.Intn(1<<24) + 2000

data := test.Random(23, length)
id := restic.Hash(data)

handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err := b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
}

// test normal reads
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := -rand.Intn(length + 2000)

buf := make([]byte, l)
n, err := b.Load(handle, buf, int64(o))

// if we requested data beyond the end of the file, require
// ErrUnexpectedEOF error
if len(buf) > -o {
if errors.Cause(err) != io.ErrUnexpectedEOF {
t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o)
continue
}
err = nil
buf = buf[:-o]
}

if err != nil {
t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err)
continue
}

if n != len(buf) {
t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n)
continue
}

p := len(data) + o
if !bytes.Equal(buf, data[p:p+len(buf)]) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o)
continue
}

}

test.OK(t, b.Remove(restic.DataFile, id.String()))
}

// TestSave tests saving data in the backend.
func TestSave(t testing.TB) {
b := open(t)
defer close(t)
var id backend.ID
var id restic.ID

for i := 0; i < 10; i++ {
length := rand.Intn(1<<23) + 200000
data := Random(23, length)
data := test.Random(23, length)
// use the first 32 byte as the ID
copy(id[:], data)

h := backend.Handle{
Type: backend.Data,
h := restic.Handle{
Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i),
}
err := b.Save(h, data)
OK(t, err)
test.OK(t, err)

buf, err := backend.LoadAll(b, h, nil)
OK(t, err)
test.OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
}
@@ -289,7 +398,7 @@ func TestSave(t testing.TB) {
}

fi, err := b.Stat(h)
OK(t, err)
test.OK(t, err)

if fi.Size != int64(len(data)) {
t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
@@ -320,7 +429,7 @@ func TestSaveFilenames(t testing.TB) {
defer close(t)

for i, test := range filenameTests {
h := backend.Handle{Name: test.name, Type: backend.Data}
h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(h, []byte(test.data))
if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err)
@@ -355,18 +464,10 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}

func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data)
OK(t, err)
}

func read(t testing.TB, rd io.Reader, expectedData []byte) {
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if expectedData != nil {
Equals(t, expectedData, buf)
}
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
id := restic.Hash(data)
err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data)
test.OK(t, err)
}

// TestBackend tests all functions of the backend.
@@ -374,90 +475,90 @@ func TestBackend(t testing.TB) {
b := open(t)
defer close(t)

for _, tpe := range []backend.Type{
backend.Data, backend.Key, backend.Lock,
backend.Snapshot, backend.Index,
for _, tpe := range []restic.FileType{
restic.DataFile, restic.KeyFile, restic.LockFile,
restic.SnapshotFile, restic.IndexFile,
} {
// detect non-existing files
for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)

// test if blob is already in repository
ret, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "blob was found to exist before creating")
test.OK(t, err)
test.Assert(t, !ret, "blob was found to exist before creating")

// try to stat a not existing blob
h := backend.Handle{Type: tpe, Name: id.String()}
h := restic.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h)
Assert(t, err != nil, "blob data could be extracted before creation")
test.Assert(t, err != nil, "blob data could be extracted before creation")

// try to read not existing blob
_, err = b.Load(h, nil, 0)
Assert(t, err != nil, "blob reader could be obtained before creation")
test.Assert(t, err != nil, "blob reader could be obtained before creation")

// try to get string out, should fail
ret, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "id %q was found (but should not have)", test.id)
test.OK(t, err)
test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
}

// add files
for _, test := range testStrings {
store(t, b, tpe, []byte(test.data))
for _, ts := range testStrings {
store(t, b, tpe, []byte(ts.data))

// test Load()
h := backend.Handle{Type: tpe, Name: test.id}
h := restic.Handle{Type: tpe, Name: ts.id}
buf, err := backend.LoadAll(b, h, nil)
OK(t, err)
Equals(t, test.data, string(buf))
test.OK(t, err)
test.Equals(t, ts.data, string(buf))

// try to read it out with an offset and a length
start := 1
end := len(test.data) - 2
end := len(ts.data) - 2
length := end - start

buf2 := make([]byte, length)
n, err := b.Load(h, buf2, int64(start))
OK(t, err)
Equals(t, length, n)
Equals(t, test.data[start:end], string(buf2))
test.OK(t, err)
test.Equals(t, length, n)
test.Equals(t, ts.data[start:end], string(buf2))
}

// test adding the first file again
test := testStrings[0]
ts := testStrings[0]

// create blob
err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
Assert(t, err != nil, "expected error, got %v", err)
err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
test.Assert(t, err != nil, "expected error, got %v", err)

// remove and recreate
err = b.Remove(tpe, test.id)
OK(t, err)
err = b.Remove(tpe, ts.id)
test.OK(t, err)

// test that the blob is gone
ok, err := b.Test(tpe, test.id)
OK(t, err)
Assert(t, ok == false, "removed blob still present")
ok, err := b.Test(tpe, ts.id)
test.OK(t, err)
test.Assert(t, ok == false, "removed blob still present")

// create blob
err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
OK(t, err)
err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
test.OK(t, err)

// list items
IDs := backend.IDs{}
IDs := restic.IDs{}

for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
IDs = append(IDs, id)
}

list := backend.IDs{}
list := restic.IDs{}

for s := range b.List(tpe, nil) {
list = append(list, ParseID(s))
list = append(list, restic.TestParseID(s))
}

if len(IDs) != len(list) {
@@ -472,19 +573,19 @@ func TestBackend(t testing.TB) {
}

// remove content if requested
if TestCleanupTempDirs {
for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
if test.TestCleanupTempDirs {
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)

found, err := b.Test(tpe, id.String())
OK(t, err)
test.OK(t, err)

OK(t, b.Remove(tpe, id.String()))
test.OK(t, b.Remove(tpe, id.String()))

found, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
test.OK(t, err)
test.Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
}
}
}
@@ -495,7 +596,7 @@ func TestDelete(t testing.TB) {
b := open(t)
defer close(t)

be, ok := b.(backend.Deleter)
be, ok := b.(restic.Deleter)
if !ok {
return
}
@@ -513,7 +614,7 @@ func TestCleanup(t testing.TB) {
return
}

if !TestCleanupTempDirs {
if !test.TestCleanupTempDirs {
t.Logf("not cleaning up backend")
return
}

@@ -1,19 +1,20 @@
package test_test

import (
"errors"
"restic"

"restic/errors"

"restic/backend"
"restic/backend/mem"
"restic/backend/test"
)

var be backend.Backend
var be restic.Backend

//go:generate go run ../test/generate_backend_tests.go

func init() {
test.CreateFn = func() (backend.Backend, error) {
test.CreateFn = func() (restic.Backend, error) {
if be != nil {
return nil, errors.New("temporary memory backend dir already exists")
}
@@ -23,7 +24,7 @@ func init() {
return be, nil
}

test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
if be == nil {
return nil, errors.New("repository not initialized")
}

@@ -1,15 +1,20 @@
package backend

import "io"
import (
"io"
"restic"

"restic/errors"
)

// LoadAll reads all data stored in the backend for the handle. The buffer buf
// is resized to accomodate all data in the blob. Errors returned by be.Load()
// are passed on, except io.ErrUnexpectedEOF is silenced and nil returned
// instead, since it means this function is working properly.
func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) {
func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
fi, err := be.Stat(h)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Stat")
}

if fi.Size > int64(len(buf)) {
@@ -17,7 +22,7 @@ func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) {
}

n, err := be.Load(h, buf, 0)
if err == io.ErrUnexpectedEOF {
if errors.Cause(err) == io.ErrUnexpectedEOF {
err = nil
}
buf = buf[:n]
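
LoadAll turns the offset-based Load into a whole-file read: it stats the handle, grows the buffer if needed, and treats io.ErrUnexpectedEOF as success. A hedged usage sketch (loadWhole is a hypothetical caller, not part of the package):

// loadWhole shows the contract: pass nil (or a reusable buffer) and
// get back a slice sized to the complete file contents.
func loadWhole(b restic.Backend, h restic.Handle) ([]byte, error) {
	buf, err := backend.LoadAll(b, h, nil)
	if err != nil {
		return nil, err // Stat or Load failed for a real reason
	}
	return buf, nil // io.ErrUnexpectedEOF was already silenced by LoadAll
}
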

@@ -3,6 +3,7 @@ package backend_test
import (
"bytes"
"math/rand"
"restic"
"testing"

"restic/backend"
@@ -19,11 +20,11 @@ func TestLoadAll(t *testing.T) {
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)

id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
id := restic.Hash(data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)

buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil)
buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil)
OK(t, err)

if len(buf) != len(data) {
@@ -44,12 +45,12 @@ func TestLoadSmallBuffer(t *testing.T) {
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)

id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
id := restic.Hash(data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)

buf := make([]byte, len(data)-23)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err)

if len(buf) != len(data) {
@@ -70,12 +71,12 @@ func TestLoadLargeBuffer(t *testing.T) {
for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB)

id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
id := restic.Hash(data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)

buf := make([]byte, len(data)+100)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err)

if len(buf) != len(data) {

@@ -1,6 +1,6 @@
package backend
package restic

import "errors"
import "restic/errors"

// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
// could be found.
@@ -10,10 +10,10 @@ var ErrNoIDPrefixFound = errors.New("no ID found")
// prefix are found.
var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")

// Find loads the list of all blobs of type t and searches for names which
// Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned.
// If more than one is found, nil and ErrMultipleIDMatches is returned.
func Find(be Lister, t Type, prefix string) (string, error) {
func Find(be Lister, t FileType, prefix string) (string, error) {
done := make(chan struct{})
defer close(done)

@@ -41,7 +41,7 @@ const minPrefixLength = 8

// PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique.
func PrefixLength(be Lister, t Type) (int, error) {
func PrefixLength(be Lister, t FileType) (int, error) {
done := make(chan struct{})
defer close(done)

@@ -52,8 +52,9 @@ func PrefixLength(be Lister, t Type) (int, error) {
}

// select prefixes of length l, test if the last one is the same as the current one
id := ID{}
outer:
for l := minPrefixLength; l < IDSize; l++ {
for l := minPrefixLength; l < len(id); l++ {
var last string

for _, name := range list {
@@ -66,5 +67,5 @@ outer:
return l, nil
}

return IDSize, nil
return len(id), nil
}
src/restic/backend_find_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package restic

import (
"testing"
)

type mockBackend struct {
list func(FileType, <-chan struct{}) <-chan string
}

func (m mockBackend) List(t FileType, done <-chan struct{}) <-chan string {
return m.list(t, done)
}

var samples = IDs{
TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}

func TestPrefixLength(t *testing.T) {
list := samples

m := mockBackend{}
m.list = func(t FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for _, id := range list {
select {
case ch <- id.String():
case <-done:
return
}
}
}()
return ch
}

l, err := PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}

list = samples[:3]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}

list = samples[3:]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 8 {
t.Errorf("wrong prefix length returned, want %d, got %d", 8, l)
}
}
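
The expected values follow from the samples above: the first three IDs share the 18-character prefix 20bdc1402a6fc9b633 and first differ at character 19, so 19 is the shortest unique prefix length, while the five IDs in samples[3:] already differ within the 8-character minimum. A sketch of the property PrefixLength searches for (prefixesUnique is a hypothetical helper, not the implementation):

// prefixesUnique reports whether the first l characters of every name
// are pairwise distinct; names are assumed to be fixed-length hex IDs.
func prefixesUnique(names []string, l int) bool {
	seen := make(map[string]bool)
	for _, name := range names {
		p := name[:l]
		if seen[p] {
			return false
		}
		seen[p] = true
	}
	return true
}
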
src/restic/blob.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package restic

import (
"fmt"

"restic/errors"
)

// Blob is one part of a file or a tree.
type Blob struct {
Type BlobType
Length uint
ID ID
Offset uint
}

// PackedBlob is a blob stored within a file.
type PackedBlob struct {
Blob
PackID ID
}

// BlobHandle identifies a blob of a given type.
type BlobHandle struct {
ID ID
Type BlobType
}

func (h BlobHandle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}

// BlobType specifies what a blob stored in a pack is.
type BlobType uint8

// These are the blob types that can be stored in a pack.
const (
InvalidBlob BlobType = iota
DataBlob
TreeBlob
)

func (t BlobType) String() string {
switch t {
case DataBlob:
return "data"
case TreeBlob:
return "tree"
case InvalidBlob:
return "invalid"
}

return fmt.Sprintf("<BlobType %d>", t)
}

// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) {
switch t {
case DataBlob:
return []byte(`"data"`), nil
case TreeBlob:
return []byte(`"tree"`), nil
}

return nil, errors.New("unknown blob type")
}

// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) {
case `"data"`:
*t = DataBlob
case `"tree"`:
*t = TreeBlob
default:
return errors.New("unknown blob type")
}

return nil
}

// BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
type BlobHandles []BlobHandle

func (h BlobHandles) Len() int {
return len(h)
}

func (h BlobHandles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}

if b < h[j].ID[k] {
return true
}

return false
}

return h[i].Type < h[j].Type
}

func (h BlobHandles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}

func (h BlobHandles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}
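
BlobHandles orders by ID bytes first and falls back to Type when the IDs are equal. A small sketch of that tiebreaker, relying only on the declarations above (zero IDs are used purely for illustration):

package restic

import (
	"fmt"
	"sort"
)

// Equal IDs fall through the byte loop in Less, so the Type tiebreaker
// decides: DataBlob (1) sorts before TreeBlob (2).
func ExampleBlobHandles_sort() {
	h := BlobHandles{
		{Type: TreeBlob}, // zero ID
		{Type: DataBlob}, // zero ID
	}
	sort.Sort(h)
	fmt.Println(h[0].Type, h[1].Type)
	// Output: data tree
}
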
src/restic/blob_set.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package restic

import "sort"

// BlobSet is a set of blobs.
type BlobSet map[BlobHandle]struct{}

// NewBlobSet returns a new BlobSet, populated with ids.
func NewBlobSet(handles ...BlobHandle) BlobSet {
m := make(BlobSet)
for _, h := range handles {
m[h] = struct{}{}
}

return m
}

// Has returns true iff id is contained in the set.
func (s BlobSet) Has(h BlobHandle) bool {
_, ok := s[h]
return ok
}

// Insert adds id to the set.
func (s BlobSet) Insert(h BlobHandle) {
s[h] = struct{}{}
}

// Delete removes id from the set.
func (s BlobSet) Delete(h BlobHandle) {
delete(s, h)
}

// Equals returns true iff s equals other.
func (s BlobSet) Equals(other BlobSet) bool {
if len(s) != len(other) {
return false
}

for h := range s {
if _, ok := other[h]; !ok {
return false
}
}

return true
}

// Merge adds the blobs in other to the current set.
func (s BlobSet) Merge(other BlobSet) {
for h := range other {
s.Insert(h)
}
}

// Intersect returns a new set containing the handles that are present in both sets.
func (s BlobSet) Intersect(other BlobSet) (result BlobSet) {
result = NewBlobSet()

set1 := s
set2 := other

// iterate over the smaller set
if len(set2) < len(set1) {
set1, set2 = set2, set1
}

for h := range set1 {
if set2.Has(h) {
result.Insert(h)
}
}

return result
}

// Sub returns a new set containing all handles that are present in s but not in
// other.
func (s BlobSet) Sub(other BlobSet) (result BlobSet) {
result = NewBlobSet()
for h := range s {
if !other.Has(h) {
result.Insert(h)
}
}

return result
}

// List returns a sorted slice of all BlobHandle in the set.
func (s BlobSet) List() BlobHandles {
list := make(BlobHandles, 0, len(s))
for h := range s {
list = append(list, h)
}

sort.Sort(list)

return list
}

func (s BlobSet) String() string {
str := s.List().String()
if len(str) < 2 {
return "{}"
}

return "{" + str[1:len(str)-1] + "}"
}
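
The set operations compose in the usual way. A short sketch using only the methods defined above; the zero-ID handles are illustrative:

package restic

import "fmt"

// Intersect keeps what both sets share; Sub keeps what only the
// receiver... here, only b... contains.
func ExampleBlobSet_ops() {
	a := NewBlobSet(BlobHandle{Type: DataBlob})
	b := NewBlobSet(BlobHandle{Type: DataBlob}, BlobHandle{Type: TreeBlob})

	fmt.Println(len(a.Intersect(b))) // 1: the data handle is in both sets
	fmt.Println(len(b.Sub(a)))       // 1: only the tree handle remains
	// Output:
	// 1
	// 1
}
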
src/restic/blob_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package restic

import (
"encoding/json"
"testing"
)

var blobTypeJSON = []struct {
t BlobType
res string
}{
{DataBlob, `"data"`},
{TreeBlob, `"tree"`},
}

func TestBlobTypeJSON(t *testing.T) {
for _, test := range blobTypeJSON {
// test serialize
buf, err := json.Marshal(test.t)
if err != nil {
t.Error(err)
continue
}
if test.res != string(buf) {
t.Errorf("want %q, got %q", test.res, string(buf))
continue
}

// test unserialize
var v BlobType
err = json.Unmarshal([]byte(test.res), &v)
if err != nil {
t.Error(err)
continue
}
if test.t != v {
t.Errorf("want %v, got %v", test.t, v)
continue
}
}
}
@@ -1,289 +0,0 @@
package restic

import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"

"restic/backend"
"restic/debug"
"restic/repository"
)

// Cache is used to locally cache items from a repository.
type Cache struct {
base string
}

// NewCache returns a new cache at cacheDir. If it is the empty string, the
// default cache location is chosen.
func NewCache(repo *repository.Repository, cacheDir string) (*Cache, error) {
var err error

if cacheDir == "" {
cacheDir, err = getCacheDir()
if err != nil {
return nil, err
}
}

basedir := filepath.Join(cacheDir, repo.Config.ID)
debug.Log("Cache.New", "opened cache at %v", basedir)

return &Cache{base: basedir}, nil
}

// Has checks if the local cache has the id.
func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return false, err
}
fd, err := os.Open(filename)
defer fd.Close()

if err != nil {
if os.IsNotExist(err) {
debug.Log("Cache.Has", "test for file %v: not cached", filename)
return false, nil
}

debug.Log("Cache.Has", "test for file %v: error %v", filename, err)
return false, err
}

debug.Log("Cache.Has", "test for file %v: is cached", filename)
return true, nil
}

// Store returns an io.WriteCloser that is used to save new information to the
// cache. The returned io.WriteCloser must be closed by the caller after all
// data has been written.
func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return nil, err
}

dirname := filepath.Dir(filename)
err = os.MkdirAll(dirname, 0700)
if err != nil {
return nil, err
}

file, err := os.Create(filename)
if err != nil {
debug.Log("Cache.Store", "error creating file %v: %v", filename, err)
return nil, err
}

debug.Log("Cache.Store", "created file %v", filename)
return file, nil
}

// Load returns information from the cache. The returned io.ReadCloser must be
// closed by the caller.
func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return nil, err
}

return os.Open(filename)
}

func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
filename, err := c.filename(t, subtype, id)
if err != nil {
return err
}

err = os.Remove(filename)
debug.Log("Cache.purge", "Remove file %v: %v", filename, err)

if err != nil && os.IsNotExist(err) {
return nil
}

return err
}

// Clear removes information from the cache that isn't present in the repository any more.
func (c *Cache) Clear(repo *repository.Repository) error {
list, err := c.list(backend.Snapshot)
if err != nil {
return err
}

for _, entry := range list {
debug.Log("Cache.Clear", "found entry %v", entry)

if ok, err := repo.Backend().Test(backend.Snapshot, entry.ID.String()); !ok || err != nil {
debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry)

err = c.purge(backend.Snapshot, entry.Subtype, entry.ID)
if err != nil {
return err
}
}
}

return nil
}

type cacheEntry struct {
ID backend.ID
Subtype string
}

func (c cacheEntry) String() string {
if c.Subtype != "" {
return c.ID.Str() + "." + c.Subtype
}
return c.ID.Str()
}

func (c *Cache) list(t backend.Type) ([]cacheEntry, error) {
var dir string

switch t {
case backend.Snapshot:
dir = filepath.Join(c.base, "snapshots")
default:
return nil, fmt.Errorf("cache not supported for type %v", t)
}

fd, err := os.Open(dir)
if err != nil {
if os.IsNotExist(err) {
return []cacheEntry{}, nil
}
return nil, err
}
defer fd.Close()

fis, err := fd.Readdir(-1)
if err != nil {
return nil, err
}

entries := make([]cacheEntry, 0, len(fis))

for _, fi := range fis {
parts := strings.SplitN(fi.Name(), ".", 2)

id, err := backend.ParseID(parts[0])
// ignore invalid cache entries for now
if err != nil {
debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err)
continue
}

e := cacheEntry{ID: id}

if len(parts) == 2 {
e.Subtype = parts[1]
}

entries = append(entries, e)
}

return entries, nil
}

func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) {
filename := id.String()
if subtype != "" {
filename += "." + subtype
}

switch t {
case backend.Snapshot:
return filepath.Join(c.base, "snapshots", filename), nil
}

return "", fmt.Errorf("cache not supported for type %v", t)
}

func getCacheDir() (string, error) {
if dir := os.Getenv("RESTIC_CACHE"); dir != "" {
return dir, nil
}
if runtime.GOOS == "windows" {
return getWindowsCacheDir()
}

return getXDGCacheDir()
}

// getWindowsCacheDir will return %APPDATA%\restic or create
// a folder in the temporary folder called "restic".
func getWindowsCacheDir() (string, error) {
cachedir := os.Getenv("APPDATA")
if cachedir == "" {
cachedir = os.TempDir()
}
cachedir = filepath.Join(cachedir, "restic")
fi, err := os.Stat(cachedir)

if os.IsNotExist(err) {
err = os.MkdirAll(cachedir, 0700)
if err != nil {
return "", err
}

return cachedir, nil
}

if err != nil {
return "", err
}

if !fi.IsDir() {
return "", fmt.Errorf("cache dir %v is not a directory", cachedir)
}
return cachedir, nil
}

// getXDGCacheDir returns the cache directory according to XDG basedir spec, see
// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
func getXDGCacheDir() (string, error) {
xdgcache := os.Getenv("XDG_CACHE_HOME")
home := os.Getenv("HOME")

if xdgcache == "" && home == "" {
return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)")
}

cachedir := ""
if xdgcache != "" {
cachedir = filepath.Join(xdgcache, "restic")
} else if home != "" {
cachedir = filepath.Join(home, ".cache", "restic")
}

fi, err := os.Stat(cachedir)
if os.IsNotExist(err) {
err = os.MkdirAll(cachedir, 0700)
if err != nil {
return "", err
}

fi, err = os.Stat(cachedir)
debug.Log("getCacheDir", "create cache dir %v", cachedir)
}

if err != nil {
return "", err
}

if !fi.IsDir() {
return "", fmt.Errorf("cache dir %v is not a directory", cachedir)
}

return cachedir, nil
}
@@ -1,23 +0,0 @@
package restic_test

import (
"testing"

"restic"
. "restic/test"
)

func TestCache(t *testing.T) {
repo := SetupRepo()
defer TeardownRepo(repo)

_, err := restic.NewCache(repo, "")
OK(t, err)

arch := restic.NewArchiver(repo)

// archive some files, this should automatically cache all blobs from the snapshot
_, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)

// TODO: test caching index
}
@@ -2,10 +2,11 @@ package checker

import (
"bytes"
"errors"
"fmt"
"sync"

"restic/errors"

"restic"
"restic/backend"
"restic/crypto"
@@ -20,31 +21,31 @@ import (
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
packs backend.IDSet
blobs backend.IDSet
packs restic.IDSet
blobs restic.IDSet
blobRefs struct {
sync.Mutex
M map[backend.ID]uint
M map[restic.ID]uint
}
indexes map[backend.ID]*repository.Index
orphanedPacks backend.IDs
indexes map[restic.ID]*repository.Index
orphanedPacks restic.IDs

masterIndex *repository.MasterIndex

repo *repository.Repository
repo restic.Repository
}

// New returns a new checker which runs on repo.
func New(repo *repository.Repository) *Checker {
func New(repo restic.Repository) *Checker {
c := &Checker{
packs: backend.NewIDSet(),
blobs: backend.NewIDSet(),
packs: restic.NewIDSet(),
blobs: restic.NewIDSet(),
masterIndex: repository.NewMasterIndex(),
indexes: make(map[backend.ID]*repository.Index),
indexes: make(map[restic.ID]*repository.Index),
repo: repo,
}

c.blobRefs.M = make(map[backend.ID]uint)
c.blobRefs.M = make(map[restic.ID]uint)

return c
}
@@ -53,8 +54,8 @@ const defaultParallelism = 40

// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
PackID backend.ID
Indexes backend.IDSet
PackID restic.ID
Indexes restic.IDSet
}

func (e ErrDuplicatePacks) Error() string {
@@ -64,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string {
// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
backend.ID
restic.ID
}

func (err ErrOldIndexFormat) Error() string {
@@ -73,7 +74,7 @@ func (err ErrOldIndexFormat) Error() string {

// LoadIndex loads all index files.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
debug.Log("LoadIndex", "Start")
debug.Log("Start")
type indexRes struct {
Index *repository.Index
ID string
@@ -81,11 +82,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {

indexCh := make(chan indexRes)

worker := func(id backend.ID, done <-chan struct{}) error {
debug.Log("LoadIndex", "worker got index %v", id)
worker := func(id restic.ID, done <-chan struct{}) error {
debug.Log("worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
if err == repository.ErrOldIndexFormat {
debug.Log("LoadIndex", "index %v has old format", id.Str())
if errors.Cause(err) == repository.ErrOldIndexFormat {
debug.Log("index %v has old format", id.Str())
hints = append(hints, ErrOldIndexFormat{id})

idx, err = repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeOldIndex)
@@ -106,10 +107,10 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
var perr error
go func() {
defer close(indexCh)
debug.Log("LoadIndex", "start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism,
debug.Log("start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker))
debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
debug.Log("loading indexes finished, error: %v", perr)
}()

done := make(chan struct{})
@@ -120,20 +121,20 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
return hints, errs
}

packToIndex := make(map[backend.ID]backend.IDSet)
packToIndex := make(map[restic.ID]restic.IDSet)

for res := range indexCh {
debug.Log("LoadIndex", "process index %v", res.ID)
idxID, err := backend.ParseID(res.ID)
debug.Log("process index %v", res.ID)
idxID, err := restic.ParseID(res.ID)
if err != nil {
errs = append(errs, fmt.Errorf("unable to parse as index ID: %v", res.ID))
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
continue
}

c.indexes[idxID] = res.Index
c.masterIndex.Insert(res.Index)

debug.Log("LoadIndex", "process blobs")
debug.Log("process blobs")
cnt := 0
for blob := range res.Index.Each(done) {
c.packs.Insert(blob.PackID)
@@ -142,19 +143,19 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
cnt++

if _, ok := packToIndex[blob.PackID]; !ok {
packToIndex[blob.PackID] = backend.NewIDSet()
packToIndex[blob.PackID] = restic.NewIDSet()
}
packToIndex[blob.PackID].Insert(idxID)
}

debug.Log("LoadIndex", "%d blobs processed", cnt)
debug.Log("%d blobs processed", cnt)
}

debug.Log("LoadIndex", "done, error %v", perr)
debug.Log("done, error %v", perr)

debug.Log("LoadIndex", "checking for duplicate packs")
debug.Log("checking for duplicate packs")
for packID := range c.packs {
debug.Log("LoadIndex", " check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
debug.Log(" check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
if len(packToIndex[packID]) > 1 {
hints = append(hints, ErrDuplicatePacks{
PackID: packID,
@@ -170,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {

// PackError describes an error with a specific pack.
type PackError struct {
ID backend.ID
ID restic.ID
Orphaned bool
Err error
}
@@ -179,14 +180,14 @@ func (e PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error()
}

func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("Checker.testPackID", "worker start")
defer debug.Log("Checker.testPackID", "worker done")
func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("worker start")
defer debug.Log("worker done")

defer wg.Done()

for id := range inChan {
ok, err := repo.Backend().Test(backend.Data, id.String())
ok, err := repo.Backend().Test(restic.DataFile, id.String())
if err != nil {
err = PackError{ID: id, Err: err}
} else {
@@ -196,7 +197,7 @@ func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan
}

if err != nil {
debug.Log("Checker.testPackID", "error checking for pack %s: %v", id.Str(), err)
debug.Log("error checking for pack %s: %v", id.Str(), err)
select {
case <-done:
return
@@ -206,7 +207,7 @@ func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan
continue
}

debug.Log("Checker.testPackID", "pack %s exists", id.Str())
debug.Log("pack %s exists", id.Str())
}
}
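
packIDTester is one instance of the worker pattern used throughout the checker: a fixed number of goroutines drain an input channel, report failures on an error channel, and honor done for early cancellation. Stripped to a generic skeleton (a sketch, with doWork standing in for the per-ID test):

package main

import "sync"

// worker drains in, forwards failures to errs, and returns early when
// done closes, the shape shared by the checker's worker goroutines.
func worker(in <-chan string, errs chan<- error, done <-chan struct{},
	wg *sync.WaitGroup, doWork func(string) error) {

	defer wg.Done()
	for id := range in {
		if err := doWork(id); err != nil {
			select {
			case <-done:
				return
			case errs <- err:
			}
		}
	}
}
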

@@ -216,12 +217,12 @@ func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan
func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
defer close(errChan)

debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
seenPacks := backend.NewIDSet()
debug.Log("checking for %d packs", len(c.packs))
seenPacks := restic.NewIDSet()

var workerWG sync.WaitGroup

IDChan := make(chan backend.ID)
IDChan := make(chan restic.ID)
for i := 0; i < defaultParallelism; i++ {
workerWG.Add(1)
go packIDTester(c.repo, IDChan, errChan, &workerWG, done)
@@ -233,12 +234,12 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
}
close(IDChan)

debug.Log("Checker.Packs", "waiting for %d workers to terminate", defaultParallelism)
debug.Log("waiting for %d workers to terminate", defaultParallelism)
workerWG.Wait()
debug.Log("Checker.Packs", "workers terminated")
debug.Log("workers terminated")

for id := range c.repo.List(backend.Data, done) {
debug.Log("Checker.Packs", "check data blob %v", id.Str())
for id := range c.repo.List(restic.DataFile, done) {
debug.Log("check data blob %v", id.Str())
if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id)
select {
@@ -252,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {

// Error is an error that occurred while checking a repository.
type Error struct {
TreeID backend.ID
BlobID backend.ID
TreeID restic.ID
BlobID restic.ID
Err error
}

@@ -272,25 +273,25 @@ func (e Error) Error() string {
return e.Err.Error()
}

func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
return backend.ID{}, err
debug.Log("error loading snapshot %v: %v", id.Str(), err)
return restic.ID{}, err
}

if sn.Tree == nil {
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
return backend.ID{}, fmt.Errorf("snapshot %v has no tree", id)
debug.Log("snapshot %v has no tree", id.Str())
return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
}

return *sn.Tree, nil
}

// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
var trees struct {
IDs backend.IDs
IDs restic.IDs
sync.Mutex
}

@@ -300,12 +301,12 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
}

snapshotWorker := func(strID string, done <-chan struct{}) error {
id, err := backend.ParseID(strID)
id, err := restic.ParseID(strID)
if err != nil {
return err
}

debug.Log("Checker.Snaphots", "load snapshot %v", id.Str())
debug.Log("load snapshot %v", id.Str())

treeID, err := loadTreeFromSnapshot(repo, id)
if err != nil {
@@ -315,7 +316,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
return nil
}

debug.Log("Checker.Snaphots", "snapshot %v has tree %v", id.Str(), treeID.Str())
debug.Log("snapshot %v has tree %v", id.Str(), treeID.Str())
trees.Lock()
trees.IDs = append(trees.IDs, treeID)
trees.Unlock()
@@ -323,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
return nil
}

err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
if err != nil {
errs.errs = append(errs.errs, err)
}
@@ -333,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {

// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
ID backend.ID
ID restic.ID
Errors []error
}

@@ -342,18 +343,18 @@ func (e TreeError) Error() string {
|
||||
}
|
||||
|
||||
type treeJob struct {
|
||||
backend.ID
|
||||
restic.ID
|
||||
error
|
||||
*restic.Tree
|
||||
}
|
||||
|
||||
// loadTreeWorker loads trees from repo and sends them to out.
|
||||
func loadTreeWorker(repo *repository.Repository,
|
||||
in <-chan backend.ID, out chan<- treeJob,
|
||||
func loadTreeWorker(repo restic.Repository,
|
||||
in <-chan restic.ID, out chan<- treeJob,
|
||||
done <-chan struct{}, wg *sync.WaitGroup) {
|
||||
|
||||
defer func() {
|
||||
debug.Log("checker.loadTreeWorker", "exiting")
|
||||
debug.Log("exiting")
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
@@ -373,16 +374,16 @@ func loadTreeWorker(repo *repository.Repository,
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str())
|
||||
debug.Log("load tree %v", treeID.Str())
|
||||
|
||||
tree, err := restic.LoadTree(repo, treeID)
|
||||
debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
|
||||
tree, err := repo.LoadTree(treeID)
|
||||
debug.Log("load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
|
||||
job = treeJob{ID: treeID, error: err, Tree: tree}
|
||||
outCh = out
|
||||
inCh = nil
|
||||
|
||||
case outCh <- job:
|
||||
debug.Log("checker.loadTreeWorker", "sent tree %v", job.ID.Str())
|
||||
debug.Log("sent tree %v", job.ID.Str())
|
||||
outCh = nil
|
||||
inCh = in
|
||||
}
|
||||
@@ -392,7 +393,7 @@ func loadTreeWorker(repo *repository.Repository,
|
||||
// checkTreeWorker checks the trees received and sends out errors to errChan.
|
||||
func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-chan struct{}, wg *sync.WaitGroup) {
|
||||
defer func() {
|
||||
debug.Log("checker.checkTreeWorker", "exiting")
|
||||
debug.Log("exiting")
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
@@ -406,12 +407,12 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
debug.Log("checker.checkTreeWorker", "done channel closed, exiting")
|
||||
debug.Log("done channel closed, exiting")
|
||||
return
|
||||
|
||||
case job, ok := <-inCh:
|
||||
if !ok {
|
||||
debug.Log("checker.checkTreeWorker", "input channel closed, exiting")
|
||||
debug.Log("input channel closed, exiting")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -422,14 +423,14 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
|
||||
alreadyChecked = true
|
||||
}
|
||||
c.blobRefs.M[id]++
|
||||
debug.Log("checker.checkTreeWorker", "tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
|
||||
debug.Log("tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
|
||||
c.blobRefs.Unlock()
|
||||
|
||||
if alreadyChecked {
|
||||
continue
|
||||
}
|
||||
|
||||
debug.Log("checker.checkTreeWorker", "check tree %v (tree %v, err %v)", job.ID.Str(), job.Tree, job.error)
|
||||
debug.Log("check tree %v (tree %v, err %v)", job.ID.Str(), job.Tree, job.error)
|
||||
|
||||
var errs []error
|
||||
if job.error != nil {
|
||||
@@ -439,23 +440,23 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
debug.Log("checker.checkTreeWorker", "checked tree %v: %v errors", job.ID.Str(), len(errs))
|
||||
debug.Log("checked tree %v: %v errors", job.ID.Str(), len(errs))
|
||||
treeError = TreeError{ID: job.ID, Errors: errs}
|
||||
outCh = out
|
||||
inCh = nil
|
||||
}
|
||||
|
||||
case outCh <- treeError:
|
||||
debug.Log("checker.checkTreeWorker", "tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
|
||||
debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
|
||||
outCh = nil
|
||||
inCh = in
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
|
||||
func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
|
||||
defer func() {
|
||||
debug.Log("checker.filterTrees", "closing output channels")
|
||||
debug.Log("closing output channels")
|
||||
close(loaderChan)
|
||||
close(out)
|
||||
}()
|
||||
@@ -465,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
|
||||
outCh = out
|
||||
loadCh = loaderChan
|
||||
job treeJob
|
||||
nextTreeID backend.ID
|
||||
nextTreeID restic.ID
|
||||
outstandingLoadTreeJobs = 0
|
||||
)
|
||||
|
||||
@@ -479,7 +480,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
|
||||
}
|
||||
|
||||
if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
|
||||
debug.Log("checker.filterTrees", "backlog is empty, all channels nil, exiting")
|
||||
debug.Log("backlog is empty, all channels nil, exiting")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -493,7 +494,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
|
||||
|
||||
case j, ok := <-inCh:
|
||||
if !ok {
|
||||
debug.Log("checker.filterTrees", "input channel closed")
|
||||
debug.Log("input channel closed")
|
||||
inCh = nil
|
||||
in = nil
|
||||
continue
|
||||
@@ -501,23 +502,23 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
|
||||
|
||||
outstandingLoadTreeJobs--
|
||||
|
||||
debug.Log("checker.filterTrees", "input job tree %v", j.ID.Str())
|
||||
debug.Log("input job tree %v", j.ID.Str())
|
||||
|
||||
var err error
|
||||
|
||||
if j.error != nil {
|
||||
debug.Log("checker.filterTrees", "received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID.Str())
|
||||
debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID.Str())
|
||||
} else if j.Tree == nil {
|
||||
debug.Log("checker.filterTrees", "received job with nil tree pointer: %v (ID %v)", j.error, j.ID.Str())
|
||||
debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID.Str())
|
||||
err = errors.New("tree is nil and error is nil")
|
||||
} else {
|
||||
debug.Log("checker.filterTrees", "subtrees for tree %v: %v", j.ID.Str(), j.Tree.Subtrees())
|
||||
debug.Log("subtrees for tree %v: %v", j.ID.Str(), j.Tree.Subtrees())
|
||||
for _, id := range j.Tree.Subtrees() {
|
||||
if id.IsNull() {
|
||||
// We do not need to raise this error here, it is
|
||||
// checked when the tree is checked. Just make sure
|
||||
// that we do not add any null IDs to the backlog.
|
||||
debug.Log("checker.filterTrees", "tree %v has nil subtree", j.ID.Str())
|
||||
debug.Log("tree %v has nil subtree", j.ID.Str())
|
||||
continue
|
||||
}
|
||||
backlog = append(backlog, id)
|
||||
@@ -534,7 +535,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
|
||||
inCh = nil
|
||||
|
||||
case outCh <- job:
|
||||
debug.Log("checker.FilterTrees", "tree sent to check: %v", job.ID.Str())
|
||||
debug.Log("tree sent to check: %v", job.ID.Str())
|
||||
outCh = nil
|
||||
inCh = in
|
||||
}
|
||||
@@ -548,7 +549,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
|
||||
defer close(errChan)
|
||||
|
||||
trees, errs := loadSnapshotTreeIDs(c.repo)
|
||||
debug.Log("checker.Structure", "need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
|
||||
debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
|
||||
|
||||
for _, err := range errs {
|
||||
select {
|
||||
@@ -558,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
|
||||
}
|
||||
}
|
||||
|
||||
treeIDChan := make(chan backend.ID)
|
||||
treeIDChan := make(chan restic.ID)
|
||||
treeJobChan1 := make(chan treeJob)
|
||||
treeJobChan2 := make(chan treeJob)
|
||||
|
||||
@@ -574,37 +575,33 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
|
||||
debug.Log("Checker.checkTree", "checking tree %v", id.Str())
|
||||
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
|
||||
debug.Log("checking tree %v", id.Str())
|
||||
|
||||
var blobs []backend.ID
|
||||
var blobs []restic.ID
|
||||
|
||||
for _, node := range tree.Nodes {
|
||||
switch node.Type {
|
||||
case "file":
|
||||
if node.Content == nil {
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q has nil blob list", node.Name)})
|
||||
}
|
||||
|
||||
if node.Mode == 0 {
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q has invalid mode: %v", node.Name, node.Mode)})
|
||||
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
|
||||
}
|
||||
|
||||
for b, blobID := range node.Content {
|
||||
if blobID.IsNull() {
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q blob %d has null ID", node.Name, b)})
|
||||
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
|
||||
continue
|
||||
}
|
||||
blobs = append(blobs, blobID)
|
||||
}
|
||||
case "dir":
|
||||
if node.Subtree == nil {
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("dir node %q has no subtree", node.Name)})
|
||||
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
|
||||
continue
|
||||
}
|
||||
|
||||
if node.Subtree.IsNull() {
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("dir node %q subtree id is null", node.Name)})
|
||||
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -612,7 +609,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
|
||||
// nothing to check
|
||||
|
||||
default:
|
||||
errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("node %q with invalid type %q", node.Name, node.Type)})
|
||||
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
|
||||
}
|
||||
|
||||
if node.Name == "" {
|
||||
@@ -623,11 +620,11 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
|
||||
for _, blobID := range blobs {
|
||||
c.blobRefs.Lock()
|
||||
c.blobRefs.M[blobID]++
|
||||
debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
|
||||
debug.Log("blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
|
||||
c.blobRefs.Unlock()
|
||||
|
||||
if !c.blobs.Has(blobID) {
|
||||
debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
|
||||
debug.Log("tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
|
||||
|
||||
errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
|
||||
}
|
||||
@@ -637,14 +634,14 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
|
||||
}
|
||||
|
||||
// UnusedBlobs returns all blobs that have never been referenced.
|
||||
func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
|
||||
func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
|
||||
c.blobRefs.Lock()
|
||||
defer c.blobRefs.Unlock()
|
||||
|
||||
debug.Log("Checker.UnusedBlobs", "checking %d blobs", len(c.blobs))
|
||||
debug.Log("checking %d blobs", len(c.blobs))
|
||||
for id := range c.blobs {
|
||||
if c.blobRefs.M[id] == 0 {
|
||||
debug.Log("Checker.UnusedBlobs", "blob %v not referenced", id.Str())
|
||||
debug.Log("blob %v not referenced", id.Str())
|
||||
blobs = append(blobs, id)
|
||||
}
|
||||
}
|
||||
@@ -652,58 +649,54 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
|
||||
return blobs
|
||||
}
|
||||
|
||||
// OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
|
||||
func (c *Checker) OrphanedPacks() backend.IDs {
|
||||
return c.orphanedPacks
|
||||
}
|
||||
|
||||
// CountPacks returns the number of packs in the repository.
|
||||
func (c *Checker) CountPacks() uint64 {
|
||||
return uint64(len(c.packs))
|
||||
}
|
||||
|
||||
// checkPack reads a pack and checks the integrity of all blobs.
|
||||
func checkPack(r *repository.Repository, id backend.ID) error {
|
||||
debug.Log("Checker.checkPack", "checking pack %v", id.Str())
|
||||
h := backend.Handle{Type: backend.Data, Name: id.String()}
|
||||
func checkPack(r restic.Repository, id restic.ID) error {
|
||||
debug.Log("checking pack %v", id.Str())
|
||||
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
|
||||
buf, err := backend.LoadAll(r.Backend(), h, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash := backend.Hash(buf)
|
||||
hash := restic.Hash(buf)
|
||||
if !hash.Equal(id) {
|
||||
debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
|
||||
return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
|
||||
debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
|
||||
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
|
||||
}
|
||||
|
||||
unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
|
||||
blobs, err := pack.List(r.Key(), bytes.NewReader(buf), int64(len(buf)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for i, blob := range unpacker.Entries {
|
||||
debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str())
|
||||
for i, blob := range blobs {
|
||||
debug.Log(" check blob %d: %v", i, blob.ID.Str())
|
||||
|
||||
plainBuf := make([]byte, blob.Length)
|
||||
plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
|
||||
n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
|
||||
if err != nil {
|
||||
debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err)
|
||||
errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
|
||||
debug.Log(" error decrypting blob %v: %v", blob.ID.Str(), err)
|
||||
errs = append(errs, errors.Errorf("blob %v: %v", i, err))
|
||||
continue
|
||||
}
|
||||
plainBuf = plainBuf[:n]
|
||||
|
||||
hash := backend.Hash(plainBuf)
|
||||
hash := restic.Hash(plainBuf)
|
||||
if !hash.Equal(blob.ID) {
|
||||
debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
|
||||
errs = append(errs, fmt.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
|
||||
debug.Log(" Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
|
||||
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
|
||||
return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -716,10 +709,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
|
||||
p.Start()
|
||||
defer p.Done()
|
||||
|
||||
worker := func(wg *sync.WaitGroup, in <-chan backend.ID) {
|
||||
worker := func(wg *sync.WaitGroup, in <-chan restic.ID) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
var id backend.ID
|
||||
var id restic.ID
|
||||
var ok bool
|
||||
|
||||
select {
|
||||
@@ -745,7 +738,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
|
||||
}
|
||||
}
|
||||
|
||||
ch := c.repo.List(backend.Data, done)
|
||||
ch := c.repo.List(restic.DataFile, done)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < defaultParallelism; i++ {
|
||||
|
||||
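The worker loops above (loadTreeWorker, checkTreeWorker, filterTrees) all rely on the same Go idiom: a send or receive on a nil channel blocks forever, so a select case can be switched off by setting its channel variable to nil. A minimal, self-contained sketch of that idiom, illustrative only and not restic code:

```go
package main

import "fmt"

// worker alternates between receiving a job and sending its result by
// toggling its channel variables between nil and the real channels: a
// select case on a nil channel can never fire.
func worker(in <-chan int, out chan<- int, done <-chan struct{}) {
	var (
		inCh  = in         // receiving enabled
		outCh chan<- int   // nil: sending disabled
		job   int
	)
	for {
		select {
		case <-done:
			return
		case v, ok := <-inCh:
			if !ok {
				return
			}
			job = v * v            // "process" the job
			outCh, inCh = out, nil // enable send, disable receive
		case outCh <- job:
			outCh, inCh = nil, in // result delivered, receive again
		}
	}
}

func main() {
	in, out, done := make(chan int), make(chan int), make(chan struct{})
	defer close(done)
	go worker(in, out, done)
	in <- 7
	fmt.Println(<-out) // 49
}
```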
@@ -1,33 +1,21 @@
package checker_test

import (
"fmt"
"math/rand"
"path/filepath"
"sort"
"testing"

"restic"
"restic/backend"
"restic/archiver"
"restic/backend/mem"
"restic/checker"
"restic/repository"
. "restic/test"
"restic/test"
)

var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")

func list(repo *repository.Repository, t backend.Type) (IDs []string) {
done := make(chan struct{})
defer close(done)

for id := range repo.List(t, done) {
IDs = append(IDs, id.String())
}

return IDs
}

func collectErrors(f func(chan<- error, <-chan struct{})) (errs []error) {
done := make(chan struct{})
defer close(done)
@@ -60,164 +48,167 @@ func checkData(chkr *checker.Checker) []error {
}

func TestCheckRepo(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
repo := repository.TestOpenLocal(t, repodir)

if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}

OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))
})
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}

test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))
}

func TestMissingPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()

packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
OK(t, repo.Backend().Remove(backend.Data, packID))
repo := repository.TestOpenLocal(t, repodir)

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
test.OK(t, repo.Backend().Remove(restic.DataFile, packID))

if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}

errs = checkPacks(chkr)
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}

Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
errs = checkPacks(chkr)

if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
})
test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))

if err, ok := errs[0].(checker.PackError); ok {
test.Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
}

func TestUnreferencedPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()

// index 3f1a only references pack 60e0
indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
OK(t, repo.Backend().Remove(backend.Index, indexID))
repo := repository.TestOpenLocal(t, repodir)

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
// index 3f1a only references pack 60e0
indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
test.OK(t, repo.Backend().Remove(restic.IndexFile, indexID))

if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}

errs = checkPacks(chkr)
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}

Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
errs = checkPacks(chkr)

if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
})
test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))

if err, ok := errs[0].(checker.PackError); ok {
test.Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
}

func TestUnreferencedBlobs(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
repodir, cleanup := test.Env(t, checkerTestData)
defer cleanup()

snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
OK(t, repo.Backend().Remove(backend.Snapshot, snID))
repo := repository.TestOpenLocal(t, repodir)

unusedBlobsBySnapshot := backend.IDs{
ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
}
snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))

sort.Sort(unusedBlobsBySnapshot)
unusedBlobsBySnapshot := restic.IDs{
restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
}

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}
sort.Sort(unusedBlobsBySnapshot)

if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(errs) > 0 {
t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
}

OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))
if len(hints) > 0 {
t.Errorf("expected no hints, got %v: %v", len(hints), hints)
}

blobs := chkr.UnusedBlobs()
sort.Sort(blobs)
test.OKs(t, checkPacks(chkr))
test.OKs(t, checkStruct(chkr))

Equals(t, unusedBlobsBySnapshot, blobs)
})
blobs := chkr.UnusedBlobs()
sort.Sort(blobs)

test.Equals(t, unusedBlobsBySnapshot, blobs)
}

var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")

func TestDuplicatePacksInIndex(t *testing.T) {
WithTestEnvironment(t, checkerDuplicateIndexTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData)
defer cleanup()

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(hints) == 0 {
t.Fatalf("did not get expected checker hints for duplicate packs in indexes")
repo := repository.TestOpenLocal(t, repodir)

chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
if len(hints) == 0 {
t.Fatalf("did not get expected checker hints for duplicate packs in indexes")
}

found := false
for _, hint := range hints {
if _, ok := hint.(checker.ErrDuplicatePacks); ok {
found = true
} else {
t.Errorf("got unexpected hint: %v", hint)
}
}

found := false
for _, hint := range hints {
if _, ok := hint.(checker.ErrDuplicatePacks); ok {
found = true
} else {
t.Errorf("got unexpected hint: %v", hint)
}
}
if !found {
t.Fatalf("did not find hint ErrDuplicatePacks")
}

if !found {
t.Fatalf("did not find hint ErrDuplicatePacks")
}

if len(errs) > 0 {
t.Errorf("expected no errors, got %v: %v", len(errs), errs)
}

})
if len(errs) > 0 {
t.Errorf("expected no errors, got %v: %v", len(errs), errs)
}
}

// errorBackend randomly modifies data after reading.
type errorBackend struct {
backend.Backend
restic.Backend
ProduceErrors bool
}

func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) {
fmt.Printf("load %v\n", h)
func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
n, err := b.Backend.Load(h, p, off)

if b.ProduceErrors {
@@ -239,17 +230,19 @@ func induceError(data []byte) {
func TestCheckerModifiedData(t *testing.T) {
be := mem.New()

repo := repository.New(be)
OK(t, repo.Init(TestPassword))
repository.TestUseLowSecurityKDFParameters(t)

arch := restic.NewArchiver(repo)
_, id, err := arch.Snapshot(nil, []string{"."}, nil)
OK(t, err)
repo := repository.New(be)
test.OK(t, repo.Init(test.TestPassword))

arch := archiver.New(repo)
_, id, err := arch.Snapshot(nil, []string{"."}, nil, nil)
test.OK(t, err)
t.Logf("archived as %v", id.Str())

beError := &errorBackend{Backend: be}
checkRepo := repository.New(beError)
OK(t, checkRepo.SearchKey(TestPassword))
test.OK(t, checkRepo.SearchKey(test.TestPassword, 5))

chkr := checker.New(checkRepo)
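The rewritten tests above drop the closure-based WithTestEnvironment/OpenLocalRepo helpers for ones that return a cleanup function, so each test body reads top to bottom. A condensed sketch of the new shape, using only names that appear in this diff and assuming the checker_test package context; the assertions are elided:

```go
func TestFixturePattern(t *testing.T) {
	// test.Env unpacks the tar.gz fixture and hands back the directory
	// plus a cleanup function to defer, replacing the old callback style.
	repodir, cleanup := test.Env(t, checkerTestData)
	defer cleanup()

	repo := repository.TestOpenLocal(t, repodir)

	chkr := checker.New(repo)
	if _, errs := chkr.LoadIndex(); len(errs) > 0 {
		t.Fatalf("expected no errors, got %v", errs)
	}
}
```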
@@ -1,163 +0,0 @@
package checker

import (
"errors"

"restic/backend"
"restic/debug"
"restic/repository"
)

// Repacker extracts still used blobs from packs with unused blobs and creates
// new packs.
type Repacker struct {
unusedBlobs backend.IDSet
repo *repository.Repository
}

// NewRepacker returns a new repacker that (when Repack() in run) cleans up the
// repository and creates new packs and indexs so that all blobs in unusedBlobs
// aren't used any more.
func NewRepacker(repo *repository.Repository, unusedBlobs backend.IDSet) *Repacker {
return &Repacker{
repo: repo,
unusedBlobs: unusedBlobs,
}
}

// Repack runs the process of finding still used blobs in packs with unused
// blobs, extracts them and creates new packs with just the still-in-use blobs.
func (r *Repacker) Repack() error {
debug.Log("Repacker.Repack", "searching packs for %v", r.unusedBlobs)

unneededPacks, err := FindPacksForBlobs(r.repo, r.unusedBlobs)
if err != nil {
return err
}

debug.Log("Repacker.Repack", "found packs: %v", unneededPacks)

blobs, err := FindBlobsForPacks(r.repo, unneededPacks)
if err != nil {
return err
}

debug.Log("Repacker.Repack", "found blobs: %v", blobs)

for id := range r.unusedBlobs {
debug.Log("Repacker.Repack", "remove unused blob %v", id.Str())
blobs.Delete(id)
}

debug.Log("Repacker.Repack", "need to repack blobs: %v", blobs)

err = RepackBlobs(r.repo, r.repo, blobs)
if err != nil {
return err
}

debug.Log("Repacker.Repack", "remove unneeded packs: %v", unneededPacks)
for packID := range unneededPacks {
err = r.repo.Backend().Remove(backend.Data, packID.String())
if err != nil {
return err
}
}

debug.Log("Repacker.Repack", "rebuild index, unneeded packs: %v", unneededPacks)
idx, err := r.repo.Index().RebuildIndex(unneededPacks)

newIndexID, err := repository.SaveIndex(r.repo, idx)
debug.Log("Repacker.Repack", "saved new index at %v, err %v", newIndexID.Str(), err)
if err != nil {
return err
}

debug.Log("Repacker.Repack", "remove old indexes: %v", idx.Supersedes())
for _, id := range idx.Supersedes() {
err = r.repo.Backend().Remove(backend.Index, id.String())
if err != nil {
debug.Log("Repacker.Repack", "error removing index %v: %v", id.Str(), err)
return err
}

debug.Log("Repacker.Repack", "removed index %v", id.Str())
}

return nil
}

// FindPacksForBlobs returns the set of packs that contain the blobs.
func FindPacksForBlobs(repo *repository.Repository, blobs backend.IDSet) (backend.IDSet, error) {
packs := backend.NewIDSet()
idx := repo.Index()
for id := range blobs {
blob, err := idx.Lookup(id)
if err != nil {
return nil, err
}

packs.Insert(blob.PackID)
}

return packs, nil
}

// FindBlobsForPacks returns the set of blobs contained in a pack of packs.
func FindBlobsForPacks(repo *repository.Repository, packs backend.IDSet) (backend.IDSet, error) {
blobs := backend.NewIDSet()

for packID := range packs {
for _, packedBlob := range repo.Index().ListPack(packID) {
blobs.Insert(packedBlob.ID)
}
}

return blobs, nil
}

// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
blob, err := src.Index().Lookup(id)
if err != nil {
return err
}

debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

buf := make([]byte, 0, blob.PlaintextLength())
buf, err = src.LoadBlob(blob.Type, id, buf)
if err != nil {
return err
}

if uint(len(buf)) != blob.PlaintextLength() {
debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
return errors.New("LoadBlob returned wrong data, len() doesn't match")
}

_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
if err != nil {
return err
}

return nil
}

// RepackBlobs reads all blobs in blobIDs from src and saves them into new pack
// files in dst. Source and destination repo may be the same.
func RepackBlobs(src, dst *repository.Repository, blobIDs backend.IDSet) (err error) {
for id := range blobIDs {
err = repackBlob(src, dst, id)
if err != nil {
return err
}
}

err = dst.Flush()
if err != nil {
return err
}

return nil
}
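The Repacker deleted above worked by set arithmetic, per its own comments: find the packs that contain any unused blob, collect every blob those packs hold, subtract the unused ones, rewrite the survivors into new packs, then delete the old packs and rebuild the index. A toy, self-contained sketch of that planning step, with illustrative types rather than restic's:

```go
package main

import "fmt"

type idSet map[string]struct{}

// repackPlan returns the packs that must go and the blobs that must be
// rewritten first: keep = (all blobs in affected packs) minus (unused blobs).
func repackPlan(packContents map[string][]string, unused idSet) (packs, keep idSet) {
	packs, keep = idSet{}, idSet{}
	// 1. find every pack that holds at least one unused blob
	for pack, blobs := range packContents {
		for _, b := range blobs {
			if _, ok := unused[b]; ok {
				packs[pack] = struct{}{}
				break
			}
		}
	}
	// 2. every other blob in those packs is still live and must be
	//    rewritten into a new pack before the old pack is deleted
	for pack := range packs {
		for _, b := range packContents[pack] {
			if _, ok := unused[b]; !ok {
				keep[b] = struct{}{}
			}
		}
	}
	return packs, keep
}

func main() {
	contents := map[string][]string{
		"pack1": {"a", "b"},
		"pack2": {"c"},
	}
	packs, keep := repackPlan(contents, idSet{"b": {}})
	fmt.Println(packs, keep) // map[pack1:{}] map[a:{}]
}
```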
@@ -1,127 +0,0 @@
package checker_test

import (
"testing"

"restic/backend"
"restic/checker"

. "restic/test"
)

var findPackTests = []struct {
blobIDs backend.IDSet
packIDs backend.IDSet
}{
{
backend.IDSet{
ParseID("534f211b4fc2cf5b362a24e8eba22db5372a75b7e974603ff9263f5a471760f4"): struct{}{},
ParseID("51aa04744b518c6a85b4e7643cfa99d58789c2a6ca2a3fda831fa3032f28535c"): struct{}{},
ParseID("454515bca5f4f60349a527bd814cc2681bc3625716460cc6310771c966d8a3bf"): struct{}{},
ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"): struct{}{},
},
backend.IDSet{
ParseID("19a731a515618ec8b75fc0ff3b887d8feb83aef1001c9899f6702761142ed068"): struct{}{},
ParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"): struct{}{},
},
},
}

var findBlobTests = []struct {
packIDs backend.IDSet
blobIDs backend.IDSet
}{
{
backend.IDSet{
ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{},
},
backend.IDSet{
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
},
},
{
backend.IDSet{
ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{},
ParseID("ff7e12cd66d896b08490e787d1915c641e678d7e6b4a00e60db5d13054f4def4"): struct{}{},
},
backend.IDSet{
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
ParseID("aa79d596dbd4c863e5400deaca869830888fe1ce9f51b4a983f532c77f16a596"): struct{}{},
ParseID("b2396c92781307111accf2ebb1cd62b58134b744d90cb6f153ca456a98dc3e76"): struct{}{},
ParseID("5249af22d3b2acd6da8048ac37b2a87fa346fabde55ed23bb866f7618843c9fe"): struct{}{},
ParseID("f41c2089a9d58a4b0bf39369fa37588e6578c928aea8e90a4490a6315b9905c1"): struct{}{},
},
},
}

func TestRepackerFindPacks(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)

OK(t, repo.LoadIndex())

for _, test := range findPackTests {
packIDs, err := checker.FindPacksForBlobs(repo, test.blobIDs)
OK(t, err)
Equals(t, test.packIDs, packIDs)
}

for _, test := range findBlobTests {
blobs, err := checker.FindBlobsForPacks(repo, test.packIDs)
OK(t, err)

Assert(t, test.blobIDs.Equals(blobs),
"list of blobs for packs %v does not match, expected:\n %v\ngot:\n %v",
test.packIDs, test.blobIDs, blobs)
}
})
}

func TestRepacker(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())

repo.Backend().Remove(backend.Snapshot, "c2b53c5e6a16db92fbb9aa08bd2794c58b379d8724d661ee30d20898bdfdff22")

unusedBlobs := backend.IDSet{
ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{},
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{},
ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{},
ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{},
}

chkr := checker.New(repo)
_, errs := chkr.LoadIndex()
OKs(t, errs)

errs = checkStruct(chkr)
OKs(t, errs)

list := backend.NewIDSet(chkr.UnusedBlobs()...)
if !unusedBlobs.Equals(list) {
t.Fatalf("expected unused blobs:\n %v\ngot:\n %v", unusedBlobs, list)
}

repacker := checker.NewRepacker(repo, unusedBlobs)
OK(t, repacker.Repack())

chkr = checker.New(repo)
_, errs = chkr.LoadIndex()
OKs(t, errs)
OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))

blobs := chkr.UnusedBlobs()
Assert(t, len(blobs) == 0,
"expected zero unused blobs, got %v", blobs)
})
}
53  src/restic/checker/testing.go  Normal file
@@ -0,0 +1,53 @@
package checker

import (
"restic"
"testing"
)

// TestCheckRepo runs the checker on repo.
func TestCheckRepo(t testing.TB, repo restic.Repository) {
chkr := New(repo)

hints, errs := chkr.LoadIndex()
if len(errs) != 0 {
t.Fatalf("errors loading index: %v", errs)
}

if len(hints) != 0 {
t.Fatalf("errors loading index: %v", hints)
}

done := make(chan struct{})
defer close(done)

// packs
errChan := make(chan error)
go chkr.Packs(errChan, done)

for err := range errChan {
t.Error(err)
}

// structure
errChan = make(chan error)
go chkr.Structure(errChan, done)

for err := range errChan {
t.Error(err)
}

// unused blobs
blobs := chkr.UnusedBlobs()
if len(blobs) > 0 {
t.Errorf("unused blobs found: %v", blobs)
}

// read data
errChan = make(chan error)
go chkr.ReadData(nil, errChan, done)

for err := range errChan {
t.Error(err)
}
}
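With the new helper above, other packages can verify a repository in one call. A hypothetical caller; the repository constructor and its cleanup signature are assumptions for illustration, not taken from this diff:

```go
func TestRepoStaysConsistent(t *testing.T) {
	// repository.TestRepository is a stand-in for whatever builds a
	// populated throwaway repo; only checker.TestCheckRepo is from the diff.
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// ... create snapshots in repo ...

	checker.TestCheckRepo(t, repo) // runs pack, structure, unused-blob and read-data checks
}
```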
@@ -1,15 +1,13 @@
package repository
package restic

import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"io"
"testing"

"restic/errors"

"restic/debug"

"github.com/restic/chunker"
"restic/backend"
"restic/debug"
)

// Config contains the configuration for a repository.
@@ -19,26 +17,18 @@ type Config struct {
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}

// repositoryIDSize is the length of the ID chosen at random for a new repository.
const repositoryIDSize = sha256.Size

// RepoVersion is the version that is written to the config when a repository
// is newly created with Init().
const RepoVersion = 1

// JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface {
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
}

// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
LoadJSONUnpacked(FileType, ID, interface{}) error
}

// CreateConfig creates a config file with a randomly selected polynomial and
// ID and saves the config in the repository.
func CreateConfig(r JSONUnpackedSaver) (Config, error) {
// ID.
func CreateConfig() (Config, error) {
var (
err error
cfg Config
@@ -46,22 +36,24 @@ func CreateConfig(r JSONUnpackedSaver) (Config, error) {

cfg.ChunkerPolynomial, err = chunker.RandomPolynomial()
if err != nil {
return Config{}, err
return Config{}, errors.Wrap(err, "chunker.RandomPolynomial")
}

newID := make([]byte, repositoryIDSize)
_, err = io.ReadFull(rand.Reader, newID)
if err != nil {
return Config{}, err
}

cfg.ID = hex.EncodeToString(newID)
cfg.ID = NewRandomID().String()
cfg.Version = RepoVersion

debug.Log("Repo.CreateConfig", "New config: %#v", cfg)
debug.Log("New config: %#v", cfg)
return cfg, nil
}

_, err = r.SaveJSONUnpacked(backend.Config, cfg)
return cfg, err
// TestCreateConfig creates a config for use within tests.
func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) {
cfg.ChunkerPolynomial = pol

cfg.ID = NewRandomID().String()
cfg.Version = RepoVersion

return cfg
}

// LoadConfig returns loads, checks and returns the config for a repository.
@@ -70,7 +62,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
cfg Config
)

err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg)
err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg)
if err != nil {
return Config{}, err
}
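After this change, CreateConfig no longer persists anything: it only builds the Config value, and saving moves to the caller. A brief sketch of the new calling convention; the configSaver interface is a local stand-in mirroring the saver type in the test below, since the diff drops the repository-side saver from CreateConfig:

```go
// configSaver is a hypothetical local interface, not a restic export.
type configSaver interface {
	SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error)
}

// initConfig builds a fresh config, then persists it explicitly.
func initConfig(s configSaver) (restic.Config, error) {
	cfg, err := restic.CreateConfig()
	if err != nil {
		return restic.Config{}, err
	}
	_, err = s.SaveJSONUnpacked(restic.ConfigFile, cfg)
	return cfg, err
}
```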
54  src/restic/config_test.go  Normal file
@@ -0,0 +1,54 @@
package restic_test

import (
"restic"
"testing"

. "restic/test"
)

type saver func(restic.FileType, interface{}) (restic.ID, error)

func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) {
return s(t, arg)
}

type loader func(restic.FileType, restic.ID, interface{}) error

func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error {
return l(t, id, arg)
}

func TestConfig(t *testing.T) {
resultConfig := restic.Config{}
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)

cfg := arg.(restic.Config)
resultConfig = cfg
return restic.ID{}, nil
}

cfg1, err := restic.CreateConfig()
OK(t, err)

_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)

load := func(tpe restic.FileType, id restic.ID, arg interface{}) error {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)

cfg := arg.(*restic.Config)
*cfg = resultConfig
return nil
}

cfg2, err := restic.LoadConfig(loader(load))
OK(t, err)

Assert(t, cfg1 == cfg2,
"configs aren't equal: %v != %v", cfg1, cfg2)
}
@@ -1,19 +0,0 @@
package crypto

import "sync"

const defaultBufSize = 32 * 1024 // 32KiB

var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, defaultBufSize)
},
}

func getBuffer() []byte {
return bufPool.Get().([]byte)
}

func freeBuffer(buf []byte) {
bufPool.Put(buf)
}
@@ -5,11 +5,11 @@ import (
"crypto/cipher"
"crypto/rand"
"encoding/json"
"errors"
"fmt"

"restic/errors"

"golang.org/x/crypto/poly1305"
"golang.org/x/crypto/scrypt"
)

const (
@@ -26,10 +26,6 @@ const (
var (
// ErrUnauthenticated is returned when ciphertext verification has failed.
ErrUnauthenticated = errors.New("ciphertext verification failed")

// ErrBufferTooSmall is returned when the destination slice is too small
// for the ciphertext.
ErrBufferTooSmall = errors.New("destination buffer too small")
)

// Key holds encryption and message authentication keys for a repository. It is stored
@@ -168,7 +164,7 @@ func (m *MACKey) UnmarshalJSON(data []byte) error {
j := jsonMACKey{}
err := json.Unmarshal(data, &j)
if err != nil {
return err
return errors.Wrap(err, "Unmarshal")
}
copy(m.K[:], j.K)
copy(m.R[:], j.R)
@@ -206,7 +202,7 @@ func (k *EncryptionKey) UnmarshalJSON(data []byte) error {
d := make([]byte, aesKeySize)
err := json.Unmarshal(data, &d)
if err != nil {
return err
return errors.Wrap(err, "Unmarshal")
}
copy(k[:], d)

@@ -274,9 +270,9 @@ func Encrypt(ks *Key, ciphertext []byte, plaintext []byte) ([]byte, error) {
// Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form
// IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the
// same slice.
func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error) {
func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) (int, error) {
if !ks.Valid() {
return nil, errors.New("invalid key")
return 0, errors.New("invalid key")
}

// check for plausible length
@@ -284,21 +280,26 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error
panic("trying to decrypt invalid data: ciphertext too small")
}

// check buffer length for plaintext
plaintextLength := len(ciphertextWithMac) - ivSize - macSize
if len(plaintext) < plaintextLength {
return 0, errors.Errorf("plaintext buffer too small, %d < %d", len(plaintext), plaintextLength)
}

// extract mac
l := len(ciphertextWithMac) - macSize
ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:]

// verify mac
if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &ks.MAC, mac) {
return nil, ErrUnauthenticated
return 0, ErrUnauthenticated
}

// extract iv
iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:]

if cap(plaintext) < len(ciphertext) {
// extend plaintext
plaintext = append(plaintext, make([]byte, len(ciphertext)-cap(plaintext))...)
if len(ciphertext) != plaintextLength {
return 0, errors.Errorf("plaintext and ciphertext lengths do not match: %d != %d", len(ciphertext), plaintextLength)
}

// decrypt data
@@ -312,35 +313,7 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error
plaintext = plaintext[:len(ciphertext)]
e.XORKeyStream(plaintext, ciphertext)

return plaintext, nil
}

// KDF derives encryption and message authentication keys from the password
// using the supplied parameters N, R and P and the Salt.
func KDF(N, R, P int, salt []byte, password string) (*Key, error) {
if len(salt) == 0 {
return nil, fmt.Errorf("scrypt() called with empty salt")
}

derKeys := &Key{}

keybytes := macKeySize + aesKeySize
scryptKeys, err := scrypt.Key([]byte(password), salt, N, R, P, keybytes)
if err != nil {
return nil, fmt.Errorf("error deriving keys from password: %v", err)
}

if len(scryptKeys) != keybytes {
return nil, fmt.Errorf("invalid numbers of bytes expanded from scrypt(): %d", len(scryptKeys))
}

// first 32 byte of scrypt output is the encryption key
copy(derKeys.Encrypt[:], scryptKeys[:aesKeySize])

// next 32 byte of scrypt output is the mac key, in the form k||r
macKeyFromSlice(&derKeys.MAC, scryptKeys[aesKeySize:])

return derKeys, nil
return plaintextLength, nil
}

// Valid tests if the key is valid.
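Decrypt's contract changes here from growing and returning a slice to filling a caller-supplied buffer and returning the plaintext length, which is why the checker above truncates with plainBuf[:n]. A hedged caller-side sketch of the new contract, assuming only what the diff shows:

```go
// decryptBlob shows the (int, error) contract: pass a buffer at least
// plaintext-sized (the ciphertext's IV+MAC overhead makes its own length a
// safe upper bound), then truncate to the returned count.
func decryptBlob(k *crypto.Key, ciphertextWithMac []byte) ([]byte, error) {
	plain := make([]byte, len(ciphertextWithMac))
	n, err := crypto.Decrypt(k, plain, ciphertextWithMac)
	if err != nil {
		return nil, err
	}
	return plain[:n], nil
}
```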
Some files were not shown because too many files have changed in this diff.