Compare commits

...

635 Commits

Author SHA1 Message Date
Alexander Neumann
f678c97346 Add VERSION file for 0.5.0 2017-03-11 12:34:27 +01:00
Alexander Neumann
7635feb591 Add cross-compilation to README and Manual 2017-03-09 21:24:38 +01:00
Alexander Neumann
087c2917aa Check version in build.go
Print a sensible error for Go < 1.7 (the "context" package is missing
there)
2017-03-09 21:24:30 +01:00
Alexander Neumann
9eae789cd2 Add VERSION file for 0.5.0-rc.1 2017-03-09 10:28:33 +01:00
Alexander Neumann
d6104935d7 Add VERSION file for 0.5.0rc1 2017-03-09 10:20:11 +01:00
Alexander Neumann
406af5916c Also include the source in SHA256SUMS 2017-03-09 10:19:45 +01:00
Alexander Neumann
93e4e4f4fb Merge pull request #866 from middelink/widespread-tags
Add --tag filtering to every command, where applicable
2017-03-09 09:55:26 +01:00
Pauline Middelink
8a05de537f Refactor prune and rebuild_index
Factor out and reuse `rebuildIndex()` in cmd_rebuild_index and cmd_prune.

Use contexts.
2017-03-08 20:30:52 +01:00
Pauline Middelink
8a92687d9a Refactor find and ls commands
Implement filtering by using `FindFilteredSnapshots()` to iterate over the snapshots

Refactor cmd_ls' `PrintNode()` into format.go, reuse its pretty printing in both `find`
and `ls` commands.

Use contexts.
2017-03-08 20:29:31 +01:00
Pauline Middelink
3432e7edcd Refactor tag to use FindFilteredSnapshots() 2017-03-08 20:28:44 +01:00
Pauline Middelink
3c6c17abcd Refactor forget and snapshots command
Implement filtering by using `FindFilteredSnapshots()` to iterate over the snapshots

Refactor cmd_snapshots' `PrintSnapshots()` so its pretty printing can be used from
both `forget` and `snapshots`.

Use contexts.
2017-03-08 20:24:58 +01:00
Pauline Middelink
11d237c252 New helper function FindFilteredSnapshots to iterate over snapshots
This helper function takes a set of filters and/or a list of snapshots
from the commandline. It returns a channel of *Snapshot.

When snapshot ids are given, they are checked for validity and their
corresponding Snapshots returned. The snapshot id "latest" is handled
specially to return either the last snapshot (no filters) or the last
snapshot matching the filters.

When no arguments are given, the filters are applied over all available
snapshots and these are returned.
2017-03-08 20:19:12 +01:00
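The shape of such a helper, as a minimal self-contained sketch — the Snapshot type, filter fields and IDs below are illustrative stand-ins, not restic's actual code:

    package main

    import "fmt"

    // Snapshot is a stand-in for restic's type.
    type Snapshot struct {
        ID   string
        Host string
        Tags []string
    }

    // matches applies the host and tag filters to one snapshot.
    func (s *Snapshot) matches(host string, tags []string) bool {
        if host != "" && host != s.Host {
            return false
        }
        for _, want := range tags {
            found := false
            for _, have := range s.Tags {
                if have == want {
                    found = true
                }
            }
            if !found {
                return false
            }
        }
        return true
    }

    // findFilteredSnapshots streams the matching snapshots over a
    // channel, so callers can simply range over the result.
    func findFilteredSnapshots(all []*Snapshot, host string, tags []string) <-chan *Snapshot {
        out := make(chan *Snapshot)
        go func() {
            defer close(out)
            for _, sn := range all {
                if sn.matches(host, tags) {
                    out <- sn
                }
            }
        }()
        return out
    }

    func main() {
        snaps := []*Snapshot{
            {ID: "abcd1234", Host: "foo", Tags: []string{"nightly"}},
            {ID: "ef567890", Host: "bar"},
        }
        for sn := range findFilteredSnapshots(snaps, "foo", nil) {
            fmt.Println(sn.ID) // abcd1234
        }
    }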
Pauline Middelink
0f7b6ec5ac Adapt key command to context world. 2017-03-08 20:17:30 +01:00
Pauline Middelink
b4526c4e6e Enable the use of context in restic
Set up a cancelable context in global options, hook it into the Ctrl-C handler
for proper cancel propagation.

Bump up minimal requirement for Go to version 1.7 in documentation
and test-build files.
2017-03-08 20:12:16 +01:00
Pauline Middelink
3eaaa0f286 Correct some typos in comments. 2017-03-08 20:09:24 +01:00
Pauline Middelink
b1c8071163 Add filtering to mount command 2017-03-08 19:59:19 +01:00
Pauline Middelink
3468108d4c Implement --tag processing to backup and restore command
Add `tags` argument to `FindLatestSnapshot()`
2017-03-08 19:55:58 +01:00
Alexander Neumann
33c8dd4ee5 Merge pull request #867 from middelink/optimize
Optimize List() pipeline
2017-03-08 11:50:47 +01:00
Alexander Neumann
5e2c4caa32 Merge pull request #861 from middelink/fix-841
Refactor cmd_forget
2017-03-08 09:56:01 +01:00
Pauline Middelink
7c989ca487 Optimize List() pipeline 2017-03-07 20:58:36 +01:00
Pauline Middelink
e9a2982ecd Add --path for snapshot filtering by path.
Add `--group-by-tags` for grouping on host,tags,dirs instead of host,dirs.

Borrow the snapshot printing from cmd_snapshot.

Closes #841
2017-03-07 19:28:42 +01:00
Alexander Neumann
f5a55a81f7 Merge pull request #865 from restic/handle-empty-snapshots
Refuse to create empty snapshots
2017-03-07 11:21:48 +01:00
Alexander Neumann
340f2c80a0 Merge pull request #864 from restic/find-case-insensitive
find: Add option to ignore case
2017-03-07 11:20:33 +01:00
Alexander Neumann
bb144436c7 Add test for empty snapshot 2017-03-07 11:17:15 +01:00
Alexander Neumann
b50d3ba805 Refuse to create empty snapshots
Closes #862
2017-03-07 11:12:34 +01:00
Alexander Neumann
00e7158381 Merge pull request #860 from middelink/factor-out
Create a helper function to get the terminal width
2017-03-07 10:59:39 +01:00
Alexander Neumann
221bef48c0 find: Add option to ignore case
Closes #859
2017-03-07 10:58:09 +01:00
Pauline Middelink
afcc1ba706 Create a helper function to get the terminal width
Rationale: contain terminal access to one file.
2017-03-06 11:23:00 +01:00
Alexander Neumann
bf88a62a16 Merge pull request #858 from middelink/snapshot-layout
Fix layout issue in cmd_snapshot "ascii art"
2017-03-06 09:10:35 +01:00
Alexander Neumann
cc140744d6 Merge pull request #857 from middelink/fix-856
Fix SamePaths() and make it into a receiver function
2017-03-06 09:10:33 +01:00
Pauline Middelink
354e8ffb82 Fix layout issue in cmd_snapshot "ascii art"
The layouter does not account for multiple tags when determining the
need for ascii art.

36fd8178  2017-03-03 21:35:04  abuseio.polyware.nl    NL          /
                                                      A       └──

vs

36fd8178  2017-03-03 21:35:04  abuseio.polyware.nl    NL      ┌── /
                                                      A       └──
2017-03-06 02:49:15 +01:00
Pauline Middelink
e1c828be3e Fix SamePaths() and make it into a receiver function
Add `HasPath(paths []string) bool` to Snapshot for testing if the
snapshot has at least the paths given to the function.

Reimplemented SamePaths(paths []string) so it does what the name implies:
comparing whether all given paths are in the snapshot.
2017-03-06 02:21:58 +01:00
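A sketch of the described HasPath semantics — every requested path must be present in the snapshot's path list (Snapshot here is a minimal stand-in, not restic's type):

    package main

    import "fmt"

    type Snapshot struct {
        Paths []string
    }

    // HasPath reports whether the snapshot contains at least the
    // given paths.
    func (sn *Snapshot) HasPath(paths []string) bool {
        for _, want := range paths {
            found := false
            for _, have := range sn.Paths {
                if have == want {
                    found = true
                }
            }
            if !found {
                return false
            }
        }
        return true
    }

    func main() {
        sn := &Snapshot{Paths: []string{"/home", "/etc"}}
        fmt.Println(sn.HasPath([]string{"/home"}))         // true
        fmt.Println(sn.HasPath([]string{"/home", "/var"})) // false
    }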
Alexander Neumann
d50dc9f649 Merge pull request #855 from middelink/fix-851
Add `tag` command to restic cli to manipulate tags on existing snapshots.
2017-03-05 20:20:20 +01:00
Alexander Neumann
07695b3622 Documentation fixes 2017-03-05 20:12:25 +01:00
Pauline Middelink
be15a9261a Add design and user documentation for the restic tag command 2017-03-05 19:55:23 +01:00
Pauline Middelink
1fa2313aef Snapshot: Add Original ID
The Original ID is used when the snapshot is modified (e.g. by `tag`
command). Adjust integration testing to assert correctness.
2017-03-05 19:55:23 +01:00
Pauline Middelink
26e266a951 Fix type of ID field in cmd_snapshots type Snapshot 2017-03-05 19:55:22 +01:00
Pauline Middelink
208edaa3d1 Snapshot: Add AddTags() and RemoveTags()
Both prevent duplicate tags.
2017-03-05 19:55:22 +01:00
Pauline Middelink
f6a258b4a8 Add tag: Manipulate tags on existing snapshots
Add integration testing.
2017-03-05 19:55:22 +01:00
Alexander Neumann
db08581352 Add hint for other backend URI formats 2017-03-05 16:58:24 +01:00
Alexander Neumann
0afeb68e6c Merge pull request #854 from middelink/dynamic-host,tag-columns-in-cmd_snapshots
Dynamic host,tag columns widths in snapshots command
2017-03-05 16:11:38 +01:00
Alexander Neumann
a809c9ac5f Merge pull request #853 from middelink/cmd_backup-tag-integration-test
restic backup --tag integration test
2017-03-05 16:08:02 +01:00
Pauline Middelink
45e9f35654 Size the host and tags columns dynamically based on their content. 2017-03-05 05:32:01 +01:00
Pauline Middelink
edd5c8b44d Add integration test to make sure cmd_backup adds tags when required. 2017-03-05 05:24:40 +01:00
Pauline Middelink
7238a3ee89 Changed cmd_snapshots to be testable (no more using os.Stdout) 2017-03-05 05:24:11 +01:00
Alexander Neumann
bbcab800c9 Merge pull request #850 from middelink/fix-848
Add progressbar to repack and blob remove phases of prune cmd.
2017-03-04 18:47:03 +01:00
Pauline Middelink
5564c78e53 English typo: rewriten > rewritten. 2017-03-04 17:43:58 +01:00
Pauline Middelink
792b81725e Add progressbar to repack and blob remove phases of prune cmd. 2017-03-04 17:38:34 +01:00
Alexander Neumann
b101efe26e Merge pull request #845 from restic/update-pkg-xattr
Update github.com/pkg/xattr
2017-03-04 16:49:07 +01:00
Alexander Neumann
becc34a159 Merge pull request #847 from middelink/fix-846
Display the proper amount of bytes we will be pruning from the repo.
2017-03-04 16:47:20 +01:00
Pauline Middelink
1273c6f3d4 Display the proper amount of bytes we will be pruning from the repo. 2017-03-04 15:17:44 +01:00
Alexander Neumann
82458d4de0 Update github.com/pkg/xattr
Closes #843
2017-03-04 14:23:15 +01:00
Alexander Neumann
7066cc17bb Merge pull request #844 from welpo/patch-1
Fix Minio Server URL
2017-03-04 13:51:05 +01:00
welpo
12ed2f65e3 Fix Minio Server URL
The previous link gave a 404
2017-03-04 13:19:50 +01:00
Alexander Neumann
90bc187355 Merge pull request #840 from middelink/master
Display absolute paths when displaying the output of ls and find.
2017-03-03 12:09:20 +01:00
Pauline Middelink
5ecaaea90b Really use absolute pathnames, not all systems use /. 2017-03-03 11:14:39 +01:00
Alexander Neumann
f9fc8674eb Merge pull request #837 from restic/index
Misc improvements
2017-03-03 09:44:06 +01:00
Pauline Middelink
039e81b04b Fix unit test, we need to check for absolute paths now. 2017-03-02 23:30:56 +01:00
Pauline Middelink
efb4315a1e Display absolute paths when displaying the output of ls and find. 2017-03-02 22:41:11 +01:00
Alexander Neumann
f53d33ba34 Make ArchiveReader a struct 2017-03-02 15:45:35 +01:00
Alexander Neumann
9b776dc7ab Use new Index implementation for rebuild-index 2017-03-02 15:23:59 +01:00
Alexander Neumann
1d64a1dcbb Merge pull request #835 from restic/fix-834
Allow filtering absolute paths
2017-03-02 15:22:17 +01:00
Alexander Neumann
7c92994f10 Clarify variable name 2017-03-02 14:52:18 +01:00
Alexander Neumann
bf97cc7efa Allow filtering absolute paths
Before, the restorer called the filter function with a relative path;
this prevented anchoring absolute patterns (which just never matched).
Now call the restore function with an absolute virtual path, starting at
the filepath separator.

Closes #834
2017-03-02 14:50:54 +01:00
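The anchoring problem is easy to reproduce with the standard library's filepath.Match (used here purely for illustration; restic has its own filter package): an absolute pattern can never match a relative path.

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        pattern := "/home/user/*"

        // Against a relative path the anchored pattern never matches...
        m, _ := filepath.Match(pattern, "home/user/file")
        fmt.Println(m) // false

        // ...but against an absolute virtual path it does.
        m, _ = filepath.Match(pattern, "/home/user/file")
        fmt.Println(m) // true
    }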
Alexander Neumann
4f5e9e939b Merge pull request #829 from restic/fix-822
Ignore empty lines in --files-from
2017-02-27 21:15:35 +01:00
Alexander Neumann
92ad35848a Fix Travis Go versions 2017-02-27 20:40:10 +01:00
Alexander Neumann
bb69b20aff Travis: Configure Go versions 2017-02-27 20:39:00 +01:00
Alexander Neumann
80e93621e1 Travis: Configure Go versions 2017-02-27 20:37:47 +01:00
Alexander Neumann
31ff506309 Ignore empty lines in --files-from
Closes #822
2017-02-27 19:42:00 +01:00
Alexander Neumann
4a51ddf741 Add section to the manual about same directory names
Related to #652
2017-02-21 20:46:26 +01:00
Alexander Neumann
3d1dc636d0 Merge pull request #817 from restic/add-forget-prune
Add `--prune` switch to `forget`
2017-02-21 11:40:30 +01:00
Alexander Neumann
8609ba28d0 Add News section 2017-02-21 11:12:07 +01:00
Alexander Neumann
685f5ebbd1 Add --prune switch to forget 2017-02-21 10:58:30 +01:00
Alexander Neumann
4e2f8145f5 Update Appveyor 2017-02-18 19:09:21 +01:00
Alexander Neumann
11b63d3417 Update Travis 2017-02-18 19:08:11 +01:00
Alexander Neumann
2c81bc35dc Merge pull request #814 from restic/fix-archiver
Correct archiver behavior in case of errors
2017-02-18 19:00:33 +01:00
Alexander Neumann
b8ce1b4e69 Correct archiver behavior in case of errors 2017-02-18 17:46:06 +01:00
Alexander Neumann
23c2717ab2 Merge pull request #809 from restic/fix-xattr
WIP: fix panic
2017-02-18 15:00:49 +01:00
Alexander Neumann
132afbe83b Correct error check for ENOTSUP, add errors.Wrap() 2017-02-18 14:36:37 +01:00
Alexander Neumann
ef52d15edd Continue if extended attribute cannot be read 2017-02-18 14:35:11 +01:00
Alexander Neumann
6df2f9e5ba Add support for extended attributes on FreeBSD 2017-02-18 14:35:11 +01:00
Alexander Neumann
eb9be4e884 Use github.com/pkg/xattr for extended attributes 2017-02-18 14:35:06 +01:00
Alexander Neumann
0674f32d79 Merge pull request #766 from jgfrm/nissue25
Add support for extended attributes (e.g. ACL)

Closes #766
2017-02-16 11:45:24 +01:00
Jaap Gordijn
49cae0904f Add support for extended attributes (e.g. ACL) 2017-02-16 11:44:51 +01:00
Alexander Neumann
40685a0e61 Merge pull request #803 from ibib/adds-json-support
Adds JSON support for the snapshots command
2017-02-16 11:26:14 +01:00
Alexander Neumann
4772a4986b Merge pull request #808 from restic/fix-807
restore: Make sure buffer is large enough
2017-02-15 19:58:03 +01:00
Alexander Neumann
c973a1f875 Merge pull request #806 from oysols/consistent-cli
Fix inconsistencies in CLI
2017-02-15 15:26:27 +01:00
Alexander Neumann
50d066befb restore: Make sure buffer is large enough 2017-02-15 15:19:28 +01:00
Øystein Olsen
c35e48291d Fix correct number of arguments for key command 2017-02-13 16:16:34 +01:00
Øystein Olsen
511278b66a Fix wrong description of rebuild-index command in help text 2017-02-13 16:07:29 +01:00
Øystein Olsen
514a11346d Add long description of list command in help text 2017-02-13 16:06:27 +01:00
Øystein Olsen
2eb75bb941 Consistently refer to 'the' instead of 'a' repository in help text 2017-02-13 16:05:25 +01:00
Øystein Olsen
9922ce97bf Use lowercase consistently in help text 2017-02-13 16:02:47 +01:00
ibib
3ed4127297 Adds JSON support for the snapshots command 2017-02-12 21:43:39 +01:00
Alexander Neumann
c83e608cce Merge pull request #800 from restic/jgfrm-hardlinks-fuse
fuse: correct nlink count for directories
2017-02-11 22:09:33 +01:00
Jaap Gordijn
3e2ae15882 Manual: Add section about hard links for fuse 2017-02-11 21:54:54 +01:00
Jaap Gordijn
3047702ded Correct hardlinks for fuse directories 2017-02-11 21:54:23 +01:00
Alexander Neumann
05cae4911d Merge pull request #798 from restic/fix-797
Checker: Propagate errors properly
2017-02-11 14:55:50 +01:00
Alexander Neumann
8c34eaad15 Improve error message 2017-02-11 14:28:15 +01:00
Alexander Neumann
0492eabff1 Improve error messages 2017-02-11 14:24:11 +01:00
Alexander Neumann
7797e084f9 checker: Pass on error loading an index 2017-02-11 14:22:14 +01:00
Alexander Neumann
b40aa66985 errors: Add method Wrapf 2017-02-11 14:22:04 +01:00
Alexander Neumann
76c06c5f2a Add check for modified index 2017-02-11 14:13:58 +01:00
Alexander Neumann
83538c745a Merge pull request #792 from restic/fix-791
s3: Increase MaxIdleConnsPerHost
2017-02-11 11:20:20 +01:00
Alexander Neumann
f266741f40 s3/rest: raise connection limit to 40 2017-02-11 10:40:51 +01:00
Alexander Neumann
4795a5c5d1 Merge pull request #795 from restic/fix-767
fuse: Add options --allow-other and --allow-root
2017-02-11 10:38:33 +01:00
Alexander Neumann
c14cb62cd7 fuse: Add options --allow-other and --allow-root
Closes #767
2017-02-10 21:58:10 +01:00
Alexander Neumann
b3ec01521e Merge pull request #794 from restic/fix-789
Use non-formatting functions of errors for strings
2017-02-10 21:42:59 +01:00
Alexander Neumann
6483df5ee4 Merge pull request #793 from restic/add-host-parameter
backup: Add `--hostname` parameter
2017-02-10 21:42:56 +01:00
Alexander Neumann
6275d69a36 fuse: Show link count 2017-02-10 21:16:48 +01:00
Alexander Neumann
6300c8df56 Merge pull request #763 from jgfrm/issue25
Support hard links
2017-02-10 20:58:39 +01:00
Jaap Gordijn
366bf4eb0c Support hard links
Closes #152
2017-02-10 20:58:19 +01:00
Alexander Neumann
21b358c742 backend tests: Always close reader 2017-02-10 20:49:46 +01:00
Alexander Neumann
7a0303f7ae s3: Make sure to return connection token 2017-02-10 20:49:37 +01:00
Alexander Neumann
64165ea4c8 s3: Hold connection semaphore until Close() 2017-02-10 20:17:52 +01:00
Alexander Neumann
c8fc789393 Use non-formatting functions of errors for strings
Commands used:

    $ gofmt -w -r 'errors.Fatalf(x) -> errors.Fatal(x)' src
    $ gofmt -w -r 'errors.Errorf(x) -> errors.New(x)' src

Closes #789
2017-02-10 19:39:49 +01:00
Alexander Neumann
f145e1de0f backup: Add --hostname parameter 2017-02-10 19:37:33 +01:00
Alexander Neumann
36dee7d892 s3: Increase MaxIdleConnsPerHost 2017-02-10 19:25:42 +01:00
Alexander Neumann
98ae30b513 Update Dockerfile 2017-02-10 17:25:48 +01:00
Alexander Neumann
073edd914d Merge pull request #783 from opennota/master
Fix some typos
2017-02-09 10:43:27 +01:00
Alexander Neumann
316b520ffb Merge pull request #782 from restic/fix-backup-stdin
Make sure backups read from stdin have a file name
2017-02-09 10:42:49 +01:00
opennota
25e459659a Fix some typos 2017-02-09 06:43:10 +07:00
Alexander Neumann
fdebb022e4 Make sure backups read from stdin have a file name 2017-02-08 22:37:02 +01:00
Alexander Neumann
ed1739acbd Merge pull request #779 from restic/benchmark-checker
checker: Reduce memory usage
2017-02-07 11:03:49 +01:00
Alexander Neumann
1f81919d4a checker: Reduce memory usage
benchmark              old bytes     new bytes     delta
    BenchmarkChecker-4     25551348      4288037       -83.22%
2017-02-06 21:19:27 +01:00
Alexander Neumann
436b5dc20c Add Blob.String() 2017-02-06 19:50:27 +01:00
Alexander Neumann
0c867b21ff Add benchmark for checker 2017-02-06 19:34:40 +01:00
Alexander Neumann
4cacb622eb Merge pull request #773 from opennota/master
Allow --files-from to take a dash for stdin (fixes #769)
2017-02-06 14:36:52 +01:00
opennota
24acb09a2a Allow --files-from to take a dash for stdin (fixes #769) 2017-02-06 18:43:44 +07:00
Alexander Neumann
ec45cdba84 Merge pull request #778 from restic/fix-777
prune: Close backend reader after download
2017-02-05 20:01:44 +01:00
Alexander Neumann
4ca134a41c prune: Close backend reader after download
Closes #777
2017-02-05 15:40:30 +01:00
Alexander Neumann
b85eae2aea Merge pull request #775 from restic/improve-backup-stdin
Add more tests for reading backups from stdin
2017-02-05 15:29:29 +01:00
Alexander Neumann
aee58a8c17 Add more tests for reading backups from stdin 2017-02-04 16:38:33 +01:00
Alexander Neumann
22f3e21266 Start error message with a lower case character 2017-02-03 17:06:06 +01:00
Alexander Neumann
d0de1ed2e4 Merge pull request #771 from restic/fix-770
Return an error if password is not set for stdin
2017-02-03 16:47:00 +01:00
Alexander Neumann
807fcf07d9 Return an error if password is not set for stdin
Closes #770
2017-02-03 15:53:07 +01:00
Alexander Neumann
f5faff9020 Merge pull request #768 from brikou/download_link
Add note about pre-compiled binaries
2017-02-03 11:11:05 +01:00
Brikou Carré
769a52df16 Bring back "Build restic" paragraph 2017-02-03 10:41:54 +01:00
Brikou Carré
dfb94290ae Add link to release page in doc 2017-02-03 09:09:49 +01:00
Alexander Neumann
f366a636e6 Fix build_release_binaries.sh 2017-02-02 10:23:18 +01:00
Alexander Neumann
a7c2f28f06 Add VERSION file for 0.4.0 2017-02-02 10:21:29 +01:00
Alexander Neumann
79e198451c Don't set GOMAXPROCS
This was a temporary fix for Go earlier than 1.5 to run code on all
available cores. We don't need that any more since we require at least
Go 1.6.
2017-02-02 10:18:07 +01:00
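For reference, the removed idiom looked like the snippet below; since Go 1.5 this is the default, so the call is redundant:

    package main

    import "runtime"

    func init() {
        // Before Go 1.5 the default was GOMAXPROCS=1, so programs had
        // to opt in to using all cores explicitly.
        runtime.GOMAXPROCS(runtime.NumCPU())
    }

    func main() {}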
Alexander Neumann
844c959912 Merge pull request #750 from restic/document-rest-create-repo
Document creating a new repo via REST
2017-02-02 10:03:07 +01:00
Alexander Neumann
19ec8f4a77 Add paragraph about code contributions 2017-02-01 12:00:38 +01:00
Alexander Neumann
f5aeda359c Merge pull request #764 from olgeni/typos
Fix typo.
2017-02-01 10:53:46 +01:00
olgeni
5a5687a506 Fix typo. 2017-01-31 10:09:59 +01:00
Alexander Neumann
7e9bfa51e9 REST: Make create repository idempotent 2017-01-30 19:33:17 +01:00
Alexander Neumann
c7d60279f7 Merge pull request #762 from restic/fix-759
Add 'index' to list of options for 'cat' command
2017-01-30 18:26:32 +01:00
Alexander Neumann
7d49c65dd0 Merge pull request #761 from restic/fix-758
Remove inconsistencies regarding the `cat` command
2017-01-30 18:25:49 +01:00
Alexander Neumann
32dfbad414 Merge pull request #760 from restic/fix-756
Allow listing blobs
2017-01-30 18:25:47 +01:00
Alexander Neumann
abd44ca7c5 Add 'index' to list of options for 'cat' command 2017-01-30 10:53:17 +01:00
Alexander Neumann
44f4ff9d37 Remove 'tree' from help text for 'cat' command 2017-01-30 10:50:52 +01:00
Alexander Neumann
658bee17e9 Clean up documentation, remove inconsistencies 2017-01-30 10:48:59 +01:00
Alexander Neumann
524ce01423 Remove 'cat tree' command 2017-01-30 10:48:50 +01:00
Alexander Neumann
41f59ffc78 Fix 'cat' command for tree blobs 2017-01-30 10:48:21 +01:00
Alexander Neumann
d7f52fd7e5 Allow listing blobs
Closes #756
2017-01-30 10:28:17 +01:00
Alexander Neumann
1f9db97d33 Merge pull request #728 from cit/improve-snapshot-output
Add box-drawing character to the snapshots command
2017-01-29 11:00:13 +01:00
Alexander Neumann
f806d8818d Add DELETE 2017-01-29 10:55:47 +01:00
Alexander Neumann
1a538509d0 Document creating a new repo via REST
Closes #736
2017-01-28 10:31:44 +01:00
Alexander Neumann
b2d00b2a86 Merge pull request #749 from restic/normalise-backend-api
Normalise the backend API
2017-01-27 13:30:30 +01:00
Alexander Neumann
31f6093513 Fix tests 2017-01-27 12:47:34 +01:00
Alexander Neumann
b9bddeff39 Normalise the backend API
This makes the following changes, before:

    type backend interface {
        // Test a boolean value whether a File with the name and type exists.
        Test(t FileType, name string) (bool, error)

        // Remove removes a File with type t and name.
        Remove(t FileType, name string) error
    }

After:

    type backend interface {
        // Test returns whether a File described by h exists.
        Test(h Handle) (bool, error)

        // Remove removes the File described by h.
        Remove(h Handle) error
    }
2017-01-26 22:02:22 +01:00
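Handle, as used in the new signatures, simply bundles the (type, name) pair that the old methods took as two parameters — sketched here with a minimal stand-in for FileType:

    package main

    type FileType string

    // Handle identifies a file in the backend by its type and name.
    type Handle struct {
        Type FileType
        Name string
    }

    func main() {
        h := Handle{Type: "snapshots", Name: "abcd1234"}
        _ = h
    }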
Florian Adamsky
54c2f622a4 Add box-drawing character to the snapshots command
Remove underscore of variable name

Format code with gofmt

Change snapshot output according to the discussion
2017-01-26 21:54:27 +01:00
Alexander Neumann
d55b56edd3 Merge pull request #748 from restic/fix-747
rest backend: Do not close the reader
2017-01-26 20:20:55 +01:00
Alexander Neumann
8d6fdb7a3e Fix packer manager test on Windows 2017-01-25 17:51:34 +01:00
Alexander Neumann
dceaae33ed PackerManager: Remove unused Finalize() method 2017-01-25 17:12:06 +01:00
Alexander Neumann
e8995b85b8 rest backend: Do not close the reader
Closes #747
2017-01-25 17:12:06 +01:00
Alexander Neumann
925a3cfad7 backend: Check that backends do not close the reader 2017-01-25 17:12:03 +01:00
Alexander Neumann
c99a44b122 Fix Random() function 2017-01-25 13:26:16 +01:00
Alexander Neumann
439d3107f9 Merge pull request #746 from restic/improve-fuse-memory
fuse: Improve memory usage
2017-01-24 14:13:01 +01:00
Alexander Neumann
afc593676a fuse: Improve memory usage
Discard blobs that aren't in use any more. This greatly reduces memory
usage and will probably only trigger on sequential read (e.g. for
restore via fuse).

Closes #480
2017-01-24 12:38:44 +01:00
Alexander Neumann
17d7af6ccc Merge pull request #745 from restic/fix-742
Fix restore/fuse with larger files
2017-01-24 12:12:35 +01:00
Alexander Neumann
0b982d3316 Add restore test with larger files 2017-01-24 11:51:21 +01:00
Alexander Neumann
26da14f315 fuse: improve tests 2017-01-24 11:42:50 +01:00
Alexander Neumann
2de7e03698 repository.LoadBlob: Read correct number of bytes 2017-01-24 11:42:50 +01:00
Alexander Neumann
3d2fbed55f Add test for LoadBlob 2017-01-24 11:42:50 +01:00
Alexander Neumann
0e445ec0f5 checker: Use TestRepository 2017-01-24 11:42:50 +01:00
Alexander Neumann
31055d88a5 Add debug messages 2017-01-24 11:42:42 +01:00
Alexander Neumann
0d125725bc Merge pull request #741 from restic/rework-backend-api
Rework backend API
2017-01-23 20:18:31 +01:00
Alexander Neumann
8b09b5b3cd Merge pull request #740 from restic/add-debug-profiles
Add debug memory/cpu profile options
2017-01-23 20:18:28 +01:00
Alexander Neumann
2d8a699515 Merge pull request #739 from restic/fix-archiver-test
Fix Archiver test: Clean up temp files
2017-01-23 20:18:25 +01:00
Alexander Neumann
0d95507909 Fix test for PackerManager 2017-01-23 19:00:15 +01:00
Alexander Neumann
8e722d8fee Fix saving pack: close temp file before removing 2017-01-23 18:45:15 +01:00
Alexander Neumann
03292d10cc backend: Rename Get() -> Load() 2017-01-23 18:11:10 +01:00
Alexander Neumann
cfc9e8b2fa backends: Remove Load() 2017-01-23 17:54:12 +01:00
Alexander Neumann
f382696ccf repository: Use ReadAt() instead of Load() 2017-01-23 17:54:12 +01:00
Alexander Neumann
e8fcc7e74c repack: Use Get() instead of Load()
In addition, use a tempfile instead of a buffer.
2017-01-23 17:54:12 +01:00
Alexander Neumann
2bd9c9247c checker: Remove Load() from test error backend 2017-01-23 17:54:12 +01:00
Alexander Neumann
fc235317fe backend: Use Get instead of Load for ReaderAt 2017-01-23 17:54:12 +01:00
Alexander Neumann
82d9163955 backend: Ensure Reader is closed on error 2017-01-23 17:54:12 +01:00
Alexander Neumann
4a354befe5 Fix checker test 2017-01-23 17:54:12 +01:00
Alexander Neumann
212936eb52 Make backend.LoadAll() similar to ioutil.ReadAll() 2017-01-23 17:54:12 +01:00
Alexander Neumann
05afedd950 Add backend.Get() 2017-01-23 17:54:11 +01:00
Alexander Neumann
a36c01372d Use streaming functions for saving data in repo 2017-01-23 17:54:11 +01:00
Alexander Neumann
9b48da5b4e Change backend Save() function signature 2017-01-23 17:54:11 +01:00
Alexander Neumann
c93f79f0f3 Add hashing package 2017-01-23 17:54:11 +01:00
Alexander Neumann
89a5152f7d Fix Archiver test: Clean up temp files 2017-01-23 17:53:59 +01:00
Alexander Neumann
47bd9cdf2f Add options for creating a memory or CPU profile 2017-01-23 17:52:26 +01:00
Alexander Neumann
84255f4f4f Vendor github.com/pkg/profile 2017-01-23 17:41:07 +01:00
Alexander Neumann
668a36a652 Add option for debug pprof service 2017-01-23 17:27:42 +01:00
Alexander Neumann
30ff7413be Merge pull request #737 from restic/fix-734
Index: Store pack ID
2017-01-22 22:41:16 +01:00
Alexander Neumann
af1cc0717b Add integration test for forget and prune 2017-01-22 22:23:30 +01:00
Alexander Neumann
5e3365d233 Index: Store pack ID 2017-01-22 22:10:36 +01:00
Alexander Neumann
4f780a01f9 Index: Test pack ID 2017-01-22 22:09:56 +01:00
Alexander Neumann
dc6a832cc3 Correct BenchmarkIndexSave 2017-01-22 09:59:19 +01:00
Alexander Neumann
164ba823e5 Merge pull request #731 from restic/improve-memory-usage
Improve memory usage
2017-01-20 15:56:31 +01:00
Alexander Neumann
8dd7fe82ff Add TestIndexSave 2017-01-20 14:46:14 +01:00
Alexander Neumann
6c1032548b Correct FAQ 2017-01-19 11:47:33 +01:00
Alexander Neumann
f0eeb16b33 Merge pull request #729 from restic/add-faq
Add FAQ document
2017-01-19 11:42:38 +01:00
Alexander Neumann
c88b0d20e8 Correct typo 2017-01-18 21:57:59 +01:00
Alexander Neumann
fbecae7362 Add FAQ document 2017-01-18 21:48:39 +01:00
Alexander Neumann
38ba5fbceb Merge pull request #727 from MirkoDziadzka/fix-typo
fix typo: outputcomprehensive
2017-01-18 11:46:56 +01:00
Mirko Dziadzka
d47758a540 fix typo 2017-01-18 10:46:04 +01:00
Alexander Neumann
dac18e3bf8 Improve BenchmarkIndexSave 2017-01-17 13:00:59 +01:00
Alexander Neumann
c4f44c7bcb Reduce memory consumption of TestCreateSnapshot 2017-01-17 12:56:20 +01:00
Alexander Neumann
73ad3d418d Index: Remove unneeded allocation 2017-01-17 12:46:41 +01:00
Alexander Neumann
36276c41b2 Add Benchmark for IndexSave 2017-01-17 10:40:58 +01:00
Alexander Neumann
d40f566e41 Index: Use slices instead of maps, reduce data 2017-01-17 10:40:58 +01:00
Alexander Neumann
cd9b526203 Preallocate pack entries list 2017-01-17 10:40:58 +01:00
Alexander Neumann
caabc4ec44 Reduce memory usage while decoding index 2017-01-17 10:40:58 +01:00
Alexander Neumann
73e7a2bea8 Add BenchmarkLoadIndex 2017-01-17 10:40:58 +01:00
Alexander Neumann
e463587bad Add BenchmarkDecodeIndex 2017-01-17 10:40:57 +01:00
Alexander Neumann
e571b6a656 Use the same buffer for decryption 2017-01-17 10:40:57 +01:00
Alexander Neumann
710499cf46 Add benchmark for LoadAndDecrypt 2017-01-17 10:40:57 +01:00
Alexander Neumann
32a5c2c1f6 Add a few functions to calculate Blob buffer len 2017-01-17 10:40:57 +01:00
Alexander Neumann
91dcb958e0 Fix tests 2017-01-17 10:40:57 +01:00
Alexander Neumann
9a5b9253c4 LoadBlob: use buffer as scratch space
benchmark               old bytes     new bytes     delta
    BenchmarkLoadBlob-4     1010128       2256          -99.78%
2017-01-17 10:40:57 +01:00
Alexander Neumann
215af5c60a Add LoadBlob benchmark 2017-01-17 10:40:57 +01:00
Alexander Neumann
8734c2466c Fix call to debug.Log() 2017-01-17 10:40:57 +01:00
Alexander Neumann
0556687584 Reduce memory usage for prune 2017-01-17 10:40:57 +01:00
Alexander Neumann
094e80f4a4 Merge pull request #723 from ulziibuyan/ls-latest-cmd
Added latest keyword in ls command.
2017-01-15 12:40:58 +01:00
Uzi
c13a0953c8 User interface inconsistency fixed 2017-01-14 11:19:47 +08:00
Alexander Neumann
9f1f174c99 Add script to build release binaries 2017-01-12 19:52:03 +01:00
Alexander Neumann
7e0a4c66e7 build.go: Make binaries completely static (disables cgo) 2017-01-12 19:51:26 +01:00
Alexander Neumann
62b1056860 build.go: Allow setting the output file name 2017-01-12 19:51:08 +01:00
Alexander Neumann
a3181dbead CI: cross-compile for OpenBSD 2017-01-12 19:50:41 +01:00
Uzi
02c02283cf Added latest keyword in ls command. 2017-01-12 19:24:08 +08:00
Alexander Neumann
4d93da9f68 Add VERSION file for 0.3.3 2017-01-08 10:46:43 +01:00
Alexander Neumann
4a6086a14b Merge pull request #718 from mholt/flag-priority
CLI options now override env vars
2017-01-02 20:31:20 +01:00
Matthew Holt
0a34a2d5d8 Consider the environment 2017-01-02 12:21:30 -07:00
Matthew Holt
a394b675b0 CLI options now override env vars 2017-01-02 11:14:22 -07:00
Alexander Neumann
04846b10bc Merge pull request #717 from restic/fix-367
Only add entries to indexes inside PackerManager
2017-01-02 17:18:59 +01:00
Alexander Neumann
f9501e97a2 Only add entries to indexes inside PackerManager
This was a nasty bug. Users reported that restic aborts with panic:

    panic: store new item in finalized index

The code calling panic() is in the Store() method of an index and guards
the failure case that an index is to be modified while it has already
been saved in the repo.

What happens here (at least that's what I suspect): PackerManager calls
Current() on a MasterIndex, which yields one index A. Concurrently,
another goroutine calls Repository.SaveFullIndex(), which in turn calls
MasterIndex.FullIndexes(), which (among others) yields the index A. Then
all indexes are marked as final. Then the other goroutine is executed
which adds an entry to the index A, which is now marked as final. Then
the panic occurs.

The commit solves this by removing MasterIndex.Current() and adding a
Store() method that stores the entry in one non-finalized index. This
method uses the same RWMutex as the other methods (e.g. FullIndexes()),
thereby ensuring that the full indexes can only be processed before or
after Store() is called.

Closes #367
2017-01-02 14:14:51 +01:00
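A minimal sketch of that fix — one lock guards both storing and finalizing, so the two can never interleave (simplified types, not restic's actual implementation):

    package main

    import "sync"

    type index struct {
        final   bool
        entries []string
    }

    // masterIndex stands in for restic's MasterIndex.
    type masterIndex struct {
        mu      sync.RWMutex
        indexes []*index
    }

    // Store replaces the old Current()-then-store pattern: the entry
    // is added to some non-finalized index while the lock is held.
    func (m *masterIndex) Store(entry string) {
        m.mu.Lock()
        defer m.mu.Unlock()
        for _, idx := range m.indexes {
            if !idx.final {
                idx.entries = append(idx.entries, entry)
                return
            }
        }
        m.indexes = append(m.indexes, &index{entries: []string{entry}})
    }

    // finalizeAll runs under the same lock, so it executes strictly
    // before or after any Store call, never in between.
    func (m *masterIndex) finalizeAll() {
        m.mu.Lock()
        defer m.mu.Unlock()
        for _, idx := range m.indexes {
            idx.final = true
        }
    }

    func main() {
        m := &masterIndex{}
        m.Store("blob-1")
        m.finalizeAll()
        m.Store("blob-2") // lands in a fresh non-final index; no panic
    }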
Alexander Neumann
3ef788765a Merge pull request #715 from zcalusic/master
Document REST backend
2017-01-02 11:13:35 +01:00
Alexander Neumann
8e16931949 Merge pull request #716 from zcalusic/rest-server-new-location
Rest server moved to https://github.com/restic/rest-server
2017-01-02 11:12:37 +01:00
Zlatko Čalušić
2267aca296 Rest server moved to https://github.com/restic/rest-server 2017-01-01 16:22:46 +01:00
Zlatko Čalušić
c70bc7ed0b Document REST backend
Closes #644
2016-12-31 13:14:44 +01:00
Alexander Neumann
8e3b81c5ec Merge pull request #713 from restic/update-travis
Update .travis.yml
2016-12-30 17:21:27 +01:00
Alexander Neumann
30975f7116 Update appveyor configuration 2016-12-30 17:07:42 +01:00
Alexander Neumann
0ef463d56a Update .travis.yml 2016-12-30 15:21:49 +01:00
Alexander Neumann
5132f5bfe6 Merge pull request #709 from restic/fix-708
Make sure cleanup is executed before exiting
2016-12-28 18:28:07 +01:00
Alexander Neumann
80457018d7 Make sure cleanup is executed before exiting
Closes #708
2016-12-28 10:53:31 +01:00
Alexander Neumann
b0997d05fb Merge pull request #704 from restic/remove-timestamp
Remove timestamp from `version` command
2016-12-19 22:22:43 +01:00
Alexander Neumann
3add2f0acb Merge pull request #703 from sjoerdsimons/master
Avoid duplicate backup paths
2016-12-19 22:21:18 +01:00
Alexander Neumann
166d1811a1 Remove timestamp from version command
This enables reproducible builds, for details see
https://reproducible-builds.org/docs/timestamps/
2016-12-19 21:14:12 +01:00
Sjoerd Simons
e1fc455079 Avoid duplicate backup paths
Target directories from the --files-from argument get added to the command
line args, after which all command line args were appended to the same
variable again, causing duplicates. Split the used variables to avoid
this.

Signed-off-by: Sjoerd Simons <sjoerd@luon.net>
2016-12-18 23:23:57 +01:00
Alexander Neumann
98237bf942 Add VERSION file for 0.3.2 2016-12-18 18:53:03 +01:00
Alexander Neumann
75f21f23ff Merge pull request #700 from restic/debug-panic
Make sure SaveFile always returns a node
2016-12-14 21:29:04 +01:00
Alexander Neumann
9885aeac3b Make sure SaveFile always returns a node 2016-12-14 18:56:11 +01:00
Alexander Neumann
85c87b9ab9 Add VERSION file for 0.3.1 2016-12-13 21:36:22 +01:00
Alexander Neumann
51cd78e16c Merge pull request #691 from restic/fix-604
Correctly save modified files
2016-12-10 17:31:20 +01:00
Alexander Neumann
e6a40af06d Treat changed files as a warning, not an error 2016-12-10 17:14:13 +01:00
Alexander Neumann
3fcbb4ac25 Use new Node if file has changed
Closes #604
2016-12-10 16:54:20 +01:00
Alexander Neumann
7d71bad4eb Test if modified files are correctly saved 2016-12-10 16:36:58 +01:00
Alexander Neumann
dbdfed6343 Merge pull request #690 from zcalusic/master
Even if file changes size during backup, still save it
2016-12-10 12:36:56 +01:00
Alexander Neumann
5e48c1fadc Merge pull request #688 from restic/fix-686
Save snapshot after saving all pack files
2016-12-10 12:33:58 +01:00
Zlatko Čalušić
deb6dd7f72 Even if file changes size during backup, still save it
Previously such files (typically log files) wouldn't be backed up at
all!

The proper behaviour is to back up what we can, and warn the operator
that the file is possibly incomplete. But it is a warning, not an error.

Closes #689
2016-12-10 12:24:45 +01:00
Alexander Neumann
c265673c8e Save snapshot after saving all pack files
Closes #686
2016-12-10 11:49:09 +01:00
Alexander Neumann
0fceeb20f1 Merge pull request #685 from jannic/patch-1
Update debug message
2016-12-06 08:16:33 +01:00
Jan Niehusmann
c5897e0d62 Update debug message
Since client.BucketExists was changed to return a separate 'found' value instead of reporting an error when the bucket doesn't exist, the error code path no longer implies a call to client.MakeBucket. So the second part of the debug message, "...trying to create the bucket", doesn't apply any more.
Also, the name of the return value was changed from 'ok' to 'found', matching the API documentation at https://docs.minio.io/docs/golang-client-api-reference#BucketExists.
2016-12-05 23:12:30 +01:00
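The resulting control flow, sketched against the minio-go API of that era (the method signatures here are assumptions; current releases take a context and an options struct): a missing bucket is now a found == false result rather than an error, so only that case implies MakeBucket.

    package main

    import (
        "log"

        minio "github.com/minio/minio-go"
    )

    func ensureBucket(client *minio.Client, bucket, location string) error {
        found, err := client.BucketExists(bucket)
        if err != nil {
            return err // a genuine failure, no longer "bucket missing"
        }
        if !found {
            log.Printf("bucket %q not found, creating it", bucket)
            return client.MakeBucket(bucket, location)
        }
        return nil
    }

    func main() {}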
Alexander Neumann
8d13f22c50 Merge pull request #683 from jannic/pr1
Omit "archived as %v" messages in quiet mode.
2016-12-03 11:15:24 +01:00
Alexander Neumann
1815536534 Update build.go 2016-12-03 11:14:30 +01:00
Jan Niehusmann
9267c25aa0 Omit "archived as %v" messages in quiet mode. 2016-12-03 10:28:49 +01:00
Alexander Neumann
281cbbdf2e Merge pull request #682 from jpmens/patch-1
Small typo in dry-run of remove snapshot
2016-12-03 10:11:58 +01:00
JP Mens
5996d671a0 Small typo in dry-run of remove snapshot 2016-12-02 17:33:05 +01:00
Alexander Neumann
ef9b974bcd Merge pull request #681 from zcalusic/master
Stop trying to detect Go version
2016-12-02 11:15:29 +01:00
Zlatko Čalušić
7e66b73ce0 Stop trying to detect Go version
It fails on pre-release versions, anyway.  It's enough to mention the oldest
supported version in README.md.  Anything older than the two latest Go
releases is a bad idea anyway, since it's unsupported by the Go development team.

Closes #680
2016-12-01 20:06:23 +01:00
Alexander Neumann
505a2097ad Manual: Add note about s3 bucket locations 2016-11-27 20:18:57 +01:00
Alexander Neumann
07380878fb Merge pull request #678 from restic/fix-676
Update github.com/elithrar/simple-scrypt
2016-11-19 19:22:44 +01:00
Alexander Neumann
3b29ae3c99 Update github.com/elithrar/simple-scrypt
Closes #676
2016-11-19 17:13:13 +01:00
Alexander Neumann
e5617b5fd1 Merge pull request #675 from restic/parent-check-hostname
Use the hostname filter to find a parent snapshot
2016-11-19 12:42:40 +01:00
Alexander Neumann
11f23ae663 Merge pull request #673 from Novex/restore-directory-metadata-for-existing-directories
Don't consider a pre-existing directory in the restore path to be a failure
2016-11-19 12:42:31 +01:00
Alexander Neumann
2828003d60 Test that existing files and dirs are restored 2016-11-15 21:41:41 +01:00
Alexander Neumann
16cef3b4c6 Use the hostname filter to find a parent snapshot
Closes #674
2016-11-15 21:04:51 +01:00
Alexander Neumann
699f39e3cf FindLatestSnapshot: Rename parameter to clarify meaning 2016-11-15 21:03:54 +01:00
Seb Patane
33b6a7381b Don't consider a pre-existing directory in the restore path to be a failure
* When a directory already exists, CreateDirAt returns an error stating so
  * This means that the restoreMetadata step is skipped, so for directories which already exist no file permissions, owners, groups, etc will be restored on them
* Not returning the error if it's a "directory exists" error means the metadata will get restored
  * It also removes the superfluous "error for ...: mkdir ...: file exists" messages
* This makes the behaviour of directories consistent with that of files (which always have their content & metadata restored, regardless of whether they existed or not)
2016-11-14 17:53:09 +10:00
Alexander Neumann
190673b24a Merge pull request #657 from AlexanderThaller/read_backup_files_from_file
Read files to backup from a file
2016-11-12 21:47:11 +01:00
Alexander Thaller
b7b03dbd4a Added new flag to backup subcommand that reads the files to backup from a file 2016-11-12 15:45:32 +01:00
Alexander Neumann
56009dd16e Merge pull request #670 from restic/remove-fadvise
Remove fadvise
2016-11-10 23:42:21 +01:00
Alexander Neumann
b56bde3f61 Remove fadvise
This commit removes the use of FADV_DONTNEED, which also purges active
cached pages for other processes.
2016-11-10 22:21:22 +01:00
Alexander Neumann
b1ed74eb43 Merge pull request #669 from zcalusic/master
Fix REST backend HTTP keepalive
2016-11-10 21:05:14 +01:00
Zlatko Čalušić
d8f0e7cbd1 Fix REST backend HTTP keepalive
This is subtle.  A combination of a fast client disk (read: SSD) with lots
of files and a fast network connection to restic-server would suddenly
start producing lots of "dial tcp: connect: cannot assign requested
address" errors during the backup stage.  Further inspection revealed that
the client machine was plagued with TCP sockets in the TIME_WAIT state.  When
the ephemeral port range was finally exhausted, no more sockets could be
opened, so restic would freak out.

To understand the magnitude of this problem: with ~18k ports and a default
timeout of 60 seconds, more than 300 HTTP connections per second were
created and torn down.  Yeah, restic-server is that fast. :)

As it turns out, this behavior was the product of two subtle issues:

1) The body of HTTP response wasn't read completely with io.ReadFull()
   at the end of the Load() function.  This deactivated HTTP keepalive,
   so already open connections were not reused, but closed instead, and
   new ones opened for every new request.  io.Copy(ioutil.Discard,
   resp.Body) before resp.Body.Close() remedies this.

2) Even with the above fix, somehow having MaxIdleConnsPerHost at its
   default value of 2 wasn't enough to stop reconnecting.  It is hard to
   understand why this would be so detrimental, it could even be some
   subtle Go runtime bug.  Anyhow, setting this value to match the
   connection limit, as set by connLimit global variable, finally nails
   this ugly bug.

I fixed several other places where the response body wasn't read in
full (or at all).  For example, json.NewDecoder() is also known not to
read the whole body of response.

Unfortunately, this is not over yet. :( The check command fires up
to 40 simultaneous connections to the restic-server.  Then, once again,
MaxIdleConnsPerHost is too low to support keepalive, and sockets in the
TIME_WAIT state pile up.  But, as this kind of concurrency absolutely
kills the poor disk on the server side, that is a completely different
bug.
2016-11-10 09:32:07 +01:00
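Both fixes in one self-contained sketch (connLimit and the URL are illustrative): drain the response body before closing so the connection is returned to the keepalive pool, and raise MaxIdleConnsPerHost from its default of 2 to match the concurrency.

    package main

    import (
        "io"
        "io/ioutil"
        "net/http"
    )

    const connLimit = 40 // illustrative; mirrors the global mentioned above

    var client = &http.Client{
        Transport: &http.Transport{MaxIdleConnsPerHost: connLimit},
    }

    // load drains the body to EOF before closing it; an HTTP/1.x
    // connection is only reused once the response is fully read.
    func load(url string) error {
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        _, err = io.Copy(ioutil.Discard, resp.Body)
        return err
    }

    func main() {
        _ = load("http://localhost:8000/config")
    }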
Alexander Neumann
5e721afb5d doc/mkdocs: Improve code highlighting
Additionally, refresh the restic sample output.
2016-11-08 20:23:39 +01:00
Alexander Neumann
149c01a86a Merge pull request #659 from restic/device-freebsd
fs.DeviceID(): Return errors when fi is nil
2016-11-05 13:35:16 +01:00
Alexander Neumann
51322a1055 selectFunc: handle nil 2016-11-05 12:38:33 +01:00
Alexander Neumann
c5bc802ff0 fs.DeviceID(): Return errors when fi is nil 2016-11-05 12:38:17 +01:00
Alexander Neumann
6b88d3b5d0 Merge pull request #651 from justinclift/issue649v1
Remove redundant check of error var e
2016-10-26 16:06:14 +02:00
Justin Clift
ecc1f92787 Remove redundant check of error var e
As per #649
2016-10-25 18:10:53 +01:00
Alexander Neumann
d4f76fbe26 Merge pull request #650 from restic/forget-remove-index-load
forget: do not load index
2016-10-24 14:33:11 +02:00
Alexander Neumann
1dd72693f9 forget: Remove unneeded index loading 2016-10-24 14:01:23 +02:00
Alexander Neumann
fe1013e779 cmds/ls: Format timestamp 2016-10-19 22:11:37 +02:00
Alexander Neumann
84ca5172f0 Remove Debian UID from GPG key printout 2016-10-17 13:10:16 +02:00
Alexander Neumann
7c49255c2a Add hints how to use the go tool and direnv 2016-10-17 13:09:56 +02:00
Alexander Neumann
a5a9c42185 Merge pull request #646 from stakewinner00/master
don't print status info when running in the background
2016-10-15 20:09:42 +02:00
David
5f8a6cea6f don't print status info if running in the background
clean

fix OS issues & format code

fix issues
2016-10-15 18:12:19 +00:00
Alexander Neumann
50212805aa Merge pull request #643 from restic/update-poly1305
Update golang.org/x/crypto/poly1305
2016-10-14 15:51:57 +02:00
Alexander Neumann
cd7feb0148 Update golang.org/x/crypto/poly1305 2016-10-14 12:44:06 +02:00
Alexander Neumann
974f2f78a9 Merge pull request #641 from restic/fix-640
Improve error message for 'forget'
2016-10-13 20:41:29 +02:00
Alexander Neumann
250b36eeb1 Improve error message for 'forget'
$ bin/restic forget /d 7 /w 4 /m 12
    argument "/d" is not a snapshot ID, ignoring
    argument "7" is not a snapshot ID, ignoring
    argument "/w" is not a snapshot ID, ignoring
    argument "4" is not a snapshot ID, ignoring
    argument "/m" is not a snapshot ID, ignoring
    could not find a snapshot for ID "12", ignoring
2016-10-10 20:55:02 +02:00
Alexander Neumann
6f72164bbe Merge pull request #638 from hmsdao/patch-fixpath
Added long paths fix for samba network shares
2016-10-05 17:07:53 +02:00
Daniel Örn
ba8d960c8f using backticks instead of double quotes 2016-10-05 08:26:32 +02:00
Daniel Örn
84421a7c68 structured file with gofmt 2016-10-05 07:30:46 +02:00
Daniel Örn
5c7325f44a Added long paths fix for samba network shares 2016-10-05 07:09:56 +02:00
Alexander Neumann
c45b498a8b Merge pull request #637 from ckemper67/s3-join
Use path.Join to create the s3 object name within the bucket.
2016-10-03 22:38:28 +02:00
Christian Kemper
a4261dcc9c Use path.Join to create the s3 object name within the bucket.
path.Join already automatically skips empty path segments when
joining, so this simplifies the s3Path code.
2016-10-02 16:56:07 -07:00
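A quick demonstration of the property relied on here — path.Join drops empty segments and cleans the result, so an empty prefix does not leave a stray separator in the object name (the names are illustrative):

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        fmt.Println(path.Join("", "data", "abcd1234"))       // data/abcd1234
        fmt.Println(path.Join("prefix", "data", "abcd1234")) // prefix/data/abcd1234
    }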
Alexander Neumann
d1ecdf7441 Add VERSION file for 0.3.0 2016-10-02 18:24:14 +02:00
Alexander Neumann
088ca033f8 Reword README 2016-10-02 16:08:05 +02:00
Alexander Neumann
5b7dd32c20 Manual: Reword section about fuse support 2016-10-02 16:03:02 +02:00
Alexander Neumann
eb94395f3d Merge pull request #635 from restic/fix-633
Fix short-hand option clash
2016-09-29 21:39:05 +02:00
Alexander Neumann
22f5fc5739 Improve help text for slice options 2016-09-29 20:39:55 +02:00
Alexander Neumann
e994cacbfe Fix short-hand option clash 2016-09-29 20:37:45 +02:00
Alexander Neumann
3114d41cb7 Merge pull request #632 from restic/rework-debug
Rework debug message printing
2016-09-28 21:11:02 +02:00
Alexander Neumann
968b2ece43 Add section to the manual about debug message filters 2016-09-28 20:22:22 +02:00
Alexander Neumann
feed54caef Remove timing, simplify function matching 2016-09-28 20:10:40 +02:00
Alexander Neumann
4eddcb344e Update calls to debug.Log() 2016-09-28 19:56:03 +02:00
Alexander Neumann
2ae06a7a01 Rework debug log function 2016-09-28 19:56:03 +02:00
Alexander Neumann
25945718a1 Fix recursive call to debug.Log 2016-09-28 19:56:03 +02:00
Alexander Neumann
254188f38f Merge pull request #631 from restic/switch-to-cobra
Switch to cobra/pflag for CLI
2016-09-28 19:54:59 +02:00
Alexander Neumann
3601c39177 Add comments 2016-09-27 20:22:01 +02:00
Alexander Neumann
02f7bb0d4c Add mousetrap library for Windows 2016-09-27 20:13:22 +02:00
Alexander Neumann
565d72ef36 Use cobra for all commands 2016-09-27 19:53:03 +02:00
Alexander Neumann
3806623c23 Vendor cobra and pflag 2016-09-27 19:52:48 +02:00
Alexander Neumann
0fa12839a5 Remove go-flags 2016-09-27 19:52:48 +02:00
Alexander Neumann
a257a613d7 Fix debug log 2016-09-27 19:52:48 +02:00
Alexander Neumann
0a752b9fab test helpers: Always print stack trace 2016-09-27 19:50:26 +02:00
Alexander Neumann
eeec0d63c2 Merge pull request #630 from restic/remove-unused
Remove unused bits and pieces
2016-09-24 12:03:26 +02:00
Alexander Neumann
04d6b5da2f Remove more unused bits 2016-09-21 20:45:18 +02:00
Alexander Neumann
1dfd3b8aa3 Remove unused bits and pieces
Reported by https://github.com/dominikh/go-unused
2016-09-21 20:22:32 +02:00
Alexander Neumann
0873821b98 Add section about --one-file-system to manual 2016-09-18 20:18:52 +02:00
Alexander Neumann
0a9cbd47c7 Merge pull request #626 from rfjakob/master
Add "-x", "--one-file-system" option
2016-09-18 20:03:58 +02:00
Alexander Neumann
b61027b48d Merge pull request #627 from restic/fix-fuse-test
fuse: fix tests for snapshots with same timestamps
2016-09-18 19:55:12 +02:00
Jakob Unterwurzacher
53701891a1 Add "-x", "--one-file-system" option
Equivalent to rsync's "-x" option.

Notes to the naming:

"--exclude-other-filesystems"
is used by Duplicity,

"--one-file-system"
is used rsync and tar.

This latter should be more familiar to the user.
2016-09-18 18:52:30 +02:00
Alexander Neumann
68b462d057 fuse: Add test for same timestamps 2016-09-18 18:30:25 +02:00
Alexander Neumann
649f789190 fuse: Fix test for timestamps with same second 2016-09-18 18:13:39 +02:00
Alexander Neumann
7b3e319398 Merge pull request #625 from restic/fix-624
fuse: correctly handle snapshots
2016-09-18 15:35:50 +02:00
Alexander Neumann
5494c1858e fuse: correctly handle snapshots
The fuse code kept adding snapshots to the top-level dir "snapshots". In
addition, snapshots with the same timestamp (same second) were not added
correctly; they are now suffixed by an incrementing counter, e.g.:

    dr-xr-xr-x 1 fd0 users 0 Sep 18 15:01 2016-09-18T15:01:44+02:00
    dr-xr-xr-x 1 fd0 users 0 Sep 18 15:01 2016-09-18T15:01:48+02:00
    dr-xr-xr-x 1 fd0 users 0 Sep 18 15:01 2016-09-18T15:01:48+02:00-1

Closes #624
2016-09-18 15:04:39 +02:00
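The suffixing logic, as a simplified sketch (not restic's actual fuse code):

    package main

    import "fmt"

    // uniqueNames suffixes colliding timestamps with "-1", "-2", ...
    // so every directory entry gets a distinct name.
    func uniqueNames(timestamps []string) []string {
        seen := make(map[string]int)
        names := make([]string, 0, len(timestamps))
        for _, ts := range timestamps {
            name := ts
            if n := seen[ts]; n > 0 {
                name = fmt.Sprintf("%s-%d", ts, n)
            }
            seen[ts]++
            names = append(names, name)
        }
        return names
    }

    func main() {
        fmt.Println(uniqueNames([]string{
            "2016-09-18T15:01:44+02:00",
            "2016-09-18T15:01:48+02:00",
            "2016-09-18T15:01:48+02:00",
        }))
    }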
Alexander Neumann
c5763e59d5 Merge pull request #623 from restic/fix-622
Improve error messages for open repo
2016-09-18 14:04:30 +02:00
Alexander Neumann
b090c73bd4 Remove wrapper functions in errors package
This way, our own errors package does not appear in the stack traces.
2016-09-18 13:28:59 +02:00
Alexander Neumann
2b9a408ccc Return a fatal for location.Parse 2016-09-18 13:28:41 +02:00
Alexander Neumann
83c35bd6b5 Do not print stack trace when open repo failed
Closes #622
2016-09-18 13:24:46 +02:00
Alexander Neumann
98b012a04e Merge pull request #620 from restic/watch-529
Add verbose error when marshalling a node fails
2016-09-17 11:05:00 +02:00
Alexander Neumann
a9af896ddd Add verbose error when marshalling a node fails
This code is introduced to watch for issue #529, in which two users
describe that restic failed encoding a time in a node to JSON with the
error message:

    panic: json: error calling MarshalJSON for type *restic.Node: json: error calling MarshalJSON for type time.Time: Time.MarshalJSON: year outside of range [0,9999]

The error message now is:

    panic: Marshal: json: error calling MarshalJSON for type *restic.Node: node /home/fd0/shared/work/restic/restic/.git/hooks/applypatch-msg.sample has invalid ModTime year -1: -0001-01-02 03:04:05.000000006 +0053 LMT
2016-09-17 10:43:04 +02:00
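A small illustration of the added check (the node path is hypothetical; the year bounds come from time.Time.MarshalJSON):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Validating the year up front lets the error name the
        // offending node instead of surfacing as an opaque panic
        // deep inside encoding/json.
        mod := time.Date(-1, time.January, 2, 3, 4, 5, 6, time.UTC)
        if y := mod.Year(); y < 0 || y > 9999 {
            fmt.Printf("node %s has invalid ModTime year %d: %v\n",
                "/home/user/file", y, mod)
        }
    }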
Alexander Neumann
309dca8179 Merge pull request #619 from restic/update-deps
Update all vendored dependencies
2016-09-15 22:50:44 +02:00
Alexander Neumann
8144cd24d6 Add golang.org/x/crypto/ed25519 2016-09-15 22:36:49 +02:00
Alexander Neumann
0ce8191be5 Add golang.org/x/crypto/curve25519 2016-09-15 22:36:29 +02:00
Alexander Neumann
595f2582fa Update golang.org/x/sys/unix 2016-09-15 22:35:45 +02:00
Alexander Neumann
da83bd8265 Update golang.org/x/net/context 2016-09-15 22:34:06 +02:00
Alexander Neumann
799cc37c22 Update golang.org/x/crypto/ssh 2016-09-15 22:33:32 +02:00
Alexander Neumann
35ba817128 Update golang.org/x/crypto/scrypt 2016-09-15 22:32:38 +02:00
Alexander Neumann
29a61950dd Update golang.org/x/crypto/poly1305 2016-09-15 22:32:17 +02:00
Alexander Neumann
acd39eaab5 Update golang.org/x/crypto/pbkdf2 2016-09-15 22:31:49 +02:00
Alexander Neumann
3d55b54f3d Update github.com/pkg/sftp 2016-09-15 22:31:18 +02:00
Alexander Neumann
daae3500dd Update branch for github.com/kr/fs 2016-09-15 22:30:27 +02:00
Alexander Neumann
64fe9ec048 Update github.com/jessevdk/go-flags 2016-09-15 22:29:49 +02:00
Alexander Neumann
cb80a70aca Update bazil.org/fuse 2016-09-15 22:26:23 +02:00
Alexander Neumann
24398d2b9d Merge pull request #618 from restic/rework-ci-fuse-tests
Cleanup CI tests for fuse
2016-09-15 21:53:44 +02:00
Alexander Neumann
d4a2d70089 Retry umount for integration tests 2016-09-15 21:37:50 +02:00
Alexander Neumann
9add72e9d6 Exclude unneeded test run without fuse tests 2016-09-15 21:37:50 +02:00
Alexander Neumann
e7fc908ff1 Run fuse tests on Linux 2016-09-15 21:25:59 +02:00
Alexander Neumann
4ffca0f4b4 Improve integration tests for fuse 2016-09-15 21:17:20 +02:00
Alexander Neumann
a0f3e94655 fuse: handle duplicate timestamps for snapshots
This closes #606, which fails because several snapshots are created with
exactly the same timestamp, and the code checks that for each snapshot
there is a dir in the fuse mount. This fails for colliding timestamps,
so we now add a suffix "-1", "-2" etc for each duplicate timestamp.
2016-09-15 21:15:49 +02:00
Alexander Neumann
6485a6cdc0 Simplify mount logic 2016-09-15 19:59:07 +02:00
Alexander Neumann
931f5cdd33 Merge pull request #616 from restic/add-snapshot-tags
Add tags to snapshots
2016-09-14 20:58:12 +02:00
Alexander Neumann
3975d76f23 Correct filenames for expire policy tests 2016-09-13 21:20:41 +02:00
Alexander Neumann
bf6602bc1b Update golden file 2016-09-13 21:19:57 +02:00
Alexander Neumann
a85ffc66ae Add documentation for tags 2016-09-13 21:09:55 +02:00
Alexander Neumann
828267aaa3 Fix status for stdin archiver 2016-09-13 21:01:29 +02:00
Alexander Neumann
a77c615909 Fix 'forget' command with tags 2016-09-13 20:56:18 +02:00
Alexander Neumann
cfdf4c92f7 Add --keep-tag to forget command 2016-09-13 20:37:11 +02:00
Alexander Neumann
0f9fb37c78 Add tags to forget command 2016-09-13 20:20:55 +02:00
Alexander Neumann
673bce936e Add tags to 'backup' and 'snapshots' commands 2016-09-13 20:20:52 +02:00
Alexander Neumann
1f83635267 Add tags to snapshots and filter 2016-09-13 20:12:55 +02:00
Alexander Neumann
2d7e1b5804 Merge pull request #615 from kerel-fs/fix/manual
doc/Manual: Update usage help listing
2016-09-12 21:20:18 +02:00
Fabian P. Schmidt
085cf36199 doc/Manual: Update usage help listing 2016-09-12 20:42:38 +02:00
Alexander Neumann
ceb4a3ecc0 Merge pull request #613 from restic/read-password-from-file
Read password from file
2016-09-12 20:37:08 +02:00
Alexander Neumann
cf7795ce64 Merge pull request #614 from restic/improve-prune-stats
Improve statistics for `prune`
2016-09-12 20:37:00 +02:00
Alexander Neumann
223dc78acb Improve statistics for prune
Sample:

    counting files in repo
    building new index for repo
    [0:00] 100.00%  22 / 22 packs
    repository contains 22 packs (1377 blobs) with 90.610 MiB bytes
    processed 1377 blobs: 0 duplicate blobs, 0B duplicate
    load all snapshots
    find data that is still in use for 1 snapshots
    [0:00] 100.00%  1 / 1 snapshots
    found 409 of 1377 data blobs still in use, removing 968 blobs
    will delete 10 packs and rewrite 10 packs, this frees 64.232 MiB
    creating new index
    [0:00] 100.00%  7 / 7 packs
    saved new index as df467c6e
    done

Closes #581
2016-09-12 14:26:47 +02:00
Alexander Neumann
f63cd12569 Document new option 2016-09-12 14:10:36 +02:00
Alexander Neumann
65afeba19a Add option to read the password from a file 2016-09-12 14:09:22 +02:00
Alexander Neumann
791f73e0db Merge pull request #608 from rosetree/patch-1
Fix a small typo in `stdin` example
2016-09-05 19:27:26 +02:00
Micha Rosenbaum
8ded453ab0 Fix a small typo in stdin example
s/--stdin-filenam/--stdin-filename/
2016-09-05 19:01:53 +02:00
Alexander Neumann
e443454c4b Add OS and Arch to 'version' output 2016-09-04 15:46:50 +02:00
Alexander Neumann
1dd9a58e5a Merge pull request #600 from restic/restructure
WIP: restructure code
2016-09-04 15:36:26 +02:00
Alexander Neumann
b628bcee27 Remove redundant ParseID 2016-09-04 14:38:18 +02:00
Alexander Neumann
dfc0cbf3a8 Use one test password 2016-09-04 14:30:14 +02:00
Alexander Neumann
512a92895f Rename WithTestEnvironment -> Env 2016-09-04 14:29:04 +02:00
Alexander Neumann
6ab425f130 Remove SetupRepo 2016-09-04 13:24:51 +02:00
Alexander Neumann
f5b9ee53a3 Fix mock.Repository 2016-09-04 13:18:25 +02:00
Alexander Neumann
ea073f58cf Correct comment 2016-09-04 13:08:09 +02:00
Alexander Neumann
bef5c4acb8 Add mock.Repository, Rework SetupRepo 2016-09-04 12:52:43 +02:00
Alexander Neumann
b5b3c0eaf8 Add repository.SaveTree 2016-09-03 21:10:25 +02:00
Alexander Neumann
1fb80bf0e2 Fix fuse mount 2016-09-03 21:10:25 +02:00
Alexander Neumann
436332d5f2 LoadDataBlob -> LoadBlob 2016-09-03 21:10:25 +02:00
Alexander Neumann
fe8c12c798 Replace repository.SaveAndEncrypt with SaveBlob() 2016-09-03 21:10:25 +02:00
Alexander Neumann
1cc59010f5 Remove LoadJSONPack, un-export loadBlob 2016-09-03 21:10:25 +02:00
Alexander Neumann
878c1cd936 Add more comments 2016-09-03 21:10:25 +02:00
Alexander Neumann
5170c4898a Address hound comments 2016-09-03 21:10:25 +02:00
Alexander Neumann
2054e3c026 Fix tests 2016-09-03 21:10:25 +02:00
Alexander Neumann
ffbe05af9b Rework crypto, use restic.Repository everywhere 2016-09-03 21:10:25 +02:00
Alexander Neumann
84f95a09d7 Introduce LoadTreeBlob and LoadDataBlob 2016-09-03 21:10:25 +02:00
Alexander Neumann
573410afab Fix archiver test 2016-09-03 21:10:25 +02:00
Alexander Neumann
619939ccd9 Reorder methods in interface Repository 2016-09-03 21:10:25 +02:00
Alexander Neumann
714a5d1dc4 Move tree walker to restic/walk 2016-09-03 21:10:25 +02:00
Alexander Neumann
bc42dbdf87 Create package restic/errors 2016-09-03 21:10:24 +02:00
Alexander Neumann
765b5437bd Fix command 'dump' 2016-09-03 21:10:24 +02:00
Alexander Neumann
5d7b38cabf Remove sentinel errors 2016-09-03 21:10:24 +02:00
Alexander Neumann
debf1fce54 Remove IDSize, TestRandomID -> NewRandomID 2016-09-03 21:10:24 +02:00
Alexander Neumann
0045f2fb61 Remove functions 2016-09-03 21:10:24 +02:00
Alexander Neumann
5764b55aee Rename Node.FileType -> Type 2016-09-03 21:10:24 +02:00
Alexander Neumann
5e3a41dbd2 Rename struct member FileType -> Type 2016-09-03 21:10:24 +02:00
Alexander Neumann
88d0f24ce7 Reduce lock timeout to zero 2016-09-03 21:10:24 +02:00
Alexander Neumann
eb6e3ba8b3 Fix imported package 2016-09-03 21:10:24 +02:00
Alexander Neumann
528c301891 Last fixes for integration tests 2016-09-03 21:10:24 +02:00
Alexander Neumann
f7ae0cb78f Fix cmds/restic for new structure 2016-09-03 21:10:24 +02:00
Alexander Neumann
3695ba5882 Tests pass for restic/ 2016-09-03 21:10:24 +02:00
Alexander Neumann
4c95d2cfdc wip 2016-09-03 21:10:24 +02:00
Alexander Neumann
cc6a8b6e15 wip 2016-09-03 21:10:24 +02:00
Alexander Neumann
51d8e6aa28 wip 2016-09-03 21:10:24 +02:00
Alexander Neumann
f0600c1d5f wip 2016-09-03 21:10:24 +02:00
Alexander Neumann
90da66261a Copy ID from backend to restic 2016-09-03 21:10:24 +02:00
Alexander Neumann
82c2dafb23 Copy interfaces and basic types to package restic/ 2016-09-03 21:10:24 +02:00
Alexander Neumann
bfdd26c541 Remove (unused) cache implementation 2016-09-03 21:10:24 +02:00
Alexander Neumann
e699f6d1bd Update doc comment 2016-09-03 21:10:24 +02:00
Alexander Neumann
fae65ebc61 Merge pull request #602 from restic/update-chunker
Update chunker
2016-09-03 21:10:18 +02:00
Alexander Neumann
f744c3534d Update chunker 2016-09-03 20:56:21 +02:00
Alexander Neumann
9ce40761c8 Remove coveralls.io 2016-09-03 11:06:09 +02:00
Alexander Neumann
48924009fe Add codecov.io 2016-09-03 10:44:37 +02:00
Alexander Neumann
d497fb6966 Merge pull request #599 from restic/remove-lowlevel-syscall
Replace lowlevel syscall to restore symlink times
2016-08-31 19:28:23 +02:00
Alexander Neumann
5bc7f150f8 Merge pull request #598 from restic/update-minio-go
Update minio-go
2016-08-31 19:28:17 +02:00
Alexander Neumann
a6eda344a4 Update minio-go 2016-08-31 18:08:43 +02:00
Alexander Neumann
1aa52e5e1e Replace lowlevel syscall to restore symlink times 2016-08-30 21:45:16 +02:00
Alexander Neumann
769f06cea2 Merge pull request #580 from restic/remove-juju-errors
Change errors library
2016-08-30 21:23:53 +02:00
Alexander Neumann
8d90588020 Add better error message for 'cat' 2016-08-30 21:19:04 +02:00
Alexander Neumann
9cf63c99cf Wrap errors #3 2016-08-29 22:16:58 +02:00
Alexander Neumann
4a0f77650b Wrap errors #2 2016-08-29 21:54:50 +02:00
Alexander Neumann
b53679a24d Wrap errors 2016-08-29 21:38:34 +02:00
Alexander Neumann
b06845c545 Always use errors.Cause() for testing error values 2016-08-29 19:52:03 +02:00
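A short illustration of the errors.Cause() pattern from github.com/pkg/errors that this commit adopts; the openConfig helper below is hypothetical, not restic code:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func openConfig() error {
	_, err := os.Open("/does/not/exist")
	return errors.Wrap(err, "open config")
}

func main() {
	err := openConfig()
	// The wrapped error is no longer the original *os.PathError, so
	// sentinel checks must unwrap the chain with errors.Cause() first.
	fmt.Println(os.IsNotExist(err))               // false
	fmt.Println(os.IsNotExist(errors.Cause(err))) // true
}
```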
Alexander Neumann
c55b6ee544 Add restic.Fatal/f
This is a new error type which implements the restic.Fataler interface.
Errors of this type are written to stderr, then restic exits. For all
other errors, restic prints the stack trace (if available).
2016-08-29 19:52:00 +02:00
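A minimal sketch of the Fataler pattern the message describes; the concrete type and constructors below are assumptions, not restic's actual code:

```go
package main

import "fmt"

// Fataler marks errors that should be printed to stderr without a stack trace.
type Fataler interface {
	Fatal() bool
}

type fatalError string

func (e fatalError) Error() string { return string(e) }
func (e fatalError) Fatal() bool   { return true }

// Fatal and Fatalf construct errors implementing Fataler.
func Fatal(s string) error { return fatalError(s) }
func Fatalf(format string, args ...interface{}) error {
	return fatalError(fmt.Sprintf(format, args...))
}

func main() {
	err := Fatalf("unable to open repository at %v", "/tmp/repo")
	if f, ok := err.(Fataler); ok && f.Fatal() {
		fmt.Println("fatal:", err) // message only, no stack trace
	}
}
```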
Alexander Neumann
045f545085 repository: Handle errors correctly 2016-08-29 19:23:50 +02:00
Alexander Neumann
038b63f7f7 CI: Check for packages importing "errors" from stdlib 2016-08-29 19:23:50 +02:00
Alexander Neumann
d3f4c816c7 Print error stack if available 2016-08-29 19:23:50 +02:00
Alexander Neumann
72aa6be38d Replace fmt.Errorf() by errors.Errorf() 2016-08-29 19:23:50 +02:00
Alexander Neumann
444a268ce0 Replace stdlib errors with github.com/pkg/errors 2016-08-29 19:23:50 +02:00
Alexander Neumann
17a38faa43 Drop dependency github.com/juju/errors 2016-08-29 19:23:50 +02:00
Alexander Neumann
24385ff56e Merge pull request #597 from restic/fix-panic-596
Fix panic for debug.Log() with empty string
2016-08-29 17:13:37 +02:00
Alexander Neumann
f51bc8e9b9 Fix panic for debug.Log() with empty string 2016-08-28 22:43:05 +02:00
Alexander Neumann
6f5bf45212 Merge pull request #595 from restic/fix-cat
Fix the cat command
2016-08-28 22:28:25 +02:00
Alexander Neumann
3af8f53097 Allow 'cat' for tree blobs 2016-08-28 21:23:46 +02:00
Alexander Neumann
6c6b0e2395 cat: Add warning when pack was modified 2016-08-28 21:21:04 +02:00
Alexander Neumann
26351522c5 Merge pull request #594 from restic/fix-checker
Remove check for filemode 0
2016-08-28 21:09:02 +02:00
Alexander Neumann
dec2e4788e Remove flaky test 2016-08-28 21:06:27 +02:00
Alexander Neumann
f9cd736b33 Fix flaky test 2016-08-28 21:04:35 +02:00
Alexander Neumann
553dd00741 Merge pull request #592 from restic/fix-587
Fix panic when parsing sftp URIs
2016-08-28 20:14:17 +02:00
Alexander Neumann
88634dac3a Remove check for filemode 0 2016-08-28 20:04:09 +02:00
Alexander Neumann
83924d0864 Improve error message when sftp fails
Also add a prefix for all errors written to stderr by the client
2016-08-28 19:56:46 +02:00
Alexander Neumann
22bde5b277 sftp: Add debug log messages 2016-08-28 19:47:12 +02:00
Alexander Neumann
cdbdf74811 Remove debug output for tests 2016-08-28 19:30:56 +02:00
Alexander Neumann
db16702263 Report errors to stderr for tests 2016-08-28 19:30:56 +02:00
Alexander Neumann
5dd137d53e Improve error handling with the ssh subprocess 2016-08-28 19:30:56 +02:00
Alexander Neumann
8de06bd453 Vendor github.com/pkg/errors 2016-08-28 19:30:56 +02:00
Alexander Neumann
a7e64afc0d Update sftp library 2016-08-28 19:30:56 +02:00
Alexander Neumann
ed09887d9e Fix panic when parsing sftp URIs
Closes #587
2016-08-28 19:30:56 +02:00
Alexander Neumann
d097d40237 Merge pull request #593 from restic/correct-backend-errors
local/sftp: Fix broken error handling
2016-08-28 19:30:50 +02:00
Alexander Neumann
196bbbd25b local/sftp: Fix broken error handling
This yields the following error message for a full backup location:

    panic: write /home/fd0/mnt/temp/tmp/temp-987810174: no space left on device

Closes #540

Also connected to #574
2016-08-28 18:54:58 +02:00
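A sketch of the class of bug being fixed: dropping the error returned by Write hides conditions like ENOSPC until much later. The writeBlob helper is hypothetical; wrapping uses the vendored github.com/pkg/errors:

```go
package main

import (
	"os"

	"github.com/pkg/errors"
)

// writeBlob checks every error along the write path instead of dropping it.
func writeBlob(path string, buf []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return errors.Wrap(err, "Create")
	}
	if _, err := f.Write(buf); err != nil {
		f.Close()
		return errors.Wrap(err, "Write") // previously dropped, now propagated
	}
	return errors.Wrap(f.Close(), "Close")
}

func main() {
	if err := writeBlob("/tmp/temp-987810174", []byte("data")); err != nil {
		panic(err) // e.g. "Write: no space left on device" on a full disk
	}
}
```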
Alexander Neumann
93e62c6f18 Merge pull request #591 from viric/packs-not-files
On prune report, packs instead of files + fix counter
2016-08-27 21:57:09 +02:00
Lluís Batlle i Rossell
3acf03986a On prune report, packs instead of files + fix counter 2016-08-27 20:04:35 +02:00
Alexander Neumann
12a904eb4b Fix reading password from stdin
This fixes a bug introduced in #585: whether stdin and stdout are
terminals must be checked separately for each stream.
2016-08-27 18:31:46 +02:00
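A sketch of the separate checks the fix calls for, using golang.org/x/crypto/ssh/terminal; the demo itself is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	// The bug: one check was used for both streams. With input redirected
	// (e.g. a password piped in), stdin is not a terminal while stdout
	// still is, so each file descriptor needs its own test.
	fmt.Println("stdin is a terminal: ", terminal.IsTerminal(int(os.Stdin.Fd())))
	fmt.Println("stdout is a terminal:", terminal.IsTerminal(int(os.Stdout.Fd())))
}
```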
Alexander Neumann
7f06ec98b8 Merge pull request #585 from trbs/progress_without_terminal
show progress every second when run non interactively
2016-08-27 10:10:18 +02:00
Alexander Neumann
d62264c837 Merge pull request #584 from restic/fix-panic
Add more safety checks for Unpacker
2016-08-27 10:09:57 +02:00
Alexander Neumann
b2a67d458c Remove unneeded packs without repacking 2016-08-25 22:35:22 +02:00
Alexander Neumann
de88fb2022 Simplify pack.List 2016-08-25 22:25:55 +02:00
trbs
71263b5090 show progress every second when run non interactively 2016-08-25 22:13:47 +02:00
Alexander Neumann
3fd1e4a992 Add backend.ReaderAt 2016-08-25 21:49:00 +02:00
Alexander Neumann
9f752b8306 Rework function for listing packs 2016-08-25 21:08:16 +02:00
Alexander Neumann
e07ae7631c Add more safety checks for Unpacker 2016-08-23 22:21:29 +02:00
Alexander Neumann
9fd941f6fc Merge pull request #583 from stuertz/windowsoutput
Fix progress output on Windows
2016-08-23 21:18:09 +02:00
Jan Stürtz
91c458bf74 Fixed gofmt 2016-08-22 22:07:10 +02:00
Jan Stürtz
374b1144de Don't guess the max width, get it from the terminal 2016-08-22 17:27:58 +02:00
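A sketch of querying the width from the terminal instead of guessing, e.g. via golang.org/x/crypto/ssh/terminal; the 80-column fallback is an assumption:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// terminalWidth asks the terminal for its size instead of guessing.
func terminalWidth() int {
	w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
	if err != nil {
		return 80 // conservative fallback when not attached to a terminal
	}
	return w
}

func main() {
	fmt.Println("progress lines may use", terminalWidth(), "columns")
}
```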
Jan Stürtz
f05b0871e9 fixed maxlen computation (off by one) on small terminals 2016-08-22 17:27:03 +02:00
Jan Stürtz
4cb8fe3210 Fixed style hints from hound
- no else, when if has a return
- Improve Comment on Function
2016-08-21 23:10:28 +02:00
Jan Stürtz
08eb5b42eb Fix progress output on Windows
The Windows cmd shell is not aware of ANSI escape sequences and
prints them uninterpreted to the console, which is ugly.
Added a function that generates a platform-specific string for the
escape sequence. On Windows this is 79 spaces with a trailing \r.
2016-08-21 22:38:22 +02:00
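A sketch of such a platform-specific clear-line string; the function name clearLine is illustrative:

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// clearLine returns the sequence that erases the current progress line.
func clearLine() string {
	if runtime.GOOS == "windows" {
		// cmd.exe does not interpret ANSI escapes: overwrite the line with
		// spaces and return the cursor to the start of the line.
		return strings.Repeat(" ", 79) + "\r"
	}
	// On ANSI terminals, \x1b[2K erases the current line.
	return "\x1b[2K\r"
}

func main() {
	fmt.Print("uploading 42%")
	fmt.Print(clearLine())
	fmt.Print("uploading 43%\n")
}
```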
Alexander Neumann
1c703e4161 Merge pull request #579 from restic/debug-544
Properly close connections to s3 backend on Stat()
2016-08-21 17:10:07 +02:00
Alexander Neumann
ebd3723a06 Properly close the minio object on Stat()
Closes #544
2016-08-21 16:15:41 +02:00
Alexander Neumann
06b23edb39 Fix code for newer minio-go 2016-08-21 16:14:58 +02:00
Alexander Neumann
e893be3dec Update minio-go 2016-08-21 16:14:22 +02:00
Alexander Neumann
ca14942c80 Merge pull request #578 from restic/fix-build-on-arm
Fix build on linux/arm
2016-08-21 15:09:46 +02:00
Alexander Neumann
11d01fcd32 Merge pull request #577 from restic/dynamic-scrypt
Dynamically calibrate scrypt parameters
2016-08-21 15:00:24 +02:00
Alexander Neumann
5061607e77 x/sys/unix: Manually add FADV_* constants for Linux/arm 2016-08-21 14:59:15 +02:00
Alexander Neumann
69d8fe5b4f Add check for cross-compilation 2016-08-21 14:21:19 +02:00
Alexander Neumann
916efa4e1a Merge pull request #576 from restic/fix-documentation-forget
Improve documentation, add explanation and weekly
2016-08-21 13:51:38 +02:00
Alexander Neumann
a3492d69dd Use low-security scrypt KDF parameters for testing 2016-08-21 13:42:04 +02:00
Alexander Neumann
8e24c51233 Fix comments for constants 2016-08-21 13:13:05 +02:00
Alexander Neumann
d8107f77aa Limit the number of key files checked on SearchKey 2016-08-21 13:10:16 +02:00
Alexander Neumann
79e950b710 Remove dead code 2016-08-21 13:10:15 +02:00
Alexander Neumann
f0d7f3f1bd Calibrate scrypt for the current hardware
Closes #17
2016-08-21 13:10:08 +02:00
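A sketch of calibrating scrypt with the github.com/elithrar/simple-scrypt package (vendored a few commits below); the time and memory budgets here are illustrative, not restic's actual settings:

```go
package main

import (
	"fmt"
	"time"

	scrypt "github.com/elithrar/simple-scrypt"
)

func main() {
	// Ask the library for scrypt parameters that take roughly 500ms and at
	// most 64 MiB of memory on the current machine.
	params, err := scrypt.Calibrate(500*time.Millisecond, 64, scrypt.DefaultParams)
	if err != nil {
		panic(err)
	}
	fmt.Printf("calibrated: N=%d r=%d p=%d\n", params.N, params.R, params.P)

	// Derive a key with the calibrated parameters.
	hash, err := scrypt.GenerateFromPassword([]byte("secret"), params)
	if err != nil {
		panic(err)
	}
	fmt.Printf("derived %d bytes\n", len(hash))
}
```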
Alexander Neumann
9afec53c55 Remove crypto reader/writer (unused) 2016-08-21 13:10:08 +02:00
Alexander Neumann
11098d6eb0 Move KDF() to kdf.go 2016-08-21 13:10:08 +02:00
Alexander Neumann
7e6fc15ece Vendor github.com/elithrar/simple-scrypt 2016-08-21 13:10:08 +02:00
Alexander Neumann
78c0995853 Improve documentation, add explanation and weekly 2016-08-21 11:53:05 +02:00
Alexander Neumann
84c14e623d Merge pull request #575 from restic/remove-constants
Remove POSIX constants, reduce code duplication
2016-08-21 11:02:06 +02:00
Alexander Neumann
d965d703d1 Reduce duplicate code in wrappers for os 2016-08-21 10:42:07 +02:00
Alexander Neumann
b20921d836 Use constants from /x/sys/unix 2016-08-21 10:36:20 +02:00
Alexander Neumann
a78493f549 Update golang.org/x/sys/unix 2016-08-21 10:35:12 +02:00
Alexander Neumann
2be0aa9dbc Merge pull request #518 from restic/implement-prune
Implement prune
2016-08-21 09:22:22 +02:00
Alexander Neumann
aa29c68189 Fix progress for new index 2016-08-20 20:44:57 +02:00
Alexander Neumann
d3da30e8fb Use UTC for snapshot time based tests 2016-08-20 18:49:02 +02:00
Alexander Neumann
3337b5d3c4 Add prune/forget to the manual 2016-08-20 18:38:16 +02:00
Alexander Neumann
458448357c Add help texts which cross-line prune/forget 2016-08-20 18:33:24 +02:00
Alexander Neumann
27d0909302 forget: Remove message when no policy is specified 2016-08-20 18:15:36 +02:00
Alexander Neumann
5f0ebb71b2 forget: Allow filtering for a hostname 2016-08-20 17:59:47 +02:00
Alexander Neumann
00f647dc92 forget: Join paths by ":" 2016-08-20 17:59:10 +02:00
Alexander Neumann
8e7202bd6a Rename function in debug 'dump' command 2016-08-20 17:54:27 +02:00
Alexander Neumann
5cf7c827b8 forget: Do nothing if no policy is configured 2016-08-20 17:53:03 +02:00
Alexander Neumann
71f7f4f543 Add ExpirePolicy.Empty() 2016-08-20 17:51:48 +02:00
Alexander Neumann
bf47dba1c4 Add 'forget' command 2016-08-20 17:43:25 +02:00
Alexander Neumann
cbd457e557 Add Hourly expire functions 2016-08-20 15:55:23 +02:00
Alexander Neumann
6cf4b81558 Add functions to filter snapshots 2016-08-20 15:22:40 +02:00
Alexander Neumann
bb84d351f1 Revert "ID: move Str() to non-pointer receiver"
This reverts commit f102406cd7.
2016-08-19 20:45:19 +02:00
Alexander Neumann
a107e3cc84 Correct comment 2016-08-19 20:36:24 +02:00
Alexander Neumann
e934966b54 Merge pull request #573 from restic/fix-osxfuse-travis
Fix osxfuse on Travis/darwin
2016-08-19 19:23:24 +02:00
Alexander Neumann
bd9f23f1d2 Fix osxfuse on Travis/darwin 2016-08-19 19:04:02 +02:00
Alexander Neumann
2a2fb74ba8 Merge pull request #569 from restic/fix-568
Use the platform-independent function for joining
2016-08-19 17:53:09 +02:00
Alexander Neumann
bd819a5e81 Fix panic 2016-08-16 21:59:43 +02:00
Alexander Neumann
162629571d Add BenchmarkFindUsedBlobs 2016-08-16 21:30:14 +02:00
Alexander Neumann
2c04ad3c29 TestCreateSnapshot: free buffer 2016-08-16 21:30:14 +02:00
Alexander Neumann
238d3807e9 prune: Format duplicate bytes properly 2016-08-16 21:30:14 +02:00
Alexander Neumann
7f9d227725 Use progress in prune command 2016-08-16 21:30:14 +02:00
Alexander Neumann
8de6e5a627 Add progress option to index 2016-08-16 21:30:14 +02:00
Alexander Neumann
8d735cf6a9 Explicitly specify supersedes for new index 2016-08-16 21:30:14 +02:00
Alexander Neumann
29bb845f0e Rebuild index at the end of prune 2016-08-16 21:30:14 +02:00
Alexander Neumann
1bb2d59e38 Add Save() method to Index 2016-08-16 21:30:14 +02:00
Alexander Neumann
3ceb2ad3cf Progress: Call OnUpdate before OnDone 2016-08-16 21:30:14 +02:00
Alexander Neumann
009c803c8a prune: Use new Index 2016-08-16 21:30:14 +02:00
Alexander Neumann
c0ef1ec6fd Add RemovePack for index 2016-08-16 21:30:14 +02:00
Alexander Neumann
69c2e8ce7e Add first version of the prune command 2016-08-16 21:30:14 +02:00
Alexander Neumann
f102406cd7 ID: move Str() to non-pointer receiver 2016-08-16 21:30:14 +02:00
Alexander Neumann
302619a11a Move interfaces to package restic/types 2016-08-16 21:30:14 +02:00
Alexander Neumann
80bcae44e2 Decouple ListAllPacks from repository 2016-08-16 21:30:14 +02:00
Alexander Neumann
1f263a7683 Decouple index/ and repository/ 2016-08-16 21:30:14 +02:00
Alexander Neumann
3b57075109 Add global interface Repository 2016-08-16 21:30:14 +02:00
Alexander Neumann
3fa7304e94 Add interfaces to ListAllPacks 2016-08-16 21:30:14 +02:00
Alexander Neumann
47950b82a0 Add test for loading index from documentation 2016-08-16 21:30:14 +02:00
Alexander Neumann
9ecf7070af Implement Lookup() and Save() for new Index 2016-08-16 21:30:14 +02:00
Alexander Neumann
2310773798 Compute negative offsets ourselves in the s3 backend 2016-08-16 21:30:14 +02:00
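A sketch of translating a negative offset (read relative to the end of the object) before calling ReadAt, as a backend without server-side support for it must do; all names are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// load reads len(buf) bytes at off; a negative off means "from the end".
func load(obj io.ReaderAt, size int64, buf []byte, off int64) (int, error) {
	if off < 0 {
		off = size + off
		if off < 0 {
			off = 0 // clamp offsets beyond the start of the object
		}
	}
	return obj.ReadAt(buf, off)
}

func main() {
	r := strings.NewReader("0123456789")
	buf := make([]byte, 4)
	n, _ := load(r, r.Size(), buf, -4)
	fmt.Println(string(buf[:n])) // prints 6789
}
```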
Alexander Neumann
a60e3b5030 Make backend tests less verbose 2016-08-16 21:30:14 +02:00
Alexander Neumann
b350b443d0 Stop backend tests early on failure 2016-08-16 21:30:14 +02:00
Alexander Neumann
2c517e4a33 Add Index structures for Blobs 2016-08-16 21:30:14 +02:00
Alexander Neumann
4bdd59b4ad Index: Add DuplicateBlobs() 2016-08-16 21:30:14 +02:00
Alexander Neumann
f5daf33322 Add pack size to ListAllPacks 2016-08-16 21:30:14 +02:00
Alexander Neumann
1058a91b39 Add option to create duplicate blobs in TestCreateSnapshot 2016-08-16 21:30:14 +02:00
Alexander Neumann
240b8f273a Add more index tests 2016-08-16 21:30:14 +02:00
Alexander Neumann
6808523d34 Add String() for Blob 2016-08-16 21:30:14 +02:00
Alexander Neumann
bad6184ab5 Add new Index data structure 2016-08-16 21:30:14 +02:00
Alexander Neumann
6b384287f3 Return error when it occurs 2016-08-16 21:30:14 +02:00
Alexander Neumann
ef33cf12ca Fix Unpacker for packs < 2048 byte 2016-08-16 21:30:14 +02:00
Alexander Neumann
a5cbbb8b5a Fix BufferLoader for negative offset 2016-08-16 21:30:14 +02:00
Alexander Neumann
71924fb7c0 Add tests for Load() with negative offset 2016-08-16 21:30:14 +02:00
Alexander Neumann
b0565015cc Remove ReadSeeker 2016-08-16 21:30:14 +02:00
Alexander Neumann
fa283c6ecd Remove unused GetReader() 2016-08-16 21:30:14 +02:00
Alexander Neumann
94d157d97a Introduce interface pack.Loader 2016-08-16 21:30:14 +02:00
Alexander Neumann
f72f3dbc6a Buffer last 2048 bytes of a file for unpack 2016-08-16 21:28:55 +02:00
Alexander Neumann
3c3a180417 Move RandomID() to backend package 2016-08-16 21:28:55 +02:00
Alexander Neumann
fd6c854a21 Add TestResetRepository and BenchmarkCreateSnapshot 2016-08-16 21:28:55 +02:00
Alexander Neumann
e9cddc0be5 Fix TestFindUsedBlobs 2016-08-16 21:28:55 +02:00
Alexander Neumann
d7e5f11b78 Export FindUsedBlobs 2016-08-16 21:28:55 +02:00
Alexander Neumann
2b1b6d8c2a Export ListAllPacks 2016-08-16 21:28:55 +02:00
Alexander Neumann
acc2fa5816 Fix TestRepack
* Decrease the number of blobs used in the test
* Fail the test when there's a duplicate blob
2016-08-16 21:28:54 +02:00
Alexander Neumann
6285f31604 Use pack.BlobSet instead of backend.IDSet 2016-08-16 21:28:54 +02:00
Alexander Neumann
3cca831b2e Fix invalid type in newly created packs 2016-08-16 21:28:54 +02:00
Alexander Neumann
cff6fea32a Fix 'cat' command 2016-08-16 21:28:54 +02:00
Alexander Neumann
17e1872544 Switch order of parameters to repo.LoadBlob() 2016-08-16 21:28:54 +02:00
Alexander Neumann
246302375d Index: Add multiple packs per blob, pack.Type
Change the index so that a blob can be contained in multiple packs.

Require passing the blob type to all lookup functions.
2016-08-16 21:28:54 +02:00
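A minimal sketch of the index shape this commit describes: one blob may be stored in several packs, and every lookup requires the blob type. All type names below are assumptions, not restic's real ones:

```go
package main

import "fmt"

// BlobType distinguishes data from tree blobs; lookups require it.
type BlobType uint8

const (
	DataBlob BlobType = iota
	TreeBlob
)

// ID stands in for a SHA-256 hash.
type ID [32]byte

type key struct {
	t  BlobType
	id ID
}

// packEntry records where inside which pack a copy of a blob lives.
type packEntry struct {
	packID         ID
	offset, length uint
}

// Index maps (type, blob ID) to every pack that stores the blob.
type Index struct {
	m map[key][]packEntry
}

// Lookup returns all pack locations for a blob; the blob type is mandatory.
func (idx *Index) Lookup(t BlobType, id ID) []packEntry {
	return idx.m[key{t, id}]
}

func main() {
	idx := &Index{m: make(map[key][]packEntry)}
	var blob, pack1, pack2 ID
	pack2[0] = 1 // distinct pack IDs for the demo
	k := key{DataBlob, blob}
	idx.m[k] = append(idx.m[k], packEntry{pack1, 0, 25}, packEntry{pack2, 100, 25})
	fmt.Println(len(idx.Lookup(DataBlob, blob)), "packs store this blob")
}
```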
Alexander Neumann
231da4ff80 Remove old repacking code 2016-08-16 21:28:54 +02:00
Alexander Neumann
1b4b469440 Add pack.Handle and pack.Handles 2016-08-16 21:28:54 +02:00
Alexander Neumann
35e3762e37 Remove dead code 2016-08-16 21:28:54 +02:00
Alexander Neumann
7e732dbd2d Allow multiple entries in the index 2016-08-16 21:28:54 +02:00
Alexander Neumann
8b4d4ec25f Fix TestCreateSnapshot, do not store duplicate data 2016-08-16 21:28:54 +02:00
Alexander Neumann
035d0aeb31 Do not create duplicate content for tests 2016-08-16 21:28:54 +02:00
Alexander Neumann
f1bc181c5b Add more checks for tests 2016-08-16 21:28:54 +02:00
Alexander Neumann
50b724ca23 Fix stylistic issues with FindUsedBlobs 2016-08-16 21:28:54 +02:00
Alexander Neumann
6227821b4e Move functions to correct file 2016-08-16 21:28:54 +02:00
Alexander Neumann
810056c2bc Correct packages for tests 2016-08-16 21:28:54 +02:00
Alexander Neumann
34b3e3a095 Split index/repack functions to different files 2016-08-16 21:28:54 +02:00
Alexander Neumann
bdd085e9f1 Prevent loops when finding used blobs 2016-08-16 21:28:54 +02:00
Alexander Neumann
ffc3503e6f Add first version of FindUsedBlobs 2016-08-16 21:28:54 +02:00
Alexander Neumann
51b16ad57d Add handy functions to backend.IDSet 2016-08-16 21:28:54 +02:00
Alexander Neumann
723592d923 Move FindUsedBlobs to package restic 2016-08-16 21:28:54 +02:00
Alexander Neumann
22aa17091b Add test for FindUsedBlobs 2016-08-16 21:28:54 +02:00
Alexander Neumann
4720a7d807 Allow specifying chunker polynomial for tests 2016-08-16 21:28:54 +02:00
Alexander Neumann
d5323223f4 Change repository Init() function to allow better testing 2016-08-16 21:28:54 +02:00
Alexander Neumann
fe79177b40 Make TestCreateSnapshot return the snapshot itself 2016-08-16 21:28:54 +02:00
Alexander Neumann
5c32ae15c2 Move test checking repo code to checker package 2016-08-16 21:28:54 +02:00
Alexander Neumann
6c2334f505 Make TestCreateSnapshot less verbose 2016-08-16 21:28:54 +02:00
Alexander Neumann
b55ac2afd6 Make test files in test repo less random 2016-08-16 21:28:54 +02:00
Alexander Neumann
d9012b4a64 Add trees recursively to test snapshot 2016-08-16 21:28:54 +02:00
Alexander Neumann
952f124238 Use RandReader instead of rand directly
This is a fix to be backwards-compatible with Go < 1.6.
2016-08-16 21:28:54 +02:00
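A sketch of the idea: math/rand.Rand only gained a Read method in Go 1.6, so a small adapter provides io.Reader on older versions. The names below are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"math/rand"
)

// randReader adapts *rand.Rand to io.Reader for Go < 1.6.
type randReader struct {
	rnd *rand.Rand
}

func (r randReader) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(r.rnd.Int31n(256))
	}
	return len(p), nil
}

// NewRandReader returns a deterministic source of pseudo-random bytes.
func NewRandReader(rnd *rand.Rand) io.Reader { return randReader{rnd} }

func main() {
	src := NewRandReader(rand.New(rand.NewSource(23)))
	buf := make([]byte, 8)
	if _, err := io.ReadFull(src, buf); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf)
}
```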
Alexander Neumann
14db71d3fa Move RandReader to repository package 2016-08-16 21:28:54 +02:00
Alexander Neumann
f59ffcaeae Correct comment 2016-08-16 21:28:54 +02:00
Alexander Neumann
d609e4a986 Extend plaintext buffer if necessary 2016-08-16 21:28:54 +02:00
Alexander Neumann
0e6c72ad1d Implement Repack() 2016-08-16 21:28:54 +02:00
Alexander Neumann
d5f42201c5 Fix test for Repack 2016-08-16 21:28:54 +02:00
Alexander Neumann
122a0944a6 Do not repack blobs that shouldn't be kept 2016-08-16 21:28:54 +02:00
Alexander Neumann
fa26ecc8f9 Make rebuild-index use the code in package repository 2016-08-16 21:28:54 +02:00
Alexander Neumann
00139648a0 Implement Repack() 2016-08-16 21:28:54 +02:00
Alexander Neumann
6ba38e9a38 Add tests for Repack() 2016-08-16 21:28:54 +02:00
Alexander Neumann
812cb0ba77 Update Go version in manual 2016-08-16 21:24:48 +02:00
Alexander Neumann
b5c397435c Merge pull request #571 from restic/raise-go-version
Require Go 1.6 or greater
2016-08-16 21:20:59 +02:00
Alexander Neumann
043424824c Only test cross-compilation on Go 1.7 2016-08-16 21:02:30 +02:00
Alexander Neumann
c88c48a29f Do not build toolchain with gox for Go >= 1.5 2016-08-16 20:51:46 +02:00
Alexander Neumann
2fa93b291a Update default Go version in Dockerfile 2016-08-16 20:51:31 +02:00
Alexander Neumann
1ad4d1aafd Require Go 1.6 or greater 2016-08-16 20:32:58 +02:00
Jan Stürtz
b108966b12 Fix 567 (#570)
* Patch for  https://github.com/restic/restic/issues/567
Also back up files on Windows with path names longer than 255 chars (e.g. from node).

as fd0 says "So, as far as I can see, we need to have custom methods for all functions that accept a path, so that on Windows we can substitute the normal (possibly relative) path used within restic by an (absolute) UNC path, and only then call the underlying functions like os.Stat(), os.Lstat(), os.Open() and so on.

I've already thought about adding a generic abstraction for the file system (so we can mock this easier in tests), and this looks like a good opportunity to build it."

* fixed building tests

* Restructured patches
Add Wrapper for filepath.Walk

* Using \\?\ requires absolute paths to be used (see the sketch after this entry).
Now all tests run

* used gofmt on the code

* Restructured Code. No patches dir, integrate the file functions into restic/fs/

There is still an issue, because restic.fs.Open has a different API than os.Open: it returns the result of OpenFile, but takes only a string

* Changed the last os.Open() calls to fs.Open() after extending the File interface

* fixed name-clash of restic.fs and fuse.fs detected by travis

* fixed fmt with gofmt

* c&p failure: removed fixpath() call.

* missing include

* fixed includes in linux variant

* Fix for Linux. Fd() is required on File interface

* done gofmt
2016-08-15 21:59:13 +02:00
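The PR mentions a fixpath() helper; below is a sketch of how such a wrapper could map a possibly relative path to a \\?\ extended-length path on Windows, under the assumptions quoted above:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// fixpath maps a (possibly relative) path to an absolute \\?\ extended-length
// path on Windows, so os.Stat/os.Open accept names longer than 255 chars.
func fixpath(name string) string {
	if runtime.GOOS != "windows" {
		return name
	}
	abs, err := filepath.Abs(name) // \\?\ paths must be absolute
	if err != nil {
		return name
	}
	return `\\?\` + abs
}

// Stat wraps os.Stat with the fixed-up path.
func Stat(name string) (os.FileInfo, error) {
	return os.Stat(fixpath(name))
}

func main() {
	fi, err := Stat(".")
	fmt.Println(fi != nil, err)
}
```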
Alexander Neumann
1fe8deeb6e Use new URI syntax in documentation 2016-08-11 20:37:49 +02:00
Alexander Neumann
fa4570bde8 Always use forward slashes in file names 2016-08-11 19:41:47 +02:00
Alexander Neumann
f6c2787d80 Use the platform-independent function for joining 2016-08-11 19:37:22 +02:00
Alexander Neumann
4b8b625b90 Merge pull request #562 from damekr/implement-gomaxprocs-env
Issue-535: restic respects the GOMAXPROCS env variable depending on Go version
2016-08-11 19:09:54 +02:00
damekr
be00d91967 Respect GOMAXPROCS variable
Closes #535
2016-08-08 21:37:20 +02:00
Alexander Neumann
e4a9905d6f Merge pull request #563 from restic/fix-build-script
Invert go version test for ldflags
2016-08-04 19:37:30 +02:00
Alexander Neumann
68ec29e7ec Invert go version test for ldflags 2016-08-03 22:04:03 +02:00
Alexander Neumann
d860ce0570 Merge pull request #559 from vrischmann/master
Fix the debug environment variable name in the manual
2016-08-02 22:12:18 +02:00
Alexander Neumann
fc9b27c533 Revert "Fix TestCreateSnapshot, do not generate duplicate data"
This reverts commit 628fb0fb72.
2016-08-02 22:11:55 +02:00
Vincent Rischmann
d4a9b546c1 Fix the debug environment variable name in the manual 2016-08-01 22:23:42 +02:00
Alexander Neumann
628fb0fb72 Fix TestCreateSnapshot, do not generate duplicate data 2016-08-01 22:01:34 +02:00
Alexander Neumann
2de233fe8b Merge pull request #558 from vrischmann/master
Detect a devel version correctly in LDFlags()
2016-08-01 21:49:20 +02:00
Vincent Rischmann
d2834b61fb Detect a devel version correctly in LDFlags() 2016-08-01 20:47:33 +02:00
646 changed files with 55601 additions and 26561 deletions

1
.envrc Normal file
View File

@@ -0,0 +1 @@
GOPATH=$PWD:$PWD/vendor

View File

@@ -2,23 +2,38 @@ language: go
sudo: false
go:
- 1.3.3
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.5
- 1.8
- tip
os:
- linux
- osx
env:
matrix:
RESTIC_TEST_FUSE=0
matrix:
exclude:
- os: osx
go: 1.3.3
go: 1.7.5
- os: osx
go: 1.4.3
- os: osx
go: 1.5.4
go: tip
- os: linux
go: 1.8
include:
- os: linux
go: 1.8
sudo: true
env:
RESTIC_TEST_FUSE=1
allow_failures:
- go: tip
branches:
only:
- master
notifications:
irc:
@@ -33,10 +48,9 @@ install:
- export GOBIN="$GOPATH/bin"
- export PATH="$PATH:$GOBIN"
- go env
- ulimit -n 2048
script:
- go run run_integration_tests.go
after_success:
- GOPATH=$PWD:$PWD/vendor goveralls -coverprofile=all.cov -service=travis-ci -repotoken "$COVERALLS_TOKEN"
- bash <(curl -s https://codecov.io/bash) -f all.cov

View File

@@ -3,7 +3,10 @@ This document describes the way you can contribute to the restic project.
Ways to Help Out
================
Thank you for your contribution!
Thank you for your contribution! Please **open an issue first** (or add a
comment to an existing issue) if you plan to work on any code or add a new
feature. This way, duplicate work is prevented and we can discuss your ideas
and design first.
There are several ways you can help us out. First of all code contributions and
bug fixes are most welcome. However even "minor" details as fixing spelling
@@ -74,7 +77,7 @@ Just clone the repository, `cd` to it and run `gb build` to build the binary:
[...]
$ bin/restic version
restic compiled manually
compiled at unknown time with go1.6
compiled at unknown time with go1.7
The following commands can be used to run all the tests:
@@ -83,7 +86,7 @@ The following commands can be used to run all the tests:
[...]
If you want to run your tests on Linux, OpenBSD or FreeBSD, you can use
[vagrant](https://www.vagrantup.com/) with the proveded `Vagrantfile` to
[vagrant](https://www.vagrantup.com/) with the provided `Vagrantfile` to
quickly set up VMs and run the tests, e.g.:
$ vagrant up freebsd
@@ -92,6 +95,16 @@ quickly set up VMs and run the tests, e.g.:
$ vagrant ssh freebsd -c 'cd restic/restic; go test -v ./...'
[...]
The default `go` tool can also be used by setting the environment variable
`GOPATH` to the following value while being in the top level directory in the
git repository:
$ export GOPATH=$PWD:$PWD/vendor
The file `.envrc` allows automatic `GOPATH` configuration with
[direnv](https://direnv.net/), inspect the file and then allow automatic
configuration by running `direnv allow`.
Providing Patches
=================

View File

@@ -14,11 +14,11 @@
# docker run --rm -v $PWD:/home/travis/restic restic/test gb test -v ./backend
#
# build the image for an older version of Go:
# docker build --build-arg GOVERSION=1.3.3 -t restic/test:go1.3.3 .
# docker build --build-arg GOVERSION=1.6.4 -t restic/test:go1.6.4 .
FROM ubuntu:14.04
ARG GOVERSION=1.6
ARG GOVERSION=1.7.5
ARG GOARCH=amd64
# install dependencies
@@ -45,7 +45,6 @@ RUN mkdir -p $HOME/restic
# pre-install tools, this speeds up running the tests itself
RUN go get github.com/constabulary/gb/...
RUN go get golang.org/x/tools/cmd/cover
RUN go get github.com/mattn/goveralls
RUN go get github.com/mitchellh/gox
RUN go get github.com/pierrre/gotestcover
RUN mkdir $HOME/bin \

View File

@@ -2,7 +2,6 @@
[![Build Status](https://travis-ci.org/restic/restic.svg?branch=master)](https://travis-ci.org/restic/restic)
[![Build status](https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true)](https://ci.appveyor.com/project/fd0/restic/branch/master)
[![Report Card](http://goreportcard.com/badge/github.com/restic/restic)](http://goreportcard.com/report/github.com/restic/restic)
[![Coverage Status](https://coveralls.io/repos/restic/restic/badge.svg)](https://coveralls.io/r/restic/restic)
Introduction
@@ -12,17 +11,30 @@ restic is a backup program that is fast, efficient and secure. Detailed
information can be found in [the documentation](doc/index.md) and [the user
manual](doc/Manual.md). The [design document](doc/Design.md) lists the
technical background and gives detailed information about the structure of the
repository and the data saved therein.
repository and the data saved therein. The file [FAQ.md](doc/FAQ.md) lists the
most frequently asked questions.
The latest documentation can be viewed online at
<https://restic.readthedocs.io/en/latest>. On the bottom left corner there is
a menu that allows switching to the documentation and user manual for the
latest released version.
News
====
You can follow the restic project on Twitter
[@resticbackup](https://twitter.com/resticbackup) or by subscribing to the
[development blog](https://restic.github.io/blog/).
Install restic
==============
You can download the latest pre-compiled binary from the [restic release page](https://github.com/restic/restic/releases/latest).
Build restic
============
Install Go/Golang (at least version 1.3), then run `go run build.go`,
Install Go/Golang (at least version 1.7), then run `go run build.go`,
afterwards you'll find the binary in the current directory:
$ go run build.go
@@ -32,6 +44,16 @@ afterwards you'll find the binary in the current directory:
restic [OPTIONS] <command>
[...]
You can easily cross-compile restic for all supported platforms, just supply
the target OS and platform via the command-line options like this (for Windows
and FreeBSD respectively):
$ go run build.go --goos windows --goarch amd64
$ go run build.go --goos freebsd --goarch 386
The resulting binary is statically linked and does not require any libraries.
More documentation can be found in the [user manual](doc/Manual.md).
At the moment, the only tested compiler for restic is the official Go compiler.
@@ -40,11 +62,15 @@ Building restic with gccgo may work, but is not supported.
Contribute and Documentation
============================
Contributions are welcome! More information can be found in
[`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
Contributions are welcome! Please **open an issue first** (or add a comment to
an existing issue) if you plan to work on any code or add a new feature. This
way, duplicate work is prevented and we can discuss your ideas and design
first.
More information and a description of the development environment can be found
in [`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
restic and the data structures stored on the back end is contained in
[`doc/Design.md`](doc/Design.md).
The development environment is described in [`CONTRIBUTING.md`](CONTRIBUTING.md).
If you'd like to start contributing to restic, but don't know exactly what do
to, have a look at this great article by Dave Cheney:
@@ -69,7 +95,6 @@ alexander@bumpern.de. If possible, please encrypt your email using the following
pub 4096R/91A6868BD3F7A907 2014-11-01
Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
uid Alexander Neumann <alexander@bumpern.de>
uid Alexander Neumann <alexander@debian.org>
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
```

View File

@@ -1 +1 @@
0.2.0
0.5.0

2
Vagrantfile vendored
View File

@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
GO_VERSION = '1.6'
GO_VERSION = '1.7'
def packages_freebsd
return <<-EOF

View File

@@ -3,6 +3,10 @@ clone_folder: c:\restic
environment:
GOPATH: c:\gopath
branches:
only:
- master
init:
- ps: >-
$app = Get-WmiObject -Class Win32_Product -Filter "Vendor = 'http://golang.org'"
@@ -13,8 +17,8 @@ init:
install:
- rmdir c:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.1.windows-amd64.msi
- msiexec /i go1.6.1.windows-amd64.msi /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.windows-amd64.msi
- msiexec /i go1.8.windows-amd64.msi /q
- go version
- go env
- appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip

View File

@@ -12,7 +12,6 @@ import (
"path/filepath"
"runtime"
"strings"
"time"
)
var (
@@ -21,7 +20,17 @@ var (
runTests bool
)
const timeFormat = "2006-01-02 15:04:05"
var config = struct {
Name string
Namespace string
Main string
Tests []string
}{
Name: "restic", // name of the program executable and directory
Namespace: "", // subdir of GOPATH, e.g. "github.com/foo/bar"
Main: "cmds/restic", // package name for the main package
Tests: []string{"restic/...", "cmds/..."}, // tests to run
}
// specialDir returns true if the file begins with a special character ('.' or '_').
func specialDir(name string) bool {
@@ -96,6 +105,15 @@ func updateGopath(dst, src, prefix string) error {
})
}
func directoryExists(dirname string) bool {
stat, err := os.Stat(dirname)
if err != nil && os.IsNotExist(err) {
return false
}
return stat.IsDir()
}
// copyFile creates dst from src, preserving file attributes and timestamps.
func copyFile(dst, src string) error {
fi, err := os.Stat(src)
@@ -156,6 +174,7 @@ func showUsage(output io.Writer) {
fmt.Fprintf(output, " -t --tags specify additional build tags\n")
fmt.Fprintf(output, " -k --keep-gopath do not remove the GOPATH after build\n")
fmt.Fprintf(output, " -T --test run tests\n")
fmt.Fprintf(output, " -o --output set output file name\n")
fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
}
@@ -186,7 +205,7 @@ func cleanEnv() (env []string) {
func build(cwd, goos, goarch, gopath string, args ...string) error {
args = append([]string{"build"}, args...)
cmd := exec.Command("go", args...)
cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos)
cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos, "CGO_ENABLED=0")
cmd.Dir = cwd
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -264,21 +283,20 @@ type Constants map[string]string
func (cs Constants) LDFlags() string {
l := make([]string, 0, len(cs))
v := runtime.Version()
if strings.HasPrefix(v, "go1.5") || strings.HasPrefix(v, "go1.6") || strings.HasPrefix(v, "go1.7") {
for k, v := range cs {
l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v))
}
} else {
for k, v := range cs {
l = append(l, fmt.Sprintf(`-X %q %q`, k, v))
}
for k, v := range cs {
l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v))
}
return strings.Join(l, " ")
}
func main() {
ver := runtime.Version()
if strings.HasPrefix(ver, "go1") && ver < "go1.7" {
fmt.Fprintf(os.Stderr, "Go version %s detected, restic requires at least Go 1.7\n", ver)
os.Exit(1)
}
buildTags := []string{}
skipNext := false
@@ -287,6 +305,8 @@ func main() {
targetGOOS := runtime.GOOS
targetGOARCH := runtime.GOARCH
var outputFilename string
for i, arg := range params {
if skipNext {
skipNext = false
@@ -299,8 +319,14 @@ func main() {
case "-k", "--keep-gopath":
keepGopath = true
case "-t", "-tags", "--tags":
if i+1 >= len(params) {
die("-t given but no tag specified")
}
skipNext = true
buildTags = strings.Split(params[i+1], " ")
case "-o", "--output":
skipNext = true
outputFilename = params[i+1]
case "-T", "--test":
runTests = true
case "--goos":
@@ -335,23 +361,21 @@ func main() {
die("Getwd(): %v\n", err)
}
gopath, err := ioutil.TempDir("", "restic-build-")
gopath, err := ioutil.TempDir("", fmt.Sprintf("%v-build-", config.Name))
if err != nil {
die("TempDir(): %v\n", err)
}
verbosePrintf("create GOPATH at %v\n", gopath)
if err = updateGopath(gopath, filepath.Join(root, "src", "restic"), "restic"); err != nil {
die("copying files from %v/src/restic to %v/src/restic failed: %v\n", root, gopath, err)
}
if err = updateGopath(gopath, filepath.Join(root, "src", "cmds"), "cmds"); err != nil {
die("copying files from %v/src/cmds to %v/src/restic/cmds failed: %v\n", root, gopath, err)
if err = updateGopath(gopath, filepath.Join(root, "src"), config.Namespace); err != nil {
die("copying files from %v/src to %v/src failed: %v\n", root, gopath, err)
}
vendor := filepath.Join(root, "vendor", "src")
if err = updateGopath(gopath, vendor, ""); err != nil {
die("copying files from %v to %v/src failed: %v\n", vendor, gopath, err)
if directoryExists(vendor) {
if err = updateGopath(gopath, vendor, ""); err != nil {
die("copying files from %v to %v failed: %v\n", root, gopath, err)
}
}
defer func() {
@@ -365,9 +389,11 @@ func main() {
}
}()
outputFilename := "restic"
if targetGOOS == "windows" {
outputFilename = "restic.exe"
if outputFilename == "" {
outputFilename = config.Name
if targetGOOS == "windows" {
outputFilename += ".exe"
}
}
cwd, err := os.Getwd()
@@ -377,8 +403,7 @@ func main() {
output := filepath.Join(cwd, outputFilename)
version := getVersion()
compileTime := time.Now().Format(timeFormat)
constants := Constants{`main.compiledAt`: compileTime}
constants := Constants{}
if version != "" {
constants["main.version"] = version
}
@@ -388,7 +413,7 @@ func main() {
args := []string{
"-tags", strings.Join(buildTags, " "),
"-ldflags", ldflags,
"-o", output, "cmds/restic",
"-o", output, config.Main,
}
err = build(filepath.Join(gopath, "src"), targetGOOS, targetGOARCH, gopath, args...)
@@ -399,7 +424,7 @@ func main() {
if runTests {
verbosePrintf("running tests\n")
err = test(filepath.Join(gopath, "src"), gopath, "restic/...")
err = test(cwd, gopath, config.Tests...)
if err != nil {
die("running tests failed: %v\n", err)
}

64
build_release_binaries.sh Executable file
View File

@@ -0,0 +1,64 @@
#!/bin/bash
set -e
if [[ -z "$VERSION" ]]; then
echo '$VERSION unset'
exit 1
fi
dir=$(mktemp -d --tmpdir restic-release-XXXXXX)
echo "path is ${dir}"
for R in \
darwin/386 \
darwin/amd64 \
freebsd/386 \
freebsd/amd64 \
freebsd/arm \
linux/386 \
linux/amd64 \
linux/arm \
linux/arm64 \
openbsd/386 \
openbsd/amd64 \
windows/386 \
windows/amd64 \
; do \
OS=$(dirname $R)
ARCH=$(basename $R)
filename=restic_${VERSION}_${OS}_${ARCH}
if [[ "$OS" == "windows" ]]; then
filename="${filename}.exe"
fi
echo $filename
go run build.go --goos $OS --goarch $ARCH --output ${filename}
if [[ "$OS" == "windows" ]]; then
zip ${filename%.exe}.zip ${filename}
rm ${filename}
mv ${filename%.exe}.zip ${dir}
else
bzip2 ${filename}
mv ${filename}.bz2 ${dir}
fi
done
echo "packing sources"
git archive --format=tar --prefix=restic-$VERSION/ v$VERSION | gzip -n > restic-$VERSION.tar.gz
mv restic-$VERSION.tar.gz ${dir}
echo "creating checksums"
pushd ${dir}
sha256sum restic_*.{zip,bz2} restic-$VERSION.tar.gz > SHA256SUMS
gpg --armor --detach-sign SHA256SUMS
popd
echo "creating source signature file"
gpg --armor --detach-sign ${dir}/restic-$VERSION.tar.gz
echo
echo "done, path is ${dir}"

View File

@@ -62,11 +62,13 @@ overhead is 32 bytes. For each file, a new random IV is selected.
The file `config` is encrypted this way and contains a JSON document like the
following:
{
"version": 1,
"id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
"chunker_polynomial": "25b468838dcb75"
}
```json
{
"version": 1,
"id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
"chunker_polynomial": "25b468838dcb75"
}
```
After decryption, restic first checks that the version field contains a version
number that it understands, otherwise it aborts. At the moment, the version is
@@ -102,7 +104,9 @@ The basic layout of a sample restic repository is shown here:
A repository can be initialized with the `restic init` command, e.g.:
$ restic -r /tmp/restic-repo init
```console
$ restic -r /tmp/restic-repo init
```
Pack Format
-----------
@@ -163,35 +167,37 @@ used to reconstruct the index. The files are encrypted and authenticated like
Data and Tree Blobs, so the outer structure is `IV || Ciphertext || MAC` again.
The plaintext consists of a JSON document like the following:
```json
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"supersedes": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}, [...]
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}
}, [...]
]
}
```
This JSON document lists Packs and the blobs contained therein. In this
example, the Pack `73d04e61` contains two data Blobs and one Tree blob, the
@@ -258,14 +264,16 @@ document which contains the master encryption and message authentication keys
for this repository (encoded in Base64). The command `restic cat masterkey` can
be used as follows to decrypt and pretty-print the master key:
$ restic -r /tmp/restic-repo cat masterkey
{
"mac": {
"k": "evFWd9wWlndL9jc501268g==",
"r": "E9eEDnSJZgqwTOkDtOp+Dw=="
},
"encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
}
```console
$ restic -r /tmp/restic-repo cat masterkey
{
"mac": {
"k": "evFWd9wWlndL9jc501268g==",
"r": "E9eEDnSJZgqwTOkDtOp+Dw=="
},
"encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
}
```
All data in the repository is encrypted and authenticated with these master keys.
For encryption, the AES-256 algorithm in Counter mode is used. For message
@@ -277,7 +285,7 @@ This way, the password can be changed without having to re-encrypt all data.
Snapshots
---------
A snapshots represents a directory with all files and sub-directories at a
A snapshot represents a directory with all files and sub-directories at a
given point in time. For each backup that is made, a new snapshot is created. A
snapshot is a JSON document that is stored in an encrypted file below the
directory `snapshots` in the repository. The filename is the storage ID. This
@@ -286,20 +294,52 @@ string is unique and used within restic to uniquely identify a snapshot.
The command `restic cat snapshot` can be used as follows to decrypt and
pretty-print the contents of a snapshot file:
$ restic -r /tmp/restic-repo cat snapshot 22a5af1b
enter password for repository:
{
"time": "2015-01-02T18:10:50.895208559+01:00",
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
"dir": "/tmp/testdata",
"hostname": "kasimir",
"username": "fd0",
"uid": 1000,
"gid": 100
}
```console
$ restic -r /tmp/restic-repo cat snapshot 251c2e58
enter password for repository:
{
"time": "2015-01-02T18:10:50.895208559+01:00",
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
"dir": "/tmp/testdata",
"hostname": "kasimir",
"username": "fd0",
"uid": 1000,
"gid": 100,
"tags": [
"NL"
]
}
```
Here it can be seen that this snapshot represents the contents of the directory
`/tmp/testdata`. The most important field is `tree`.
`/tmp/testdata`. The most important field is `tree`. When the meta data (e.g.
the tags) of a snapshot change, the snapshot needs to be re-encrypted and saved.
This will change the storage ID, so in order to relate these seemingly
different snapshots, a field `original` is introduced which contains the ID of
the original snapshot, e.g. after adding the tag `DE` to the snapshot above it
becomes:
```console
$ restic -r /tmp/restic-repo cat snapshot 22a5af1b
enter password for repository:
{
"time": "2015-01-02T18:10:50.895208559+01:00",
"tree": "2da81727b6585232894cfbb8f8bdab8d1eccd3d8f7c92bc934d62e62e618ffdf",
"dir": "/tmp/testdata",
"hostname": "kasimir",
"username": "fd0",
"uid": 1000,
"gid": 100,
"tags": [
"NL",
"DE"
],
"original": "251c2e5841355f743f9d4ffd3260bee765acee40a6229857e32b60446991b837"
}
```
Once introduced, the `original` field is not modified when the snapshot's meta
data is changed again.
All content within a restic repository is referenced according to its SHA-256
hash. Before saving, each file is split into variable sized Blobs of data. The
@@ -317,70 +357,77 @@ A snapshot references a tree by the SHA-256 hash of the JSON string
representation of its contents. Trees and data are saved in pack files in a
subdirectory of the directory `data`.
The command `restic cat tree` can be used to inspect the tree referenced above:
The command `restic cat blob` can be used to inspect the tree referenced above
(piping the output of the command to `jq .` so that the JSON is indented):
$ restic -r /tmp/restic-repo cat tree b8138ab08a4722596ac89c917827358da4672eac68e3c03a8115b88dbf4bfb59
enter password for repository:
```console
$ restic -r /tmp/restic-repo cat blob b8138ab08a4722596ac89c917827358da4672eac68e3c03a8115b88dbf4bfb59 | jq .
enter password for repository:
{
"nodes": [
{
"nodes": [
{
"name": "testdata",
"type": "dir",
"mode": 493,
"mtime": "2014-12-22T14:47:59.912418701+01:00",
"atime": "2014-12-06T17:49:21.748468803+01:00",
"ctime": "2014-12-22T14:47:59.912418701+01:00",
"uid": 1000,
"gid": 100,
"user": "fd0",
"inode": 409704562,
"content": null,
"subtree": "b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc"
}
]
"name": "testdata",
"type": "dir",
"mode": 493,
"mtime": "2014-12-22T14:47:59.912418701+01:00",
"atime": "2014-12-06T17:49:21.748468803+01:00",
"ctime": "2014-12-22T14:47:59.912418701+01:00",
"uid": 1000,
"gid": 100,
"user": "fd0",
"inode": 409704562,
"content": null,
"subtree": "b26e315b0988ddcd1cee64c351d13a100fedbc9fdbb144a67d1b765ab280b4dc"
}
]
}
```
A tree contains a list of entries (in the field `nodes`) which contain meta
data like a name and timestamps. When the entry references a directory, the
field `subtree` contains the plain text ID of another tree object.
When the command `restic cat tree` is used, the storage hash is needed to print
When the command `restic cat blob` is used, the plaintext ID is needed to print
a tree. The tree referenced above can be dumped as follows:
$ restic -r /tmp/restic-repo cat tree 8b238c8811cc362693e91a857460c78d3acf7d9edb2f111048691976803cf16e
enter password for repository:
```console
$ restic -r /tmp/restic-repo cat blob 8b238c8811cc362693e91a857460c78d3acf7d9edb2f111048691976803cf16e
enter password for repository:
{
"nodes": [
{
"nodes": [
{
"name": "testfile",
"type": "file",
"mode": 420,
"mtime": "2014-12-06T17:50:23.34513538+01:00",
"atime": "2014-12-06T17:50:23.338468713+01:00",
"ctime": "2014-12-06T17:50:23.34513538+01:00",
"uid": 1000,
"gid": 100,
"user": "fd0",
"inode": 416863351,
"size": 1234,
"links": 1,
"content": [
"50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d"
]
},
[...]
"name": "testfile",
"type": "file",
"mode": 420,
"mtime": "2014-12-06T17:50:23.34513538+01:00",
"atime": "2014-12-06T17:50:23.338468713+01:00",
"ctime": "2014-12-06T17:50:23.34513538+01:00",
"uid": 1000,
"gid": 100,
"user": "fd0",
"inode": 416863351,
"size": 1234,
"links": 1,
"content": [
"50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d"
]
}
},
[...]
]
}
```
This tree contains a file entry. This time, the `subtree` field is not present
and the `content` field contains a list with one plain text SHA-256 hash.
The command `restic cat data` can be used to extract and decrypt data given a
plaintext ID, e.g. for the data mentioned above:
The command `restic cat blob` can also be used to extract and decrypt data
given a plaintext ID, e.g. for the data mentioned above:
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
enter password for repository:
50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
```console
$ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
enter password for repository:
50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
```
As can be seen from the output of the program `sha256sum`, the hash matches the
plaintext hash from the map included in the tree above, so the correct data has
@@ -404,15 +451,17 @@ A lock is a file in the subdir `locks` whose filename is the storage ID of
the contents. It is encrypted and authenticated the same way as other files
in the repository and contains the following JSON structure:
{
"time": "2015-06-27T12:18:51.759239612+02:00",
"exclusive": false,
"hostname": "kasimir",
"username": "fd0",
"pid": 13607,
"uid": 1000,
"gid": 100
}
```json
{
"time": "2015-06-27T12:18:51.759239612+02:00",
"exclusive": false,
"hostname": "kasimir",
"username": "fd0",
"pid": 13607,
"uid": 1000,
"gid": 100
}
```
The field `exclusive` defines the type of lock. When a new lock is to be
created, restic checks all locks in the repository. When a lock is found, it

20
doc/FAQ.md Normal file
View File

@@ -0,0 +1,20 @@
FAQ
===
This is the list of Frequently Asked Questions for restic.
`restic check` reports packs that aren't referenced in any index, is my repository broken?
------------------------------------------------------------------------------------------
When `restic check` reports pack files in the repository that are not referenced in any index, that is (contrary to what restic currently reports) not a cause for concern. The output looks like this:
$ restic check
Create exclusive lock for repository
Load indexes
Check all packs
pack 819a9a52e4f51230afa89aefbf90df37fb70996337ae57e6f7a822959206a85e: not referenced in any index
pack de299e69fb075354a3775b6b045d152387201f1cdc229c31d1caa34c3b340141: not referenced in any index
Check snapshots, trees and blobs
Fatal: repository contains errors
The message means that there is more data stored in the repo than strictly necessary. With high probability this is duplicate data. In order to clean it up, the command `restic prune` can be used. The cause of this bug is not yet known.

File diff suppressed because it is too large

View File

@@ -6,6 +6,19 @@ following values are valid for `{type}`: `data`, `keys`, `locks`, `snapshots`,
`index`, `config`. `{path}` is a path to the repository, so that multiple
different repositories can be accessed. The default path is `/`.
## POST {path}?create=true
This request is used to initially create a new repository. The server responds
with "200 OK" if the repository structure was created successfully or already
exists, otherwise an error is returned.
## DELETE {path}
Deletes the repository on the server side. The server responds with "200 OK" if
the repository was successfully removed. If this function is not implemented
the server returns "501 Not Implemented", if this it is denied by the server it
returns "403 Forbidden".
## HEAD {path}/config
Returns "200 OK" if the repository has a configuration,

124
doc/code.css Normal file
View File

@@ -0,0 +1,124 @@
code {
font-size: 90%;
}
/* based on https://github.com/mkdocs/mkdocs/issues/1019 */
.codehilite code, .codehilite pre {
color:#3F3F3F;background-color:#F7F7F7;
overflow: auto;
box-sizing: border-box;
padding: 0.01em 4px;
padding-top: 0.01em;
padding-right-value: 4px;
padding-bottom: 0.01em;
padding-left-value: 4px;
padding-left-ltr-source: physical;
padding-left-rtl-source: physical;
padding-right-ltr-source: physical;
padding-right-rtl-source: physical;
border-radius: 4px !important;
border-top-left-radius: 4px;
border-top-right-radius: 4px;
border-bottom-right-radius: 4px;
border-bottom-left-radius: 4px;
border: 1px solid #CCC !important;
border-top-width: 1px;
border-right-width-value: 1px;
border-right-width-ltr-source: physical;
border-right-width-rtl-source: physical;
border-bottom-width: 1px;
border-left-width-value: 1px;
border-left-width-ltr-source: physical;
border-left-width-rtl-source: physical;
border-top-style: solid;
border-right-style-value: solid;
border-right-style-ltr-source: physical;
border-right-style-rtl-source: physical;
border-bottom-style: solid;
border-left-style-value: solid;
border-left-style-ltr-source: physical;
border-left-style-rtl-source: physical;
border-top-color: #CCC;
border-right-color-value: #CCC;
border-right-color-ltr-source: physical;
border-right-color-rtl-source: physical;
border-bottom-color: #CCC;
border-left-color-value: #CCC;
border-left-color-ltr-source: physical;
border-left-color-rtl-source: physical;
-moz-border-top-colors: none;
-moz-border-right-colors: none;
-moz-border-bottom-colors: none;
-moz-border-left-colors: none;
border-image-source: none;
border-image-slice: 100% 100% 100% 100%;
border-image-width: 1 1 1 1;
border-image-outset: 0 0 0 0;
border-image-repeat: stretch stretch;
}
.codehilite .hll { background-color: #ffffcc }
.codehilite .c { color: #999988; font-style: italic } /* Comment */
.codehilite .err { color: #a61717; background-color: #e3d2d2 } /* Error */
.codehilite .k { color: #000000; font-weight: bold } /* Keyword */
.codehilite .o { color: #000000; font-weight: bold } /* Operator */
.codehilite .cm { color: #999988; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #999999; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .c1 { color: #999988; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
.codehilite .ge { color: #000000; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #aa0000 } /* Generic.Error */
.codehilite .gh { color: #999999 } /* Generic.Heading */
.codehilite .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
.codehilite .go { color: #888888 } /* Generic.Output */
.codehilite .gp { color: #555555 } /* Generic.Prompt */
.codehilite .gs { font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #aaaaaa } /* Generic.Subheading */
.codehilite .gt { color: #aa0000 } /* Generic.Traceback */
.codehilite .kc { color: #000000; font-weight: bold } /* Keyword.Constant */
.codehilite .kd { color: #000000; font-weight: bold } /* Keyword.Declaration */
.codehilite .kn { color: #000000; font-weight: bold } /* Keyword.Namespace */
.codehilite .kp { color: #000000; font-weight: bold } /* Keyword.Pseudo */
.codehilite .kr { color: #000000; font-weight: bold } /* Keyword.Reserved */
.codehilite .kt { color: #445588; font-weight: bold } /* Keyword.Type */
.codehilite .m { color: #009999 } /* Literal.Number */
.codehilite .s { color: #d01040 } /* Literal.String */
.codehilite .na { color: #008080 } /* Name.Attribute */
.codehilite .nb { color: #0086B3 } /* Name.Builtin */
.codehilite .nc { color: #445588; font-weight: bold } /* Name.Class */
.codehilite .no { color: #008080 } /* Name.Constant */
.codehilite .nd { color: #3c5d5d; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #800080 } /* Name.Entity */
.codehilite .ne { color: #990000; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #990000; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #990000; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #555555 } /* Name.Namespace */
.codehilite .nt { color: #000080 } /* Name.Tag */
.codehilite .nv { color: #008080 } /* Name.Variable */
.codehilite .ow { color: #000000; font-weight: bold } /* Operator.Word */
.codehilite .w { color: #bbbbbb } /* Text.Whitespace */
.codehilite .mf { color: #009999 } /* Literal.Number.Float */
.codehilite .mh { color: #009999 } /* Literal.Number.Hex */
.codehilite .mi { color: #009999 } /* Literal.Number.Integer */
.codehilite .mo { color: #009999 } /* Literal.Number.Oct */
.codehilite .sb { color: #d01040 } /* Literal.String.Backtick */
.codehilite .sc { color: #d01040 } /* Literal.String.Char */
.codehilite .sd { color: #d01040 } /* Literal.String.Doc */
.codehilite .s2 { color: #d01040 } /* Literal.String.Double */
.codehilite .se { color: #d01040 } /* Literal.String.Escape */
.codehilite .sh { color: #d01040 } /* Literal.String.Heredoc */
.codehilite .si { color: #d01040 } /* Literal.String.Interpol */
.codehilite .sx { color: #d01040 } /* Literal.String.Other */
.codehilite .sr { color: #009926 } /* Literal.String.Regex */
.codehilite .s1 { color: #d01040 } /* Literal.String.Single */
.codehilite .ss { color: #990073 } /* Literal.String.Symbol */
.codehilite .bp { color: #999999 } /* Name.Builtin.Pseudo */
.codehilite .vc { color: #008080 } /* Name.Variable.Class */
.codehilite .vg { color: #008080 } /* Name.Variable.Global */
.codehilite .vi { color: #008080 } /* Name.Variable.Instance */
.codehilite .il { color: #009999 } /* Literal.Number.Integer.Long */

View File

@@ -20,10 +20,12 @@ this page, where you can select the version.
The restic documentation is built with [MkDocs](http://www.mkdocs.org). After
installing it, you can edit and view the documentation locally by running:
$ mkdocs serve
INFO - Building documentation...
INFO - Cleaning site directory
[I 160221 12:33:57 server:271] Serving on http://127.0.0.1:8000
```console
$ mkdocs serve
INFO - Building documentation...
INFO - Cleaning site directory
[I 160221 12:33:57 server:271] Serving on http://127.0.0.1:8000
```
Afterwards visit the URL with a browser.

View File

@@ -1,5 +1,9 @@
site_name: Documentation for restic
theme: readthedocs
markdown_extensions:
- codehilite:
extra_css:
- code.css
docs_dir: doc
pages:
- Getting Started: index.md

View File

@@ -3,7 +3,9 @@
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"io"
@@ -17,6 +19,12 @@ import (
"strings"
)
// ForbiddenImports are the packages from the stdlib that should not be used in
// our code.
var ForbiddenImports = map[string]bool{
"errors": true,
}
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
var minioServer = flag.String("minio", "", "path to the minio server binary")
var debug = flag.Bool("debug", false, "output debug messages")
@@ -142,7 +150,6 @@ func (env *TravisEnvironment) Prepare() error {
for _, pkg := range []string{
"golang.org/x/tools/cmd/cover",
"github.com/mattn/goveralls",
"github.com/pierrre/gotestcover",
} {
err := run("go", "get", pkg)
@@ -158,17 +165,7 @@ func (env *TravisEnvironment) Prepare() error {
return err
}
if runtime.GOOS == "darwin" {
// install the libraries necessary for fuse
if err := run("brew", "update"); err != nil {
return err
}
if err := run("brew", "cask", "install", "osxfuse"); err != nil {
return err
}
}
if *runCrossCompile {
if *runCrossCompile && !(runtime.Version() < "go1.7") {
// only test cross compilation on linux with Travis
if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
return err
@@ -179,7 +176,7 @@ func (env *TravisEnvironment) Prepare() error {
"windows/386", "windows/amd64",
"darwin/386", "darwin/amd64",
"freebsd/386", "freebsd/amd64",
"opendbsd/386", "opendbsd/amd64",
"openbsd/386", "openbsd/amd64",
}
if !strings.HasPrefix(runtime.Version(), "go1.3") {
env.goxOSArch = append(env.goxOSArch,
@@ -191,8 +188,7 @@ func (env *TravisEnvironment) Prepare() error {
msg("gox: OS/ARCH %v\n", env.goxOSArch)
v := runtime.Version()
if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
if runtime.Version() < "go1.5" {
err := run("gox", "-build-toolchain",
"-osarch", strings.Join(env.goxOSArch, " "))
@@ -305,8 +301,8 @@ func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (
// RunTests starts the tests for Travis.
func (env *TravisEnvironment) RunTests() error {
// run fuse tests on darwin
if runtime.GOOS != "darwin" {
// do not run fuse tests on darwin
if runtime.GOOS == "darwin" {
msg("skip fuse integration tests on %v\n", runtime.GOOS)
os.Setenv("RESTIC_TEST_FUSE", "0")
}
@@ -318,14 +314,17 @@ func (env *TravisEnvironment) RunTests() error {
env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")
if *runCrossCompile {
if *runCrossCompile && !(runtime.Version() < "go1.7") {
// compile for all target architectures with tags
for _, tags := range []string{"release", "debug"} {
runWithEnv(env.env, "gox", "-verbose",
err := runWithEnv(env.env, "gox", "-verbose",
"-osarch", strings.Join(env.goxOSArch, " "),
"-tags", tags,
"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
"cmds/restic")
if err != nil {
return err
}
}
}
@@ -340,7 +339,30 @@ func (env *TravisEnvironment) RunTests() error {
return err
}
return runGofmt()
if err = runGofmt(); err != nil {
return err
}
deps, err := findImports()
if err != nil {
return err
}
foundForbiddenImports := false
for name, imports := range deps {
for _, pkg := range imports {
if _, ok := ForbiddenImports[pkg]; ok {
fmt.Fprintf(os.Stderr, "========== package %v imports forbidden package %v\n", name, pkg)
foundForbiddenImports = true
}
}
}
if foundForbiddenImports {
return errors.New("CI: forbidden imports found")
}
return nil
}
// AppveyorEnvironment is the environment on Windows.
@@ -408,6 +430,46 @@ func updateEnv(env []string, override map[string]string) []string {
return newEnv
}
func findImports() (map[string][]string, error) {
res := make(map[string][]string)
cwd, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("Getwd() returned error: %v", err)
}
gopath := cwd + ":" + filepath.Join(cwd, "vendor")
cmd := exec.Command("go", "list", "-f", `{{.ImportPath}} {{join .Imports " "}}`, "./src/...")
cmd.Env = updateEnv(os.Environ(), map[string]string{"GOPATH": gopath})
cmd.Stderr = os.Stderr
output, err := cmd.Output()
if err != nil {
return nil, err
}
sc := bufio.NewScanner(bytes.NewReader(output))
for sc.Scan() {
wordScanner := bufio.NewScanner(strings.NewReader(sc.Text()))
wordScanner.Split(bufio.ScanWords)
if !wordScanner.Scan() {
return nil, fmt.Errorf("package name not found in line: %s", output)
}
name := wordScanner.Text()
var deps []string
for wordScanner.Scan() {
deps = append(deps, wordScanner.Text())
}
res[name] = deps
}
return res, nil
}
func runGofmt() error {
dir, err := os.Getwd()
if err != nil {

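The CI check added above drives `go list` and scans each package's import list for blacklisted entries. A minimal standalone sketch of the same technique (the `forbidden` set and the `./...` package pattern are illustrative, not restic's exact setup):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// One line per package: "<import path> <import 1> <import 2> ...".
	cmd := exec.Command("go", "list", "-f",
		`{{.ImportPath}} {{join .Imports " "}}`, "./...")
	cmd.Stderr = os.Stderr
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	forbidden := map[string]bool{"errors": true} // packages we refuse to import

	sc := bufio.NewScanner(bytes.NewReader(out))
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) == 0 {
			continue
		}
		for _, imp := range fields[1:] {
			if forbidden[imp] {
				fmt.Printf("package %v imports forbidden package %v\n", fields[0], imp)
			}
		}
	}
}
```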
View File

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@@ -1,29 +0,0 @@
# Restic Server
Restic Server is a sample server that implements restic's REST backend API.
It has been developed for demonstration purposes and is not intended to be used in production.
## Getting started
By default the server persists backup data in `/tmp/restic`.
Build and start the server with a custom persistence directory:
```
go build
./restic-server -path /user/home/backup
```
The server uses an `.htpasswd` file to specify users. You can create such a file at the root of the persistence directory by executing the following command. To append a new user to the file, just omit the `-c` argument.
```
htpasswd -s -c .htpasswd username
```
By default the server uses HTTP. This is not very secure, since with Basic Authentication usernames and passwords are present in every request. To enable TLS support, just add the `-tls` argument and add a private and public key at the root of your persistence directory.
Signed certificates are required by the restic backend, but if you just want to test the feature you can generate a self-signed certificate with the following commands:
```
openssl genrsa -out private_key 2048
openssl req -new -x509 -key private_key -out public_key -days 365
```

View File

@@ -1,192 +0,0 @@
// +build go1.4
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
// Context contains repository meta-data.
type Context struct {
path string
}
// AuthHandler wraps h with a http.HandlerFunc that performs basic
// authentication against the user/passwords pairs stored in f and returns the
// http.HandlerFunc.
func AuthHandler(f *HtpasswdFile, h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok {
http.Error(w, "401 unauthorized", 401)
return
}
if !f.Validate(username, password) {
http.Error(w, "401 unauthorized", 401)
return
}
h.ServeHTTP(w, r)
}
}
// CheckConfig returns a http.HandlerFunc that checks whether
// a configuration exists.
func CheckConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
st, err := os.Stat(config)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Header().Add("Content-Length", fmt.Sprint(st.Size()))
}
}
// GetConfig returns a http.HandlerFunc that allows for a
// config to be retrieved.
func GetConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
bytes, err := ioutil.ReadFile(config)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Write(bytes)
}
}
// SaveConfig returns a http.HandlerFunc that allows for a
// config to be saved.
func SaveConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
bytes, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "400 bad request", 400)
return
}
errw := ioutil.WriteFile(config, bytes, 0600)
if errw != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}
// ListBlobs returns a http.HandlerFunc that lists
// all blobs of a given type in an arbitrary order.
func ListBlobs(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
path := filepath.Join(c.path, dir)
files, err := ioutil.ReadDir(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
names := make([]string, len(files))
for i, f := range files {
names[i] = f.Name()
}
data, err := json.Marshal(names)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write(data)
}
}
// CheckBlob returns a http.HandlerFunc that tests whether a blob exists
// and returns 200, if it does, or 404 otherwise.
func CheckBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
st, err := os.Stat(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Header().Add("Content-Length", fmt.Sprint(st.Size()))
}
}
// GetBlob returns a http.HandlerFunc that retrieves a blob
// from the repository.
func GetBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
file, err := os.Open(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
defer file.Close()
http.ServeContent(w, r, "", time.Unix(0, 0), file)
}
}
// SaveBlob returns a http.HandlerFunc that saves a blob to the repository.
func SaveBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
tmp := path + "_tmp"
tf, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
if _, err := io.Copy(tf, r.Body); err != nil {
http.Error(w, "400 bad request", 400)
tf.Close()
os.Remove(tmp)
return
}
if err := tf.Close(); err != nil {
http.Error(w, "500 internal server error", 500)
}
if err := os.Rename(tmp, path); err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}
// DeleteBlob returns a http.HandlerFunc that deletes a blob from the
// repository.
func DeleteBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
err := os.Remove(path)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}

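SaveBlob above never exposes a partially written blob: it streams the request body into a temporary sibling file and renames it into place only after a successful close, relying on rename being atomic within a filesystem. A minimal sketch of that pattern (path and payload are illustrative):

```go
package main

import (
	"io"
	"os"
	"strings"
)

// writeAtomic streams r into path by writing a temporary sibling file first
// and renaming it into place, so readers never observe a half-written blob.
func writeAtomic(path string, r io.Reader) error {
	tmp := path + "_tmp"
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	if err := writeAtomic("/tmp/example.blob", strings.NewReader("payload")); err != nil {
		panic(err)
	}
}
```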
View File

@@ -1,96 +0,0 @@
// +build go1.4
package main
/*
Copied from: github.com/bitly/oauth2_proxy
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
import (
"crypto/sha1"
"encoding/base64"
"encoding/csv"
"io"
"log"
"os"
)
// Lookup passwords in an htpasswd file.
// The entries must have been created with -s for SHA encryption.
// HtpasswdFile is a map from usernames to passwords.
type HtpasswdFile struct {
Users map[string]string
}
// NewHtpasswdFromFile reads the users and passwords from an htpasswd
// file and returns them. If an error is encountered, it is returned, together
// with a nil pointer for the HtpasswdFile.
func NewHtpasswdFromFile(path string) (*HtpasswdFile, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return NewHtpasswd(r)
}
// NewHtpasswd reads the users and passwords from an htpasswd
// data stream and returns them. If an error is encountered,
// it is returned, together with a nil pointer for the HtpasswdFile.
func NewHtpasswd(file io.Reader) (*HtpasswdFile, error) {
cr := csv.NewReader(file)
cr.Comma = ':'
cr.Comment = '#'
cr.TrimLeadingSpace = true
records, err := cr.ReadAll()
if err != nil {
return nil, err
}
h := &HtpasswdFile{Users: make(map[string]string)}
for _, record := range records {
h.Users[record[0]] = record[1]
}
return h, nil
}
// Validate returns true if password matches the stored password
// for user. If no password for user is stored, or the password
// is wrong, false is returned.
func (h *HtpasswdFile) Validate(user string, password string) bool {
realPassword, exists := h.Users[user]
if !exists {
return false
}
if realPassword[:5] == "{SHA}" {
d := sha1.New()
d.Write([]byte(password))
if realPassword[5:] == base64.StdEncoding.EncodeToString(d.Sum(nil)) {
return true
}
} else {
log.Printf("Invalid htpasswd entry for %s. Must be a SHA entry.", user)
}
return false
}

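Validate above only understands entries created with `htpasswd -s`, i.e. `{SHA}` followed by the base64-encoded SHA-1 of the password. A small sketch producing such an entry for comparison (username and password are made up):

```go
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	password := "secret" // illustrative only
	sum := sha1.Sum([]byte(password))
	// htpasswd -s stores: username:{SHA}<base64 of SHA-1(password)>
	fmt.Printf("username:{SHA}%s\n", base64.StdEncoding.EncodeToString(sum[:]))
}
```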
View File

@@ -1,137 +0,0 @@
// +build go1.4
package main
import (
"log"
"net/http"
"strings"
)
// Route is a handler for a path that was already split.
type Route struct {
path []string
handler http.Handler
}
// Router maps HTTP methods to a slice of Route handlers.
type Router struct {
routes map[string][]Route
}
// NewRouter creates a new Router and returns a pointer to it.
func NewRouter() *Router {
return &Router{make(map[string][]Route)}
}
// Options registers handler for path with method "OPTIONS".
func (router *Router) Options(path string, handler http.Handler) {
router.Handle("OPTIONS", path, handler)
}
// OptionsFunc registers handler for path with method "OPTIONS".
func (router *Router) OptionsFunc(path string, handler http.HandlerFunc) {
router.Handle("OPTIONS", path, handler)
}
// Get registers handler for path with method "GET".
func (router *Router) Get(path string, handler http.Handler) {
router.Handle("GET", path, handler)
}
// GetFunc registers handler for path with method "GET".
func (router *Router) GetFunc(path string, handler http.HandlerFunc) {
router.Handle("GET", path, handler)
}
// Head registers handler for path with method "HEAD".
func (router *Router) Head(path string, handler http.Handler) {
router.Handle("HEAD", path, handler)
}
// HeadFunc registers handler for path with method "HEAD".
func (router *Router) HeadFunc(path string, handler http.HandlerFunc) {
router.Handle("HEAD", path, handler)
}
// Post registers handler for path with method "POST".
func (router *Router) Post(path string, handler http.Handler) {
router.Handle("POST", path, handler)
}
// PostFunc registers handler for path with method "POST".
func (router *Router) PostFunc(path string, handler http.HandlerFunc) {
router.Handle("POST", path, handler)
}
// Put registers handler for path with method "PUT".
func (router *Router) Put(path string, handler http.Handler) {
router.Handle("PUT", path, handler)
}
// PutFunc registers handler for path with method "PUT".
func (router *Router) PutFunc(path string, handler http.HandlerFunc) {
router.Handle("PUT", path, handler)
}
// Delete registers handler for path with method "DELETE".
func (router *Router) Delete(path string, handler http.Handler) {
router.Handle("DELETE", path, handler)
}
// DeleteFunc registers handler for path with method "DELETE".
func (router *Router) DeleteFunc(path string, handler http.HandlerFunc) {
router.Handle("DELETE", path, handler)
}
// Trace registers handler for path with method "TRACE".
func (router *Router) Trace(path string, handler http.Handler) {
router.Handle("TRACE", path, handler)
}
// TraceFunc registers handler for path with method "TRACE".
func (router *Router) TraceFunc(path string, handler http.HandlerFunc) {
router.Handle("TRACE", path, handler)
}
// Connect registers handler for path with method "Connect".
func (router *Router) Connect(path string, handler http.Handler) {
router.Handle("Connect", path, handler)
}
// ConnectFunc registers handler for path with method "Connect".
func (router *Router) ConnectFunc(path string, handler http.HandlerFunc) {
router.Handle("Connect", path, handler)
}
// Handle registers a http.Handler for method and uri
func (router *Router) Handle(method string, uri string, handler http.Handler) {
routes := router.routes[method]
path := strings.Split(uri, "/")
routes = append(routes, Route{path, handler})
router.routes[method] = routes
}
func (router *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
method := r.Method
uri := r.RequestURI
path := strings.Split(uri, "/")
log.Printf("%s %s", method, uri)
ROUTE:
for _, route := range router.routes[method] {
if len(route.path) != len(path) {
continue
}
for i := 0; i < len(route.path); i++ {
if !strings.HasPrefix(route.path[i], ":") && route.path[i] != path[i] {
continue ROUTE
}
}
route.handler.ServeHTTP(w, r)
return
}
http.Error(w, "404 not found", 404)
}

View File

@@ -1,74 +0,0 @@
// +build go1.4
package main
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestRouter(t *testing.T) {
router := NewRouter()
getConfig := []byte("GET /config")
router.GetFunc("/config", func(w http.ResponseWriter, r *http.Request) {
w.Write(getConfig)
})
postConfig := []byte("POST /config")
router.PostFunc("/config", func(w http.ResponseWriter, r *http.Request) {
w.Write(postConfig)
})
getBlobs := []byte("GET /blobs/")
router.GetFunc("/blobs/", func(w http.ResponseWriter, r *http.Request) {
w.Write(getBlobs)
})
getBlob := []byte("GET /blobs/:sha")
router.GetFunc("/blobs/:sha", func(w http.ResponseWriter, r *http.Request) {
w.Write(getBlob)
})
server := httptest.NewServer(router)
defer server.Close()
getConfigResp, _ := http.Get(server.URL + "/config")
getConfigBody, _ := ioutil.ReadAll(getConfigResp.Body)
if getConfigResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getConfigResp.StatusCode)
}
if string(getConfig) != string(getConfigBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getConfig), string(getConfigBody))
}
postConfigResp, _ := http.Post(server.URL+"/config", "binary/octet-stream", strings.NewReader("post test"))
postConfigBody, _ := ioutil.ReadAll(postConfigResp.Body)
if postConfigResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", postConfigResp.StatusCode)
}
if string(postConfig) != string(postConfigBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(postConfig), string(postConfigBody))
}
getBlobsResp, _ := http.Get(server.URL + "/blobs/")
getBlobsBody, _ := ioutil.ReadAll(getBlobsResp.Body)
if getBlobsResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getBlobsResp.StatusCode)
}
if string(getBlobs) != string(getBlobsBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getBlobs), string(getBlobsBody))
}
getBlobResp, _ := http.Get(server.URL + "/blobs/test")
getBlobBody, _ := ioutil.ReadAll(getBlobResp.Body)
if getBlobResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getBlobResp.StatusCode)
}
if string(getBlob) != string(getBlobBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getBlob), string(getBlobBody))
}
}

View File

@@ -1,73 +0,0 @@
// +build go1.4
package main
import (
"flag"
"log"
"net/http"
"os"
"path/filepath"
)
const (
defaultHTTPPort = ":8000"
defaultHTTPSPort = ":8443"
)
func main() {
// Parse command-line args
var path = flag.String("path", "/tmp/restic", "specifies the path of the data directory")
var tls = flag.Bool("tls", false, "turns on tls support")
flag.Parse()
// Create the missing directories
dirs := []string{
"data",
"snapshots",
"index",
"locks",
"keys",
"tmp",
}
for _, d := range dirs {
os.MkdirAll(filepath.Join(*path, d), 0700)
}
// Define the routes
context := &Context{*path}
router := NewRouter()
router.HeadFunc("/config", CheckConfig(context))
router.GetFunc("/config", GetConfig(context))
router.PostFunc("/config", SaveConfig(context))
router.GetFunc("/:dir/", ListBlobs(context))
router.HeadFunc("/:dir/:name", CheckBlob(context))
router.GetFunc("/:type/:name", GetBlob(context))
router.PostFunc("/:type/:name", SaveBlob(context))
router.DeleteFunc("/:type/:name", DeleteBlob(context))
// Check for a password file
var handler http.Handler
htpasswdFile, err := NewHtpasswdFromFile(filepath.Join(*path, ".htpasswd"))
if err != nil {
log.Println("Authentication disabled")
handler = router
} else {
log.Println("Authentication enabled")
handler = AuthHandler(htpasswdFile, router)
}
// start the server
if !*tls {
log.Printf("start server on port %s\n", defaultHTTPPort)
http.ListenAndServe(defaultHTTPPort, handler)
} else {
privateKey := filepath.Join(*path, "private_key")
publicKey := filepath.Join(*path, "public_key")
log.Println("TLS enabled")
log.Printf("private key: %s", privateKey)
log.Printf("public key: %s", publicKey)
log.Printf("start server on port %s\n", defaultHTTPSPort)
http.ListenAndServeTLS(defaultHTTPSPort, publicKey, privateKey, handler)
}
}

View File

@@ -0,0 +1,9 @@
// +build !linux
package main
// IsProcessBackground should return true if the process is running in the background, or false if not
func IsProcessBackground() bool {
// TODO: check whether the process is running in the background on operating systems other than Linux
return false
}

View File

@@ -0,0 +1,21 @@
package main
import (
"syscall"
"unsafe"
"restic/debug"
)
// IsProcessBackground returns true if the process is running in the background, or false if not
func IsProcessBackground() bool {
var pid int
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pid)))
if err != 0 {
debug.Log("Can't check if we are in the background. Using default behaviour. Error: %s\n", err.Error())
return false
}
return pid != syscall.Getpgrp()
}

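The Linux implementation above asks the terminal for its foreground process group via the TIOCGPGRP ioctl on stdin and compares it with the process's own group; the two differ exactly when the process was backgrounded. A hedged, Linux-only sketch of the same check:

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

// isBackground reports whether the process is running in the background by
// comparing the terminal's foreground process group with our own.
func isBackground() bool {
	var pgid int
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
		uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pgid)))
	if errno != 0 {
		return false // no terminal, or the ioctl failed: assume foreground
	}
	return pgid != syscall.Getpgrp()
}

func main() {
	if isBackground() {
		return // suppress progress output, as the callbacks above do
	}
	fmt.Println("running in the foreground")
}
```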
View File

@@ -32,6 +32,9 @@ func AddCleanupHandler(f func() error) {
cleanupHandlers.Lock()
defer cleanupHandlers.Unlock()
// reset the done flag for integration tests
cleanupHandlers.done = false
cleanupHandlers.list = append(cleanupHandlers.list, f)
}
@@ -51,15 +54,21 @@ func RunCleanupHandlers() {
fmt.Fprintf(stderr, "error in cleanup handler: %v\n", err)
}
}
cleanupHandlers.list = nil
}
// CleanupHandler handles the SIGINT signal.
func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("CleanupHandler", "signal %v received, cleaning up", s)
fmt.Println("\x1b[2KInterrupt received, cleaning up")
RunCleanupHandlers()
fmt.Println("exiting")
os.Exit(0)
debug.Log("signal %v received, cleaning up", s)
fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
Exit(0)
}
}
// Exit runs the cleanup handlers and then terminates the process with the
// given exit code.
func Exit(code int) {
RunCleanupHandlers()
os.Exit(code)
}

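With Exit funneling every termination through RunCleanupHandlers, commands can register teardown work once and have it run on both normal exit and ctrl-C. A self-contained sketch of the pattern (the lock file is hypothetical, and this re-implements the handler list rather than using restic's):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/signal"
	"sync"
)

var cleanup struct {
	sync.Mutex
	list []func() error
}

// addCleanupHandler registers f to be run before the process exits.
func addCleanupHandler(f func() error) {
	cleanup.Lock()
	defer cleanup.Unlock()
	cleanup.list = append(cleanup.list, f)
}

// exit runs all cleanup handlers, then terminates with the given code.
func exit(code int) {
	cleanup.Lock()
	defer cleanup.Unlock()
	for _, f := range cleanup.list {
		if err := f(); err != nil {
			fmt.Fprintf(os.Stderr, "error in cleanup handler: %v\n", err)
		}
	}
	os.Exit(code)
}

func main() {
	lock := "/tmp/example.lock" // hypothetical lock file
	ioutil.WriteFile(lock, nil, 0600)
	addCleanupHandler(func() error { return os.Remove(lock) })

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		exit(0) // ctrl-C takes the same path as a normal exit
	}()

	// ... do work ...
	exit(0)
}
```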
View File

@@ -2,137 +2,117 @@ package main
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"restic"
"restic/backend"
"restic/debug"
"restic/filter"
"strings"
"time"
"golang.org/x/crypto/ssh/terminal"
"github.com/spf13/cobra"
"restic/archiver"
"restic/debug"
"restic/errors"
"restic/filter"
"restic/fs"
)
type CmdBackup struct {
Parent string `short:"p" long:"parent" description:"use this parent snapshot (default: last snapshot in repo that has the same target)"`
Force bool `short:"f" long:"force" description:"Force re-reading the target. Overrides the \"parent\" flag"`
Excludes []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
ExcludeFile string `long:"exclude-file" description:"Read exclude-patterns from file"`
Stdin bool `long:"stdin" description:"read backup data from stdin"`
StdinFilename string `long:"stdin-filename" default:"stdin" description:"file name to use when reading from stdin"`
var cmdBackup = &cobra.Command{
Use: "backup [flags] FILE/DIR [FILE/DIR] ...",
Short: "create a new backup of files and/or directories",
Long: `
The "backup" command creates a new snapshot and saves the files and directories
given as the arguments.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if backupOptions.Stdin && backupOptions.FilesFrom == "-" {
return errors.Fatal("cannot use both `--stdin` and `--files-from -`")
}
global *GlobalOptions
if backupOptions.Stdin {
return readBackupFromStdin(backupOptions, globalOptions, args)
}
return runBackup(backupOptions, globalOptions, args)
},
}
// BackupOptions bundles all options for the backup command.
type BackupOptions struct {
Parent string
Force bool
Excludes []string
ExcludeFile string
ExcludeOtherFS bool
Stdin bool
StdinFilename string
Tags []string
Hostname string
FilesFrom string
}
var backupOptions BackupOptions
func init() {
_, err := parser.AddCommand("backup",
"save file/directory",
"The backup command creates a snapshot of a file or directory",
&CmdBackup{global: &globalOpts})
cmdRoot.AddCommand(cmdBackup)
hostname, err := os.Hostname()
if err != nil {
panic(err)
}
}
func formatBytes(c uint64) string {
b := float64(c)
switch {
case c > 1<<40:
return fmt.Sprintf("%.3f TiB", b/(1<<40))
case c > 1<<30:
return fmt.Sprintf("%.3f GiB", b/(1<<30))
case c > 1<<20:
return fmt.Sprintf("%.3f MiB", b/(1<<20))
case c > 1<<10:
return fmt.Sprintf("%.3f KiB", b/(1<<10))
default:
return fmt.Sprintf("%dB", c)
}
}
func formatSeconds(sec uint64) string {
hours := sec / 3600
sec -= hours * 3600
min := sec / 60
sec -= min * 60
if hours > 0 {
return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
debug.Log("os.Hostname() returned err: %v", err)
hostname = ""
}
return fmt.Sprintf("%d:%02d", min, sec)
f := cmdBackup.Flags()
f.StringVar(&backupOptions.Parent, "parent", "", "use this parent snapshot (default: last snapshot in the repo that has the same target files/directories)")
f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the target files/directories (overrides the "parent" flag)`)
f.StringSliceVarP(&backupOptions.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringVar(&backupOptions.ExcludeFile, "exclude-file", "", "read exclude patterns from a file")
f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems")
f.BoolVar(&backupOptions.Stdin, "stdin", false, "read backup from stdin")
f.StringVar(&backupOptions.StdinFilename, "stdin-filename", "stdin", "file name to use when reading from stdin")
f.StringSliceVar(&backupOptions.Tags, "tag", nil, "add a `tag` for the new snapshot (can be specified multiple times)")
f.StringVar(&backupOptions.Hostname, "hostname", hostname, "set the `hostname` for the snapshot manually")
f.StringVar(&backupOptions.FilesFrom, "files-from", "", "read the files to backup from file (can be combined with file args)")
}
func formatPercent(numerator uint64, denominator uint64) string {
if denominator == 0 {
return ""
}
percent := 100.0 * float64(numerator) / float64(denominator)
if percent > 100 {
percent = 100
}
return fmt.Sprintf("%3.2f%%", percent)
}
func formatRate(bytes uint64, duration time.Duration) string {
sec := float64(duration) / float64(time.Second)
rate := float64(bytes) / sec / (1 << 20)
return fmt.Sprintf("%.2fMiB/s", rate)
}
func formatDuration(d time.Duration) string {
sec := uint64(d / time.Second)
return formatSeconds(sec)
}
func printTree2(indent int, t *restic.Tree) {
for _, node := range t.Nodes {
if node.Tree() != nil {
fmt.Printf("%s%s/\n", strings.Repeat(" ", indent), node.Name)
printTree2(indent+1, node.Tree())
} else {
fmt.Printf("%s%s\n", strings.Repeat(" ", indent), node.Name)
}
}
}
func (cmd CmdBackup) Usage() string {
return "DIR/FILE [DIR/FILE] [...]"
}
func (cmd CmdBackup) newScanProgress() *restic.Progress {
if !cmd.global.ShowProgress() {
func newScanProgress(gopts GlobalOptions) *restic.Progress {
if gopts.Quiet {
return nil
}
p := restic.NewProgress(time.Second)
p := restic.NewProgress()
p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\x1b[2K[%s] %d directories, %d files, %s\r", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
if IsProcessBackground() {
return
}
PrintProgress("[%s] %d directories, %d files, %s", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
}
p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\x1b[2Kscanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
PrintProgress("scanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
}
return p
}
func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
if !cmd.global.ShowProgress() {
func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
if gopts.Quiet {
return nil
}
archiveProgress := restic.NewProgress(time.Second)
archiveProgress := restic.NewProgress()
var bps, eta uint64
itemsTodo := todo.Files + todo.Dirs
archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
if IsProcessBackground() {
return
}
sec := uint64(d / time.Second)
if todo.Bytes > 0 && sec > 0 && ticker {
bps = s.Bytes / sec
@@ -154,9 +134,8 @@ func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
s.Errors)
status2 := fmt.Sprintf("ETA %s ", formatSeconds(eta))
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
maxlen := w - len(status2)
if w := stdoutTerminalWidth(); w > 0 {
maxlen := w - len(status2) - 1
if maxlen < 4 {
status1 = ""
@@ -166,7 +145,7 @@ func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
}
}
fmt.Printf("\x1b[2K%s%s\r", status1, status2)
PrintProgress("%s%s", status1, status2)
}
archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
@@ -176,16 +155,20 @@ func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
return archiveProgress
}
func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
if !cmd.global.ShowProgress() {
func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
if gopts.Quiet {
return nil
}
archiveProgress := restic.NewProgress(time.Second)
archiveProgress := restic.NewProgress()
var bps uint64
archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
if IsProcessBackground() {
return
}
sec := uint64(d / time.Second)
if s.Bytes > 0 && sec > 0 && ticker {
bps = s.Bytes / sec
@@ -195,8 +178,7 @@ func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
formatBytes(s.Bytes),
formatBytes(bps))
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
if w := stdoutTerminalWidth(); w > 0 {
maxlen := w - len(status1)
if maxlen < 4 {
@@ -207,7 +189,7 @@ func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
}
}
fmt.Printf("\x1b[2K%s\r", status1)
PrintProgress("%s", status1)
}
archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
@@ -221,8 +203,8 @@ func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
// items exist at all.
func filterExisting(items []string) (result []string, err error) {
for _, item := range items {
_, err := os.Lstat(item)
if err != nil && os.IsNotExist(err) {
_, err := fs.Lstat(item)
if err != nil && os.IsNotExist(errors.Cause(err)) {
continue
}
@@ -230,18 +212,47 @@ func filterExisting(items []string) (result []string, err error) {
}
if len(result) == 0 {
return nil, errors.New("all target directories/files do not exist")
return nil, errors.Fatal("all target directories/files do not exist")
}
return
}
func (cmd CmdBackup) readFromStdin(args []string) error {
// gatherDevices returns the set of unique device ids of the files and/or
// directory paths listed in "items".
func gatherDevices(items []string) (deviceMap map[uint64]struct{}, err error) {
deviceMap = make(map[uint64]struct{})
for _, item := range items {
fi, err := fs.Lstat(item)
if err != nil {
return nil, err
}
id, err := fs.DeviceID(fi)
if err != nil {
return nil, err
}
deviceMap[id] = struct{}{}
}
if len(deviceMap) == 0 {
return nil, errors.New("zero allowed devices")
}
return deviceMap, nil
}
func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string) error {
if len(args) != 0 {
return fmt.Errorf("when reading from stdin, no additional files can be specified")
return errors.Fatal("when reading from stdin, no additional files can be specified")
}
repo, err := cmd.global.OpenRepository()
if opts.StdinFilename == "" {
return errors.Fatal("filename for backup from stdin must not be empty")
}
if gopts.password == "" && gopts.PasswordFile == "" {
return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -257,22 +268,74 @@ func (cmd CmdBackup) readFromStdin(args []string) error {
return err
}
_, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename)
r := &archiver.Reader{
Repository: repo,
Tags: opts.Tags,
Hostname: opts.Hostname,
}
_, id, err := r.Archive(opts.StdinFilename, os.Stdin, newArchiveStdinProgress(gopts))
if err != nil {
return err
}
fmt.Printf("archived as %v\n", id.Str())
Verbosef("archived as %v\n", id.Str())
return nil
}
func (cmd CmdBackup) Execute(args []string) error {
if cmd.Stdin {
return cmd.readFromStdin(args)
// readLinesFromFile reads all lines from the given filename and returns them
// as a string slice. If filename is empty, readLinesFromFile returns an empty
// slice. If filename is a dash (-), it reads the lines from
// standard input.
func readLinesFromFile(filename string) ([]string, error) {
if filename == "" {
return nil, nil
}
var r io.Reader = os.Stdin
if filename != "-" {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
r = f
}
var lines []string
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := scanner.Text()
if line == "" {
continue
}
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}
func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
if opts.FilesFrom == "-" && gopts.password == "" && gopts.PasswordFile == "" {
return errors.Fatal("no password; either use `--password-file` option or put the password into the RESTIC_PASSWORD environment variable")
}
fromfile, err := readLinesFromFile(opts.FilesFrom)
if err != nil {
return err
}
// merge files from files-from into normal args so we can reuse the normal
// args checks and have the ability to use both files-from and args at the
// same time
args = append(args, fromfile...)
if len(args) == 0 {
return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
return errors.Fatal("wrong number of parameters")
}
target := make([]string, 0, len(args))
@@ -283,12 +346,22 @@ func (cmd CmdBackup) Execute(args []string) error {
target = append(target, d)
}
target, err := filterExisting(target)
target, err = filterExisting(target)
if err != nil {
return err
}
repo, err := cmd.global.OpenRepository()
// allowed devices
var allowedDevs map[uint64]struct{}
if opts.ExcludeOtherFS {
allowedDevs, err = gatherDevices(target)
if err != nil {
return err
}
debug.Log("allowed devices: %v\n", allowedDevs)
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -304,21 +377,21 @@ func (cmd CmdBackup) Execute(args []string) error {
return err
}
var parentSnapshotID *backend.ID
var parentSnapshotID *restic.ID
// Force using a parent
if !cmd.Force && cmd.Parent != "" {
id, err := restic.FindSnapshot(repo, cmd.Parent)
if !opts.Force && opts.Parent != "" {
id, err := restic.FindSnapshot(repo, opts.Parent)
if err != nil {
return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
}
parentSnapshotID = &id
}
// Find last snapshot to set it as parent, if not already set
if !cmd.Force && parentSnapshotID == nil {
id, err := restic.FindLatestSnapshot(repo, target, "")
if !opts.Force && parentSnapshotID == nil {
id, err := restic.FindLatestSnapshot(repo, target, opts.Tags, opts.Hostname)
if err == nil {
parentSnapshotID = &id
} else if err != restic.ErrNoSnapshotFound {
@@ -327,16 +400,16 @@ func (cmd CmdBackup) Execute(args []string) error {
}
if parentSnapshotID != nil {
cmd.global.Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
}
cmd.global.Verbosef("scan %v\n", target)
Verbosef("scan %v\n", target)
// add patterns from file
if cmd.ExcludeFile != "" {
file, err := os.Open(cmd.ExcludeFile)
if opts.ExcludeFile != "" {
file, err := fs.Open(opts.ExcludeFile)
if err != nil {
cmd.global.Warnf("error reading exclude patterns: %v", err)
Warnf("error reading exclude patterns: %v", err)
return nil
}
@@ -345,45 +418,61 @@ func (cmd CmdBackup) Execute(args []string) error {
line := scanner.Text()
if !strings.HasPrefix(line, "#") {
line = os.ExpandEnv(line)
cmd.Excludes = append(cmd.Excludes, line)
opts.Excludes = append(opts.Excludes, line)
}
}
}
selectFilter := func(item string, fi os.FileInfo) bool {
matched, err := filter.List(cmd.Excludes, item)
matched, err := filter.List(opts.Excludes, item)
if err != nil {
cmd.global.Warnf("error for exclude pattern: %v", err)
Warnf("error for exclude pattern: %v", err)
}
if matched {
debug.Log("backup.Execute", "path %q excluded by a filter", item)
debug.Log("path %q excluded by a filter", item)
return false
}
return !matched
if !opts.ExcludeOtherFS || fi == nil {
return true
}
id, err := fs.DeviceID(fi)
if err != nil {
// This should never happen because gatherDevices() would have
// errored out earlier. If it still does that's a reason to panic.
panic(err)
}
_, found := allowedDevs[id]
if !found {
debug.Log("path %q on disallowed device %d", item, id)
return false
}
return true
}
stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress())
stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
if err != nil {
return err
}
arch := restic.NewArchiver(repo)
arch.Excludes = cmd.Excludes
arch := archiver.New(repo)
arch.Excludes = opts.Excludes
arch.SelectFilter = selectFilter
arch.Error = func(dir string, fi os.FileInfo, err error) error {
arch.Warn = func(dir string, fi os.FileInfo, err error) {
// TODO: make ignoring errors configurable
cmd.global.Warnf("\x1b[2K\rerror for %s: %v\n", dir, err)
return nil
Warnf("%s\rwarning for %s: %v\n", ClearLine(), dir, err)
}
_, id, err := arch.Snapshot(cmd.newArchiveProgress(stat), target, parentSnapshotID)
_, id, err := arch.Snapshot(newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID)
if err != nil {
return err
}
cmd.global.Verbosef("snapshot %s saved\n", id.Str())
Verbosef("snapshot %s saved\n", id.Str())
return nil
}

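The --one-file-system support above reduces to: record the device ID of each backup target up front (gatherDevices), then have the select filter reject any path whose device ID is not in that set. A Unix-only sketch of the same check with plain syscall types (paths are illustrative):

```go
// +build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

// deviceID extracts the device number from a FileInfo, as fs.DeviceID does.
func deviceID(fi os.FileInfo) (uint64, error) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("no stat_t for %v", fi.Name())
	}
	return uint64(st.Dev), nil
}

func main() {
	targets := []string{"/home"} // backup targets; illustrative

	// Gather the allowed devices from the targets themselves.
	allowed := map[uint64]struct{}{}
	for _, t := range targets {
		fi, err := os.Lstat(t)
		if err != nil {
			panic(err)
		}
		id, err := deviceID(fi)
		if err != nil {
			panic(err)
		}
		allowed[id] = struct{}{}
	}

	// Later, while walking the tree, skip anything on another filesystem.
	fi, err := os.Lstat("/home/user/file") // hypothetical path being considered
	if err != nil {
		return
	}
	id, err := deviceID(fi)
	if err != nil {
		return
	}
	if _, ok := allowed[id]; !ok {
		fmt.Println("skipping: file lives on a different filesystem")
	}
}
```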
View File

@@ -1,56 +0,0 @@
package main
import (
"fmt"
"restic"
)
type CmdCache struct {
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("cache",
"manage cache",
"The cache command creates and manages the local cache",
&CmdCache{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdCache) Usage() string {
return "[update|clear]"
}
func (cmd CmdCache) Execute(args []string) error {
// if len(args) == 0 || len(args) > 2 {
// return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
// }
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
cache, err := restic.NewCache(repo, cmd.global.CacheDir)
if err != nil {
return err
}
fmt.Printf("clear cache for old snapshots\n")
err = cache.Clear(repo)
if err != nil {
return err
}
fmt.Printf("done\n")
return nil
}

View File

@@ -2,41 +2,38 @@ package main
import (
"encoding/json"
"errors"
"fmt"
"os"
"github.com/spf13/cobra"
"restic"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/errors"
"restic/repository"
)
type CmdCat struct {
global *GlobalOptions
var cmdCat = &cobra.Command{
Use: "cat [flags] [pack|blob|snapshot|index|key|masterkey|config|lock] ID",
Short: "print internal objects to stdout",
Long: `
The "cat" command is used to print internal objects to stdout.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runCat(globalOptions, args)
},
}
func init() {
_, err := parser.AddCommand("cat",
"dump something",
"The cat command dumps data structures or data from a repository",
&CmdCat{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdCat)
}
func (cmd CmdCat) Usage() string {
return "[pack|blob|tree|snapshot|key|masterkey|config|lock] ID"
}
func (cmd CmdCat) Execute(args []string) error {
func runCat(gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
return errors.Fatal("type or ID not specified")
}
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -49,12 +46,12 @@ func (cmd CmdCat) Execute(args []string) error {
tpe := args[0]
var id backend.ID
var id restic.ID
if tpe != "masterkey" && tpe != "config" {
id, err = backend.ParseID(args[1])
id, err = restic.ParseID(args[1])
if err != nil {
if tpe != "snapshot" {
return err
return errors.Fatalf("unable to parse ID: %v\n", err)
}
// find snapshot id with prefix
@@ -68,7 +65,7 @@ func (cmd CmdCat) Execute(args []string) error {
// handle all types that don't need an index
switch tpe {
case "config":
buf, err := json.MarshalIndent(repo.Config, "", " ")
buf, err := json.MarshalIndent(repo.Config(), "", " ")
if err != nil {
return err
}
@@ -76,7 +73,7 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf))
return nil
case "index":
buf, err := repo.LoadAndDecrypt(backend.Index, id)
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
if err != nil {
return err
}
@@ -86,7 +83,7 @@ func (cmd CmdCat) Execute(args []string) error {
case "snapshot":
sn := &restic.Snapshot{}
err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
if err != nil {
return err
}
@@ -100,8 +97,8 @@ func (cmd CmdCat) Execute(args []string) error {
return nil
case "key":
h := backend.Handle{Type: backend.Key, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h)
if err != nil {
return err
}
@@ -151,49 +148,42 @@ func (cmd CmdCat) Execute(args []string) error {
switch tpe {
case "pack":
h := backend.Handle{Type: backend.Data, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h)
if err != nil {
return err
}
hash := restic.Hash(buf)
if !hash.Equal(id) {
fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String())
}
_, err = os.Stdout.Write(buf)
return err
case "blob":
blob, err := repo.Index().Lookup(id)
if err != nil {
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
list, err := repo.Index().Lookup(id, t)
if err != nil {
continue
}
blob := list[0]
buf := make([]byte, blob.Length)
n, err := repo.LoadBlob(t, id, buf)
if err != nil {
return err
}
buf = buf[:n]
_, err = os.Stdout.Write(buf)
return err
}
buf := make([]byte, blob.Length)
data, err := repo.LoadBlob(blob.Type, id, buf)
if err != nil {
return err
}
_, err = os.Stdout.Write(data)
return err
case "tree":
debug.Log("cat", "cat tree %v", id.Str())
tree := restic.NewTree()
err = repo.LoadJSONPack(pack.Tree, id, tree)
if err != nil {
debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
return err
}
buf, err := json.MarshalIndent(&tree, "", " ")
if err != nil {
debug.Log("cat", "error json.MarshalIndent(): %v", err)
return err
}
_, err = os.Stdout.Write(append(buf, '\n'))
return nil
return errors.Fatal("blob not found")
default:
return errors.New("invalid type")
return errors.Fatal("invalid type")
}
}

View File

@@ -1,44 +1,51 @@
package main
import (
"errors"
"fmt"
"os"
"time"
"golang.org/x/crypto/ssh/terminal"
"github.com/spf13/cobra"
"restic"
"restic/checker"
"restic/errors"
)
type CmdCheck struct {
ReadData bool `long:"read-data" default:"false" description:"Read data blobs"`
CheckUnused bool `long:"check-unused" default:"false" description:"Check for unused blobs"`
global *GlobalOptions
var cmdCheck = &cobra.Command{
Use: "check [flags]",
Short: "check the repository for errors",
Long: `
The "check" command tests the repository for errors and reports any errors it
finds. It can also be used to read all data and therefore simulate a restore.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(checkOptions, globalOptions, args)
},
}
// CheckOptions bundles all options for the 'check' command.
type CheckOptions struct {
ReadData bool
CheckUnused bool
}
var checkOptions CheckOptions
func init() {
_, err := parser.AddCommand("check",
"check the repository",
"The check command check the integrity and consistency of the repository",
&CmdCheck{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdCheck)
f := cmdCheck.Flags()
f.BoolVar(&checkOptions.ReadData, "read-data", false, "read all data blobs")
f.BoolVar(&checkOptions.CheckUnused, "check-unused", false, "find unused blobs")
}
func (cmd CmdCheck) Usage() string {
return "[check-options]"
}
func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
if !cmd.global.ShowProgress() {
func newReadProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
if gopts.Quiet {
return nil
}
readProgress := restic.NewProgress(time.Second)
readProgress := restic.NewProgress()
readProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
status := fmt.Sprintf("[%s] %s %d / %d items",
@@ -46,15 +53,14 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
formatPercent(s.Blobs, todo.Blobs),
s.Blobs, todo.Blobs)
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
if w := stdoutTerminalWidth(); w > 0 {
if len(status) > w {
max := w - len(status) - 4
status = status[:max] + "... "
}
}
fmt.Printf("\x1b[2K%s\r", status)
PrintProgress("%s", status)
}
readProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
@@ -64,18 +70,18 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
return readProgress
}
func (cmd CmdCheck) Execute(args []string) error {
func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
if len(args) != 0 {
return errors.New("check has no arguments")
return errors.Fatal("check has no arguments")
}
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if !cmd.global.NoLock {
cmd.global.Verbosef("Create exclusive lock for repository\n")
if !gopts.NoLock {
Verbosef("Create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
@@ -85,26 +91,26 @@ func (cmd CmdCheck) Execute(args []string) error {
chkr := checker.New(repo)
cmd.global.Verbosef("Load indexes\n")
Verbosef("Load indexes\n")
hints, errs := chkr.LoadIndex()
dupFound := false
for _, hint := range hints {
cmd.global.Printf("%v\n", hint)
Printf("%v\n", hint)
if _, ok := hint.(checker.ErrDuplicatePacks); ok {
dupFound = true
}
}
if dupFound {
cmd.global.Printf("\nrun `restic rebuild-index' to correct this\n")
Printf("\nrun `restic rebuild-index' to correct this\n")
}
if len(errs) > 0 {
for _, err := range errs {
cmd.global.Warnf("error: %v\n", err)
Warnf("error: %v\n", err)
}
return fmt.Errorf("LoadIndex returned errors")
return errors.Fatal("LoadIndex returned errors")
}
done := make(chan struct{})
@@ -113,7 +119,7 @@ func (cmd CmdCheck) Execute(args []string) error {
errorsFound := false
errChan := make(chan error)
cmd.global.Verbosef("Check all packs\n")
Verbosef("Check all packs\n")
go chkr.Packs(errChan, done)
for err := range errChan {
@@ -121,7 +127,7 @@ func (cmd CmdCheck) Execute(args []string) error {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
cmd.global.Verbosef("Check snapshots, trees and blobs\n")
Verbosef("Check snapshots, trees and blobs\n")
errChan = make(chan error)
go chkr.Structure(errChan, done)
@@ -137,17 +143,17 @@ func (cmd CmdCheck) Execute(args []string) error {
}
}
if cmd.CheckUnused {
if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs() {
cmd.global.Verbosef("unused blob %v\n", id.Str())
Verbosef("unused blob %v\n", id.Str())
errorsFound = true
}
}
if cmd.ReadData {
cmd.global.Verbosef("Read all data\n")
if opts.ReadData {
Verbosef("Read all data\n")
p := cmd.newReadProgress(restic.Stat{Blobs: chkr.CountPacks()})
p := newReadProgress(gopts, restic.Stat{Blobs: chkr.CountPacks()})
errChan := make(chan error)
go chkr.ReadData(p, errChan, done)
@@ -159,7 +165,7 @@ func (cmd CmdCheck) Execute(args []string) error {
}
if errorsFound {
return errors.New("repository contains errors")
return errors.Fatal("repository contains errors")
}
return nil
}

View File

@@ -8,34 +8,29 @@ import (
"io"
"os"
"github.com/spf13/cobra"
"restic"
"restic/backend"
"restic/errors"
"restic/pack"
"restic/repository"
"restic/worker"
"github.com/juju/errors"
)
type CmdDump struct {
global *GlobalOptions
repo *repository.Repository
var cmdDump = &cobra.Command{
Use: "dump [indexes|snapshots|trees|all|packs]",
Short: "dump data structures",
Long: `
The "dump" command dumps data structures from the repository as JSON objects. It
is used for debugging purposes only.`,
RunE: func(cmd *cobra.Command, args []string) error {
return runDump(globalOptions, args)
},
}
func init() {
_, err := parser.AddCommand("dump",
"dump data structures",
"The dump command dumps data structures from a repository as JSON documents",
&CmdDump{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdDump) Usage() string {
return "[indexes|snapshots|trees|all|packs]"
cmdRoot.AddCommand(cmdDump)
}
func prettyPrintJSON(wr io.Writer, item interface{}) error {
@@ -48,11 +43,11 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error {
return err
}
func printSnapshots(repo *repository.Repository, wr io.Writer) error {
func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)
for id := range repo.List(backend.Snapshot, done) {
for id := range repo.List(restic.SnapshotFile, done) {
snapshot, err := restic.LoadSnapshot(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
@@ -70,37 +65,6 @@ func printSnapshots(repo *repository.Repository, wr io.Writer) error {
return nil
}
func printTrees(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)
trees := []backend.ID{}
for _, idx := range repo.Index().All() {
for blob := range idx.Each(nil) {
if blob.Type != pack.Tree {
continue
}
trees = append(trees, blob.ID)
}
}
for _, id := range trees {
tree, err := restic.LoadTree(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
continue
}
fmt.Fprintf(wr, "tree_id: %v\n", id)
prettyPrintJSON(wr, tree)
}
return nil
}
const dumpPackWorkers = 10
// Pack is the struct used in printPacks.
@@ -112,10 +76,10 @@ type Pack struct {
// Blob is the struct used in printPacks.
type Blob struct {
Type pack.BlobType `json:"type"`
Length uint `json:"length"`
ID backend.ID `json:"id"`
Offset uint `json:"offset"`
Type restic.BlobType `json:"type"`
Length uint `json:"length"`
ID restic.ID `json:"id"`
Offset uint `json:"offset"`
}
func printPacks(repo *repository.Repository, wr io.Writer) error {
@@ -125,15 +89,19 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
name := job.Data.(string)
h := backend.Handle{Type: backend.Data, Name: name}
rd := backend.NewReadSeeker(repo.Backend(), h)
h := restic.Handle{Type: restic.DataFile, Name: name}
unpacker, err := pack.NewUnpacker(repo.Key(), rd)
blobInfo, err := repo.Backend().Stat(h)
if err != nil {
return nil, err
}
return unpacker.Entries, nil
blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
if err != nil {
return nil, err
}
return blobs, nil
}
jobCh := make(chan worker.Job)
@@ -141,7 +109,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
wp := worker.New(dumpPackWorkers, f, jobCh, resCh)
go func() {
for name := range repo.Backend().List(backend.Data, done) {
for name := range repo.Backend().List(restic.DataFile, done) {
jobCh <- worker.Job{Data: name}
}
close(jobCh)
@@ -155,7 +123,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
continue
}
entries := job.Result.([]pack.Blob)
entries := job.Result.([]restic.Blob)
p := Pack{
Name: name,
Blobs: make([]Blob, len(entries)),
@@ -177,14 +145,14 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
return nil
}
func (cmd CmdDump) DumpIndexes() error {
func dumpIndexes(repo restic.Repository) error {
done := make(chan struct{})
defer close(done)
for id := range cmd.repo.List(backend.Index, done) {
for id := range repo.List(restic.IndexFile, done) {
fmt.Printf("index_id: %v\n", id)
idx, err := repository.LoadIndex(cmd.repo, id)
idx, err := repository.LoadIndex(repo, id)
if err != nil {
return err
}
@@ -198,21 +166,22 @@ func (cmd CmdDump) DumpIndexes() error {
return nil
}
func (cmd CmdDump) Execute(args []string) error {
func runDump(gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
return errors.Fatal("type not specified")
}
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
cmd.repo = repo
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}
err = repo.LoadIndex()
@@ -224,35 +193,26 @@ func (cmd CmdDump) Execute(args []string) error {
switch tpe {
case "indexes":
return cmd.DumpIndexes()
return dumpIndexes(repo)
case "snapshots":
return printSnapshots(repo, os.Stdout)
case "trees":
return printTrees(repo, os.Stdout)
return debugPrintSnapshots(repo, os.Stdout)
case "packs":
return printPacks(repo, os.Stdout)
case "all":
fmt.Printf("snapshots:\n")
err := printSnapshots(repo, os.Stdout)
if err != nil {
return err
}
fmt.Printf("\ntrees:\n")
err = printTrees(repo, os.Stdout)
err := debugPrintSnapshots(repo, os.Stdout)
if err != nil {
return err
}
fmt.Printf("\nindexes:\n")
err = cmd.DumpIndexes()
err = dumpIndexes(repo)
if err != nil {
return err
}
return nil
default:
return errors.Errorf("no such type %q", tpe)
return errors.Fatalf("no such type %q", tpe)
}
}

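printPacks fans pack names out to dumpPackWorkers goroutines over a job channel and collects the listings on a result channel via restic's worker package. The underlying pattern is plain channels plus a WaitGroup; a generic sketch (names and worker count are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 10

	jobs := make(chan string)
	results := make(chan string)

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range jobs {
				// Stand-in for "stat the pack file and list its blobs".
				results <- fmt.Sprintf("processed %s", name)
			}
		}()
	}

	// Feed the jobs, then close the channel so the workers drain and exit.
	go func() {
		for _, name := range []string{"pack-a", "pack-b", "pack-c"} {
			jobs <- name
		}
		close(jobs)
	}()

	// Close results once every worker has finished.
	go func() {
		wg.Wait()
		close(results)
	}()

	for r := range results {
		fmt.Println(r)
	}
}
```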
View File

@@ -1,29 +1,63 @@
package main
import (
"fmt"
"context"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
"restic"
"restic/backend"
"restic/debug"
"restic/errors"
"restic/repository"
)
type findResult struct {
node *restic.Node
path string
var cmdFind = &cobra.Command{
Use: "find [flags] PATTERN",
Short: "find a file or directory",
Long: `
The "find" command searches for files or directories in snapshots stored in the
repo. `,
RunE: func(cmd *cobra.Command, args []string) error {
return runFind(findOptions, globalOptions, args)
},
}
type CmdFind struct {
Oldest string `short:"o" long:"oldest" description:"Oldest modification date/time"`
Newest string `short:"n" long:"newest" description:"Newest modification date/time"`
Snapshot string `short:"s" long:"snapshot" description:"Snapshot ID to search in"`
// FindOptions bundles all options for the find command.
type FindOptions struct {
Oldest string
Newest string
Snapshots []string
CaseInsensitive bool
ListLong bool
Host string
Paths []string
Tags []string
}
var findOptions FindOptions
func init() {
cmdRoot.AddCommand(cmdFind)
f := cmdFind.Flags()
f.StringVarP(&findOptions.Oldest, "oldest", "o", "", "oldest modification date/time")
f.StringVarP(&findOptions.Newest, "newest", "n", "", "newest modification date/time")
f.StringSliceVarP(&findOptions.Snapshots, "snapshot", "s", nil, "snapshot `id` to search in (can be given multiple times)")
f.BoolVarP(&findOptions.CaseInsensitive, "ignore-case", "i", false, "ignore case for pattern")
f.BoolVarP(&findOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
f.StringVarP(&findOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
f.StringSliceVar(&findOptions.Tags, "tag", nil, "only consider snapshots which include this `tag`, when no snapshot-ID is given")
f.StringSliceVar(&findOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
}
type findPattern struct {
oldest, newest time.Time
pattern string
global *GlobalOptions
ignoreCase bool
}
var timeFormats = []string{
@@ -40,16 +74,6 @@ var timeFormats = []string{
"Mon Jan 2 15:04:05 -0700 MST 2006",
}
func init() {
_, err := parser.AddCommand("find",
"find a file/directory",
"The find command searches for files or directories in snapshots",
&CmdFind{global: &globalOpts})
if err != nil {
panic(err)
}
}
func parseTime(str string) (time.Time, error) {
for _, fmt := range timeFormats {
if t, err := time.ParseInLocation(fmt, str, time.Local); err == nil {
@@ -57,138 +81,116 @@ func parseTime(str string) (time.Time, error) {
}
}
return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
return time.Time{}, errors.Fatalf("unable to parse time: %q", str)
}
func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path string) ([]findResult, error) {
debug.Log("restic.find", "checking tree %v\n", id)
tree, err := restic.LoadTree(repo, id)
func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, prefix string, snapshotID *string) error {
debug.Log("checking tree %v\n", id)
tree, err := repo.LoadTree(id)
if err != nil {
return nil, err
return err
}
results := []findResult{}
for _, node := range tree.Nodes {
debug.Log("restic.find", " testing entry %q\n", node.Name)
debug.Log(" testing entry %q\n", node.Name)
m, err := filepath.Match(c.pattern, node.Name)
name := node.Name
if pat.ignoreCase {
name = strings.ToLower(name)
}
m, err := filepath.Match(pat.pattern, name)
if err != nil {
return nil, err
return err
}
if m {
debug.Log("restic.find", " pattern matches\n")
if !c.oldest.IsZero() && node.ModTime.Before(c.oldest) {
debug.Log("restic.find", " ModTime is older than %s\n", c.oldest)
debug.Log(" pattern matches\n")
if !pat.oldest.IsZero() && node.ModTime.Before(pat.oldest) {
debug.Log(" ModTime is older than %s\n", pat.oldest)
continue
}
if !c.newest.IsZero() && node.ModTime.After(c.newest) {
debug.Log("restic.find", " ModTime is newer than %s\n", c.newest)
if !pat.newest.IsZero() && node.ModTime.After(pat.newest) {
debug.Log(" ModTime is newer than %s\n", pat.newest)
continue
}
results = append(results, findResult{node: node, path: path})
if snapshotID != nil {
Verbosef("Found matching entries in snapshot %s\n", *snapshotID)
snapshotID = nil
}
Printf(formatNode(prefix, node, findOptions.ListLong) + "\n")
} else {
debug.Log("restic.find", " pattern does not match\n")
debug.Log(" pattern does not match\n")
}
if node.Type == "dir" {
subdirResults, err := c.findInTree(repo, *node.Subtree, filepath.Join(path, node.Name))
if err != nil {
return nil, err
if err := findInTree(repo, pat, *node.Subtree, filepath.Join(prefix, node.Name), snapshotID); err != nil {
return err
}
results = append(results, subdirResults...)
}
}
return results, nil
}
func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error {
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id.Str(), c.oldest, c.newest)
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
return err
}
results, err := c.findInTree(repo, *sn.Tree, "")
if err != nil {
return err
}
if len(results) == 0 {
return nil
}
c.global.Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
for _, res := range results {
res.node.Name = filepath.Join(res.path, res.node.Name)
c.global.Printf(" %s\n", res.node)
}
return nil
}
func (CmdFind) Usage() string {
return "[find-OPTIONS] PATTERN"
func findInSnapshot(repo *repository.Repository, sn *restic.Snapshot, pat findPattern) error {
debug.Log("searching in snapshot %s\n for entries within [%s %s]", sn.ID(), pat.oldest, pat.newest)
snapshotID := sn.ID().Str()
if err := findInTree(repo, pat, *sn.Tree, string(filepath.Separator), &snapshotID); err != nil {
return err
}
return nil
}
func (c CmdFind) Execute(args []string) error {
func runFind(opts FindOptions, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("wrong number of arguments, Usage: %s", c.Usage())
return errors.Fatal("wrong number of arguments")
}
var err error
pat := findPattern{pattern: args[0]}
if opts.CaseInsensitive {
pat.pattern = strings.ToLower(pat.pattern)
pat.ignoreCase = true
}
if c.Oldest != "" {
c.oldest, err = parseTime(c.Oldest)
if opts.Oldest != "" {
if pat.oldest, err = parseTime(opts.Oldest); err != nil {
return err
}
}
if opts.Newest != "" {
if pat.newest, err = parseTime(opts.Newest); err != nil {
return err
}
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}
if c.Newest != "" {
c.newest, err = parseTime(c.Newest)
if err != nil {
return err
}
}
repo, err := c.global.OpenRepository()
if err != nil {
if err = repo.LoadIndex(); err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
c.pattern = args[0]
if c.Snapshot != "" {
snapshotID, err := restic.FindSnapshot(repo, c.Snapshot)
if err != nil {
return fmt.Errorf("invalid id %q: %v", args[1], err)
}
return c.findInSnapshot(repo, snapshotID)
}
done := make(chan struct{})
defer close(done)
for snapshotID := range repo.List(backend.Snapshot, done) {
err := c.findInSnapshot(repo, snapshotID)
if err != nil {
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, opts.Snapshots) {
if err = findInSnapshot(repo, sn, pat); err != nil {
return err
}
}
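
The ignore-case handling above lower-cases the pattern once in runFind and each entry name in findInTree before matching. A self-contained sketch of that strategy — not restic code, just the stdlib calls it relies on:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// matchInsensitive mirrors the approach above: both the pattern and the
// candidate name are lower-cased before filepath.Match compares them.
func matchInsensitive(pattern, name string) (bool, error) {
    return filepath.Match(strings.ToLower(pattern), strings.ToLower(name))
}

func main() {
    ok, err := matchInsensitive("*.JPG", "Holiday.jpg")
    fmt.Println(ok, err) // true <nil>
}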


@@ -0,0 +1,184 @@
package main
import (
"context"
"encoding/json"
"restic"
"sort"
"strings"
"github.com/spf13/cobra"
)
var cmdForget = &cobra.Command{
Use: "forget [flags] [snapshot ID] [...]",
Short: "forget removes snapshots from the repository",
Long: `
The "forget" command removes snapshots according to a policy. Please note that
this command really only deletes the snapshot object in the repository, which
is a reference to data stored there. In order to remove this (now unreferenced)
data after 'forget' was run successfully, see the 'prune' command. `,
RunE: func(cmd *cobra.Command, args []string) error {
return runForget(forgetOptions, globalOptions, args)
},
}
// ForgetOptions collects all options for the forget command.
type ForgetOptions struct {
Last int
Hourly int
Daily int
Weekly int
Monthly int
Yearly int
KeepTags []string
Host string
Tags []string
Paths []string
GroupByTags bool
DryRun bool
Prune bool
}
var forgetOptions ForgetOptions
func init() {
cmdRoot.AddCommand(cmdForget)
f := cmdForget.Flags()
f.IntVarP(&forgetOptions.Last, "keep-last", "l", 0, "keep the last `n` snapshots")
f.IntVarP(&forgetOptions.Hourly, "keep-hourly", "H", 0, "keep the last `n` hourly snapshots")
f.IntVarP(&forgetOptions.Daily, "keep-daily", "d", 0, "keep the last `n` daily snapshots")
f.IntVarP(&forgetOptions.Weekly, "keep-weekly", "w", 0, "keep the last `n` weekly snapshots")
f.IntVarP(&forgetOptions.Monthly, "keep-monthly", "m", 0, "keep the last `n` monthly snapshots")
f.IntVarP(&forgetOptions.Yearly, "keep-yearly", "y", 0, "keep the last `n` yearly snapshots")
f.StringSliceVar(&forgetOptions.KeepTags, "keep-tag", []string{}, "keep snapshots with this `tag` (can be specified multiple times)")
f.BoolVarP(&forgetOptions.GroupByTags, "group-by-tags", "G", false, "Group by host,paths,tags instead of just host,paths")
// Sadly the commonly used shortcut `H` is already used.
f.StringVar(&forgetOptions.Host, "host", "", "only consider snapshots with the given `host`")
// Deprecated since 2017-03-07.
f.StringVar(&forgetOptions.Host, "hostname", "", "only consider snapshots with the given `hostname` (deprecated)")
f.StringSliceVar(&forgetOptions.Tags, "tag", nil, "only consider snapshots which include this `tag` (can be specified multiple times)")
f.StringSliceVar(&forgetOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` (can be specified multiple times)")
f.BoolVarP(&forgetOptions.DryRun, "dry-run", "n", false, "do not delete anything, just print what would be done")
f.BoolVar(&forgetOptions.Prune, "prune", false, "automatically run the 'prune' command if snapshots have been removed")
}
func runForget(opts ForgetOptions, gopts GlobalOptions, args []string) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
// group by hostname and dirs
type key struct {
Hostname string
Paths []string
Tags []string
}
snapshotGroups := make(map[string]restic.Snapshots)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
if len(args) > 0 {
// When explicit snapshots args are given, remove them immediately.
if !opts.DryRun {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(h); err != nil {
return err
}
Verbosef("removed snapshot %v\n", sn.ID().Str())
} else {
Verbosef("would have removed snapshot %v\n", sn.ID().Str())
}
} else {
var tags []string
if opts.GroupByTags {
tags = sn.Tags
sort.StringSlice(tags).Sort()
}
sort.StringSlice(sn.Paths).Sort()
k, err := json.Marshal(key{Hostname: sn.Hostname, Tags: tags, Paths: sn.Paths})
if err != nil {
return err
}
snapshotGroups[string(k)] = append(snapshotGroups[string(k)], sn)
}
}
if len(args) > 0 {
return nil
}
policy := restic.ExpirePolicy{
Last: opts.Last,
Hourly: opts.Hourly,
Daily: opts.Daily,
Weekly: opts.Weekly,
Monthly: opts.Monthly,
Yearly: opts.Yearly,
Tags: opts.KeepTags,
}
if policy.Empty() {
Verbosef("no policy was specified, no snapshots will be removed\n")
return nil
}
removeSnapshots := 0
for k, snapshotGroup := range snapshotGroups {
var key key
if err := json.Unmarshal([]byte(k), &key); err != nil {
return err
}
if opts.GroupByTags {
Printf("snapshots for host %v, tags [%v], paths: [%v]:\n\n", key.Hostname, strings.Join(key.Tags, ", "), strings.Join(key.Paths, ", "))
} else {
Printf("snapshots for host %v, paths: [%v]:\n\n", key.Hostname, strings.Join(key.Paths, ", "))
}
keep, remove := restic.ApplyPolicy(snapshotGroup, policy)
if len(keep) != 0 {
Printf("keep %d snapshots:\n", len(keep))
PrintSnapshots(globalOptions.stdout, keep)
Printf("\n")
}
if len(remove) != 0 {
Printf("remove %d snapshots:\n", len(remove))
PrintSnapshots(globalOptions.stdout, remove)
Printf("\n")
}
removeSnapshots += len(remove)
if !opts.DryRun {
for _, sn := range remove {
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
err = repo.Backend().Remove(h)
if err != nil {
return err
}
}
}
}
if removeSnapshots > 0 && opts.Prune {
Printf("%d snapshots have been removed, running prune\n", removeSnapshots)
if !opts.DryRun {
return pruneRepository(gopts, repo)
}
}
return nil
}
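
The grouping above leans on a small trick: slices cannot be map keys, so the key struct is JSON-encoded into a string for grouping and decoded again for printing. A minimal sketch of the technique with made-up data:

package main

import (
    "encoding/json"
    "fmt"
)

type groupKey struct {
    Hostname string
    Paths    []string
    Tags     []string
}

func main() {
    groups := make(map[string][]string)
    k, _ := json.Marshal(groupKey{Hostname: "box", Paths: []string{"/home"}})
    // The encoded struct serves as a comparable map key.
    groups[string(k)] = append(groups[string(k)], "snap-1", "snap-2")

    var decoded groupKey
    _ = json.Unmarshal([]byte(k), &decoded)
    fmt.Println(decoded.Hostname, decoded.Paths, groups[string(k)])
}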


@@ -1,53 +1,58 @@
package main
import (
"errors"
"restic/errors"
"restic/repository"
"github.com/spf13/cobra"
)
type CmdInit struct {
global *GlobalOptions
var cmdInit = &cobra.Command{
Use: "init",
Short: "initialize a new repository",
Long: `
The "init" command initializes a new repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runInit(globalOptions, args)
},
}
func (cmd CmdInit) Execute(args []string) error {
if cmd.global.Repo == "" {
return errors.New("Please specify repository location (-r)")
func init() {
cmdRoot.AddCommand(cmdInit)
}
func runInit(gopts GlobalOptions, args []string) error {
if gopts.Repo == "" {
return errors.Fatal("Please specify repository location (-r)")
}
be, err := create(cmd.global.Repo)
be, err := create(gopts.Repo)
if err != nil {
cmd.global.Exitf(1, "creating backend at %s failed: %v\n", cmd.global.Repo, err)
return errors.Fatalf("create backend at %s failed: %v\n", gopts.Repo, err)
}
if cmd.global.password == "" {
cmd.global.password = cmd.global.ReadPasswordTwice(
if gopts.password == "" {
gopts.password, err = ReadPasswordTwice(gopts,
"enter password for new backend: ",
"enter password again: ")
if err != nil {
return err
}
}
s := repository.New(be)
err = s.Init(cmd.global.password)
err = s.Init(gopts.password)
if err != nil {
cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err)
return errors.Fatalf("create key in backend at %s failed: %v\n", gopts.Repo, err)
}
cmd.global.Verbosef("created restic backend %v at %s\n", s.Config.ID[:10], cmd.global.Repo)
cmd.global.Verbosef("\n")
cmd.global.Verbosef("Please note that knowledge of your password is required to access\n")
cmd.global.Verbosef("the repository. Losing your password means that your data is\n")
cmd.global.Verbosef("irrecoverably lost.\n")
Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], gopts.Repo)
Verbosef("\n")
Verbosef("Please note that knowledge of your password is required to access\n")
Verbosef("the repository. Losing your password means that your data is\n")
Verbosef("irrecoverably lost.\n")
return nil
}
func init() {
_, err := parser.AddCommand("init",
"create repository",
"The init command creates a new repository",
&CmdInit{global: &globalOpts})
if err != nil {
panic(err)
}
}


@@ -1,45 +1,39 @@
package main
import (
"errors"
"context"
"fmt"
"restic/backend"
"restic"
"restic/errors"
"restic/repository"
"github.com/spf13/cobra"
)
type CmdKey struct {
global *GlobalOptions
newPassword string
var cmdKey = &cobra.Command{
Use: "key [list|add|rm|passwd] [ID]",
Short: "manage keys (passwords)",
Long: `
The "key" command manages keys (passwords) for accessing the repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runKey(globalOptions, args)
},
}
func init() {
_, err := parser.AddCommand("key",
"manage keys",
"The key command manages keys (passwords) of a repository",
&CmdKey{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdKey)
}
func (cmd CmdKey) listKeys(s *repository.Repository) error {
func listKeys(ctx context.Context, s *repository.Repository) error {
tab := NewTable()
tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
tab.RowFormat = "%s%-10s %-10s %-10s %s"
plen, err := s.PrefixLength(backend.Key)
if err != nil {
return err
}
done := make(chan struct{})
defer close(done)
for id := range s.List(backend.Key, done) {
for id := range s.List(restic.KeyFile, ctx.Done()) {
k, err := repository.LoadKey(s, id.String())
if err != nil {
cmd.global.Warnf("LoadKey() failed: %v\n", err)
Warnf("LoadKey() failed: %v\n", err)
continue
}
@@ -49,74 +43,88 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error {
} else {
current = " "
}
tab.Rows = append(tab.Rows, []interface{}{current, id.String()[:plen],
tab.Rows = append(tab.Rows, []interface{}{current, id.Str(),
k.Username, k.Hostname, k.Created.Format(TimeFormat)})
}
return tab.Write(cmd.global.stdout)
return tab.Write(globalOptions.stdout)
}
func (cmd CmdKey) getNewPassword() string {
if cmd.newPassword != "" {
return cmd.newPassword
// testKeyNewPassword is used to set a new password during integration testing.
var testKeyNewPassword string
func getNewPassword(gopts GlobalOptions) (string, error) {
if testKeyNewPassword != "" {
return testKeyNewPassword, nil
}
return cmd.global.ReadPasswordTwice(
return ReadPasswordTwice(gopts,
"enter password for new key: ",
"enter password again: ")
}
func (cmd CmdKey) addKey(repo *repository.Repository) error {
id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
func addKey(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
return err
}
cmd.global.Verbosef("saved new key as %s\n", id)
id, err := repository.AddKey(repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
Verbosef("saved new key as %s\n", id)
return nil
}
func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error {
func deleteKey(repo *repository.Repository, name string) error {
if name == repo.KeyName() {
return errors.New("refusing to remove key currently used to access repository")
return errors.Fatal("refusing to remove key currently used to access repository")
}
err := repo.Backend().Remove(backend.Key, name)
h := restic.Handle{Type: restic.KeyFile, Name: name}
err := repo.Backend().Remove(h)
if err != nil {
return err
}
cmd.global.Verbosef("removed key %v\n", name)
Verbosef("removed key %v\n", name)
return nil
}
func (cmd CmdKey) changePassword(repo *repository.Repository) error {
id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
if err != nil {
return fmt.Errorf("creating new key failed: %v\n", err)
}
err = repo.Backend().Remove(backend.Key, repo.KeyName())
func changePassword(gopts GlobalOptions, repo *repository.Repository) error {
pw, err := getNewPassword(gopts)
if err != nil {
return err
}
cmd.global.Verbosef("saved new key as %s\n", id)
id, err := repository.AddKey(repo, pw, repo.Key())
if err != nil {
return errors.Fatalf("creating new key failed: %v\n", err)
}
h := restic.Handle{Type: restic.KeyFile, Name: repo.KeyName()}
err = repo.Backend().Remove(h)
if err != nil {
return err
}
Verbosef("saved new key as %s\n", id)
return nil
}
func (cmd CmdKey) Usage() string {
return "[list|add|rm|passwd] [ID]"
}
func (cmd CmdKey) Execute(args []string) error {
if len(args) < 1 || (args[0] == "rm" && len(args) != 2) {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
func runKey(gopts GlobalOptions, args []string) error {
if len(args) < 1 || (args[0] == "rm" && len(args) != 2) || (args[0] != "rm" && len(args) != 1) {
return errors.Fatal("wrong number of arguments")
}
repo, err := cmd.global.OpenRepository()
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -129,7 +137,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}
return cmd.listKeys(repo)
return listKeys(ctx, repo)
case "add":
lock, err := lockRepo(repo)
defer unlockRepo(lock)
@@ -137,7 +145,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}
return cmd.addKey(repo)
return addKey(gopts, repo)
case "rm":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
@@ -145,12 +153,12 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}
id, err := backend.Find(repo.Backend(), backend.Key, args[1])
id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1])
if err != nil {
return err
}
return cmd.deleteKey(repo, id)
return deleteKey(repo, id)
case "passwd":
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
@@ -158,7 +166,7 @@ func (cmd CmdKey) Execute(args []string) error {
return err
}
return cmd.changePassword(repo)
return changePassword(gopts, repo)
}
return nil
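
Both addKey and changePassword write the new key before touching the old one; changePassword only removes the current key file after the replacement is safely stored, so an interruption can leave a spare key but never a locked-out repository. A toy sketch of that ordering (hypothetical function values, not the restic API):

package main

import "fmt"

// rotate saves the new key first and removes the old one only on success.
func rotate(saveNew func() error, removeOld func() error) error {
    if err := saveNew(); err != nil {
        return err // the old key is still intact
    }
    return removeOld()
}

func main() {
    err := rotate(
        func() error { fmt.Println("saved new key"); return nil },
        func() error { fmt.Println("removed old key"); return nil },
    )
    fmt.Println("err:", err)
}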


@@ -1,41 +1,40 @@
package main
import (
"errors"
"fmt"
"restic"
"restic/errors"
"restic/index"
"restic/backend"
"github.com/spf13/cobra"
)
type CmdList struct {
global *GlobalOptions
var cmdList = &cobra.Command{
Use: "list [blobs|packs|index|snapshots|keys|locks]",
Short: "list objects in the repository",
Long: `
The "list" command allows listing objects in the repository based on type.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runList(globalOptions, args)
},
}
func init() {
_, err := parser.AddCommand("list",
"lists data",
"The list command lists structures or data of a repository",
&CmdList{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdList)
}
func (cmd CmdList) Usage() string {
return "[blobs|packs|index|snapshots|keys|locks]"
}
func (cmd CmdList) Execute(args []string) error {
func runList(opts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
return errors.Fatal("type not specified")
}
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(opts)
if err != nil {
return err
}
if !cmd.global.NoLock {
if !opts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
@@ -43,37 +42,37 @@ func (cmd CmdList) Execute(args []string) error {
}
}
var t backend.Type
var t restic.FileType
switch args[0] {
case "packs":
t = restic.DataFile
case "index":
t = restic.IndexFile
case "snapshots":
t = restic.SnapshotFile
case "keys":
t = restic.KeyFile
case "locks":
t = restic.LockFile
case "blobs":
err = repo.LoadIndex()
idx, err := index.Load(repo, nil)
if err != nil {
return err
}
for _, idx := range repo.Index().All() {
for blob := range idx.Each(nil) {
cmd.global.Printf("%s\n", blob.ID)
for _, pack := range idx.Packs {
for _, entry := range pack.Entries {
fmt.Printf("%v %v\n", entry.Type, entry.ID)
}
}
return nil
case "packs":
t = backend.Data
case "index":
t = backend.Index
case "snapshots":
t = backend.Snapshot
case "keys":
t = backend.Key
case "locks":
t = backend.Lock
default:
return errors.New("invalid type")
return errors.Fatal("invalid type")
}
for id := range repo.List(t, nil) {
cmd.global.Printf("%s\n", id)
Printf("%s\n", id)
}
return nil


@@ -1,63 +1,61 @@
package main
import (
"fmt"
"os"
"context"
"path/filepath"
"github.com/spf13/cobra"
"restic"
"restic/backend"
"restic/errors"
"restic/repository"
)
type CmdLs struct {
Long bool `short:"l" long:"long" description:"Use a long listing format showing size and mode"`
global *GlobalOptions
var cmdLs = &cobra.Command{
Use: "ls [flags] [snapshot-ID ...]",
Short: "list files in a snapshot",
Long: `
The "ls" command allows listing files and directories in a snapshot.
The special snapshot-ID "latest" can be used to list files and directories of the latest snapshot in the repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runLs(lsOptions, globalOptions, args)
},
}
// LsOptions collects all options for the ls command.
type LsOptions struct {
ListLong bool
Host string
Tags []string
Paths []string
}
var lsOptions LsOptions
func init() {
_, err := parser.AddCommand("ls",
"list files",
"The ls command lists all files and directories in a snapshot",
&CmdLs{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdLs)
flags := cmdLs.Flags()
flags.BoolVarP(&lsOptions.ListLong, "long", "l", false, "use a long listing format showing size and mode")
flags.StringVarP(&lsOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
flags.StringSliceVar(&lsOptions.Tags, "tag", nil, "only consider snapshots which include this `tag`, when no snapshot ID is given")
flags.StringSliceVar(&lsOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot ID is given")
}
func (cmd CmdLs) printNode(prefix string, n *restic.Node) string {
if !cmd.Long {
return filepath.Join(prefix, n.Name)
}
switch n.Type {
case "file":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
n.Mode, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name))
case "dir":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
n.Mode|os.ModeDir, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name))
case "symlink":
return fmt.Sprintf("%s %5d %5d %6d %s %s -> %s",
n.Mode|os.ModeSymlink, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name), n.LinkTarget)
default:
return fmt.Sprintf("<Node(%s) %s>", n.Type, n.Name)
}
}
func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backend.ID) error {
tree, err := restic.LoadTree(repo, id)
func printTree(repo *repository.Repository, id *restic.ID, prefix string) error {
tree, err := repo.LoadTree(*id)
if err != nil {
return err
}
for _, entry := range tree.Nodes {
cmd.global.Printf(cmd.printNode(prefix, entry) + "\n")
Printf(formatNode(prefix, entry, lsOptions.ListLong) + "\n")
if entry.Type == "dir" && entry.Subtree != nil {
err = cmd.printTree(filepath.Join(prefix, entry.Name), repo, *entry.Subtree)
if err != nil {
if err = printTree(repo, entry.Subtree, filepath.Join(prefix, entry.Name)); err != nil {
return err
}
}
@@ -66,36 +64,28 @@ func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backen
return nil
}
func (cmd CmdLs) Usage() string {
return "snapshot-ID [DIR]"
}
func (cmd CmdLs) Execute(args []string) error {
if len(args) < 1 || len(args) > 2 {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
id, err := restic.FindSnapshot(repo, args[0])
if err != nil {
return err
}
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
return err
}
cmd.global.Verbosef("snapshot of %v at %s:\n", sn.Paths, sn.Time)
return cmd.printTree("", repo, *sn.Tree)
func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 && opts.Host == "" && len(opts.Tags) == 0 && len(opts.Paths) == 0 {
return errors.Fatal("Invalid arguments, either give one or more snapshot IDs or set filters.")
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if err = repo.LoadIndex(); err != nil {
return err
}
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
Verbosef("snapshot %s of %v at %s):\n", sn.ID().Str(), sn.Paths, sn.Time)
if err = printTree(repo, sn.Tree, string(filepath.Separator)); err != nil {
return err
}
}
return nil
}
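
printTree above is a straightforward depth-first walk: print the entry under the accumulated prefix, then recurse into subtrees with filepath.Join extending the prefix. The same shape on a toy tree, for illustration only:

package main

import (
    "fmt"
    "path/filepath"
)

type node struct {
    name     string
    children []node
}

// walk prints each entry, then descends with the joined prefix — the same
// recursion pattern printTree uses for snapshot trees.
func walk(prefix string, n node) {
    p := filepath.Join(prefix, n.name)
    fmt.Println(p)
    for _, c := range n.children {
        walk(p, c)
    }
}

func main() {
    walk("/", node{name: "home", children: []node{
        {name: "user", children: []node{{name: "notes.txt"}}},
    }})
}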


@@ -4,47 +4,62 @@
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
"restic/debug"
"restic/errors"
resticfs "restic/fs"
"restic/fuse"
systemFuse "bazil.org/fuse"
"bazil.org/fuse/fs"
)
type CmdMount struct {
Root bool `long:"owner-root" description:"use 'root' as the owner of files and dirs" default:"false"`
global *GlobalOptions
ready chan struct{}
done chan struct{}
var cmdMount = &cobra.Command{
Use: "mount [flags] mountpoint",
Short: "mount the repository",
Long: `
The "mount" command mounts the repository via fuse to a directory. This is a
read-only mount.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runMount(mountOptions, globalOptions, args)
},
}
// MountOptions collects all options for the mount command.
type MountOptions struct {
OwnerRoot bool
AllowRoot bool
AllowOther bool
Host string
Tags []string
Paths []string
}
var mountOptions MountOptions
func init() {
_, err := parser.AddCommand("mount",
"mount a repository",
"The mount command mounts a repository read-only to a given directory",
&CmdMount{
global: &globalOpts,
ready: make(chan struct{}, 1),
done: make(chan struct{}),
})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdMount)
mountFlags := cmdMount.Flags()
mountFlags.BoolVar(&mountOptions.OwnerRoot, "owner-root", false, "use 'root' as the owner of files and dirs")
mountFlags.BoolVar(&mountOptions.AllowRoot, "allow-root", false, "allow root user to access the data in the mounted directory")
mountFlags.BoolVar(&mountOptions.AllowOther, "allow-other", false, "allow other users to access the data in the mounted directory")
mountFlags.StringVarP(&mountOptions.Host, "host", "H", "", `only consider snapshots for this host`)
mountFlags.StringSliceVar(&mountOptions.Tags, "tag", nil, "only consider snapshots which include this `tag`")
mountFlags.StringSliceVar(&mountOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`")
}
func (cmd CmdMount) Usage() string {
return "MOUNTPOINT"
}
func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
debug.Log("start mount")
defer debug.Log("finish mount")
func (cmd CmdMount) Execute(args []string) error {
if len(args) == 0 {
return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
}
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
@@ -54,54 +69,67 @@ func (cmd CmdMount) Execute(args []string) error {
return err
}
mountpoint := args[0]
if _, err := os.Stat(mountpoint); os.IsNotExist(err) {
cmd.global.Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
err = os.Mkdir(mountpoint, os.ModeDir|0700)
if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) {
Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
err = resticfs.Mkdir(mountpoint, os.ModeDir|0700)
if err != nil {
return err
}
}
c, err := systemFuse.Mount(
mountpoint,
mountOptions := []systemFuse.MountOption{
systemFuse.ReadOnly(),
systemFuse.FSName("restic"),
)
}
if opts.AllowRoot {
mountOptions = append(mountOptions, systemFuse.AllowRoot())
}
if opts.AllowOther {
mountOptions = append(mountOptions, systemFuse.AllowOther())
}
c, err := systemFuse.Mount(mountpoint, mountOptions...)
if err != nil {
return err
}
root := fs.Tree{}
root.Add("snapshots", fuse.NewSnapshotsDir(repo, cmd.Root))
Printf("Now serving the repository at %s\n", mountpoint)
Printf("Don't forget to umount after quitting!\n")
cmd.global.Printf("Now serving %s at %s\n", repo.Backend().Location(), mountpoint)
cmd.global.Printf("Don't forget to umount after quitting!\n")
root := fs.Tree{}
root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot, opts.Paths, opts.Tags, opts.Host))
debug.Log("serving mount at %v", mountpoint)
err = fs.Serve(c, &root)
if err != nil {
return err
}
<-c.Ready
return c.MountError
}
func umount(mountpoint string) error {
return systemFuse.Unmount(mountpoint)
}
func runMount(opts MountOptions, gopts GlobalOptions, args []string) error {
if len(args) == 0 {
return errors.Fatal("wrong number of parameters")
}
mountpoint := args[0]
AddCleanupHandler(func() error {
return systemFuse.Unmount(mountpoint)
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
err := umount(mountpoint)
if err != nil {
Warnf("unable to umount (maybe already umounted?): %v\n", err)
}
return nil
})
cmd.ready <- struct{}{}
errServe := make(chan error)
go func() {
err = fs.Serve(c, &root)
if err != nil {
errServe <- err
}
<-c.Ready
errServe <- c.MountError
}()
select {
case err := <-errServe:
return err
case <-cmd.done:
err := systemFuse.Unmount(mountpoint)
if err != nil {
cmd.global.Printf("Error umounting: %s\n", err)
}
return c.Close()
}
return mount(opts, gopts, mountpoint)
}
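
The mount options are assembled with a common Go pattern: start a slice with the unconditional options and append the optional ones before expanding the slice into the variadic call. A generic sketch of the pattern (placeholder option type, not the fuse API):

package main

import "fmt"

type option string

func connect(target string, opts ...option) {
    fmt.Println("connect", target, "with", opts)
}

func main() {
    opts := []option{"read-only", "fsname=restic"}
    allowRoot := true // stands in for opts.AllowRoot
    if allowRoot {
        opts = append(opts, "allow-root")
    }
    connect("/mnt/restic", opts...) // slice expanded into variadic arguments
}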


@@ -0,0 +1,247 @@
package main
import (
"context"
"fmt"
"restic"
"restic/debug"
"restic/errors"
"restic/index"
"restic/repository"
"time"
"github.com/spf13/cobra"
)
var cmdPrune = &cobra.Command{
Use: "prune [flags]",
Short: "remove unneeded data from the repository",
Long: `
The "prune" command checks the repository and removes data that is not
referenced and therefore not needed any more.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runPrune(globalOptions)
},
}
func init() {
cmdRoot.AddCommand(cmdPrune)
}
// newProgressMax returns a progress that counts blobs.
func newProgressMax(show bool, max uint64, description string) *restic.Progress {
if !show {
return nil
}
p := restic.NewProgress()
p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
status := fmt.Sprintf("[%s] %s %d / %d %s",
formatDuration(d),
formatPercent(s.Blobs, max),
s.Blobs, max, description)
if w := stdoutTerminalWidth(); w > 0 {
if len(status) > w && w > 4 {
status = status[:w-4] + "... "
}
}
PrintProgress("%s", status)
}
p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\n")
}
return p
}
func runPrune(gopts GlobalOptions) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
return pruneRepository(gopts, repo)
}
func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
err := repo.LoadIndex()
if err != nil {
return err
}
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
var stats struct {
blobs int
packs int
snapshots int
bytes int64
}
Verbosef("counting files in repo\n")
for range repo.List(restic.DataFile, ctx.Done()) {
stats.packs++
}
Verbosef("building new index for repo\n")
bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
idx, err := index.New(repo, bar)
if err != nil {
return err
}
blobs := 0
for _, pack := range idx.Packs {
stats.bytes += pack.Size
blobs += len(pack.Entries)
}
Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
len(idx.Packs), blobs, formatBytes(uint64(stats.bytes)))
blobCount := make(map[restic.BlobHandle]int)
duplicateBlobs := 0
duplicateBytes := 0
// find duplicate blobs
for _, p := range idx.Packs {
for _, entry := range p.Entries {
stats.blobs++
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
blobCount[h]++
if blobCount[h] > 1 {
duplicateBlobs++
duplicateBytes += int(entry.Length)
}
}
}
Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n",
stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes)))
Verbosef("load all snapshots\n")
// find referenced blobs
snapshots, err := restic.LoadAllSnapshots(repo)
if err != nil {
return err
}
stats.snapshots = len(snapshots)
Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)
usedBlobs := restic.NewBlobSet()
seenBlobs := restic.NewBlobSet()
bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots")
bar.Start()
for _, sn := range snapshots {
debug.Log("process snapshot %v", sn.ID().Str())
err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
if err != nil {
return err
}
debug.Log("found %v blobs for snapshot %v", sn.ID().Str())
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()
Verbosef("found %d of %d data blobs still in use, removing %d blobs\n",
len(usedBlobs), stats.blobs, stats.blobs-len(usedBlobs))
// find packs that need a rewrite
rewritePacks := restic.NewIDSet()
for _, pack := range idx.Packs {
for _, blob := range pack.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if !usedBlobs.Has(h) {
rewritePacks.Insert(pack.ID)
continue
}
if blobCount[h] > 1 {
rewritePacks.Insert(pack.ID)
}
}
}
removeBytes := duplicateBytes
// find packs that are unneeded
removePacks := restic.NewIDSet()
for packID, p := range idx.Packs {
hasActiveBlob := false
for _, blob := range p.Entries {
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
if usedBlobs.Has(h) {
hasActiveBlob = true
continue
}
removeBytes += int(blob.Length)
}
if hasActiveBlob {
continue
}
removePacks.Insert(packID)
if !rewritePacks.Has(packID) {
return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
}
rewritePacks.Delete(packID)
}
Verbosef("will delete %d packs and rewrite %d packs, this frees %s\n",
len(removePacks), len(rewritePacks), formatBytes(uint64(removeBytes)))
if len(rewritePacks) != 0 {
bar = newProgressMax(!gopts.Quiet, uint64(len(rewritePacks)), "packs rewritten")
bar.Start()
err = repository.Repack(repo, rewritePacks, usedBlobs, bar)
if err != nil {
return err
}
bar.Done()
}
if len(removePacks) != 0 {
bar = newProgressMax(!gopts.Quiet, uint64(len(removePacks)), "packs deleted")
bar.Start()
for packID := range removePacks {
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
err = repo.Backend().Remove(h)
if err != nil {
Warnf("unable to remove file %v from the repository\n", packID.Str())
}
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()
}
if err = rebuildIndex(ctx, repo); err != nil {
return err
}
Verbosef("done\n")
return nil
}
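
The pack classification above boils down to three outcomes: packs whose blobs are all still used are kept, packs mixing used and unused (or duplicate) blobs are rewritten, and packs with no used blobs at all are deleted. A toy version of that decision, independent of the restic types:

package main

import "fmt"

// classify reports what prune would do with a pack, given which of its
// blobs are still referenced by some snapshot.
func classify(blobUsed []bool) string {
    used, unused := 0, 0
    for _, u := range blobUsed {
        if u {
            used++
        } else {
            unused++
        }
    }
    switch {
    case unused == 0:
        return "keep"
    case used == 0:
        return "remove"
    default:
        return "rewrite"
    }
}

func main() {
    fmt.Println(classify([]bool{true, true}))   // keep
    fmt.Println(classify([]bool{true, false}))  // rewrite
    fmt.Println(classify([]bool{false, false})) // remove
}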


@@ -1,125 +1,34 @@
package main
import (
"fmt"
"os"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/repository"
"restic/worker"
"context"
"restic"
"restic/index"
"github.com/spf13/cobra"
)
type CmdRebuildIndex struct {
global *GlobalOptions
repo *repository.Repository
var cmdRebuildIndex = &cobra.Command{
Use: "rebuild-index [flags]",
Short: "build a new index file",
Long: `
The "rebuild-index" command creates a new index based on the pack files in the
repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runRebuildIndex(globalOptions)
},
}
func init() {
_, err := parser.AddCommand("rebuild-index",
"rebuild the index",
"The rebuild-index command builds a new index",
&CmdRebuildIndex{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdRebuildIndex)
}
const rebuildIndexWorkers = 10
func loadBlobsFromPacks(repo *repository.Repository) (packs map[backend.ID][]pack.Blob) {
done := make(chan struct{})
defer close(done)
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
return repo.ListPack(job.Data.(backend.ID))
}
jobCh := make(chan worker.Job)
resCh := make(chan worker.Job)
wp := worker.New(rebuildIndexWorkers, f, jobCh, resCh)
go func() {
for id := range repo.List(backend.Data, done) {
jobCh <- worker.Job{Data: id}
}
close(jobCh)
}()
packs = make(map[backend.ID][]pack.Blob)
for job := range resCh {
id := job.Data.(backend.ID)
if job.Error != nil {
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
continue
}
entries := job.Result.([]pack.Blob)
packs[id] = entries
}
wp.Wait()
return packs
}
func listIndexIDs(repo *repository.Repository) (list backend.IDs) {
done := make(chan struct{})
for id := range repo.List(backend.Index, done) {
list = append(list, id)
}
return list
}
func (cmd CmdRebuildIndex) rebuildIndex() error {
debug.Log("RebuildIndex.RebuildIndex", "start rebuilding index")
packs := loadBlobsFromPacks(cmd.repo)
cmd.global.Verbosef("loaded blobs from %d packs\n", len(packs))
idx := repository.NewIndex()
for packID, entries := range packs {
for _, entry := range entries {
pb := repository.PackedBlob{
ID: entry.ID,
Type: entry.Type,
Length: entry.Length,
Offset: entry.Offset,
PackID: packID,
}
idx.Store(pb)
}
}
oldIndexes := listIndexIDs(cmd.repo)
idx.AddToSupersedes(oldIndexes...)
cmd.global.Printf(" saving new index\n")
id, err := repository.SaveIndex(cmd.repo, idx)
if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
return err
}
debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
for _, indexID := range oldIndexes {
err := cmd.repo.Backend().Remove(backend.Index, indexID.String())
if err != nil {
cmd.global.Warnf("unable to remove index %v: %v\n", indexID.Str(), err)
}
}
return nil
}
func (cmd CmdRebuildIndex) Execute(args []string) error {
repo, err := cmd.global.OpenRepository()
func runRebuildIndex(gopts GlobalOptions) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
cmd.repo = repo
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
@@ -127,5 +36,49 @@ func (cmd CmdRebuildIndex) Execute(args []string) error {
return err
}
return cmd.rebuildIndex()
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
return rebuildIndex(ctx, repo)
}
func rebuildIndex(ctx context.Context, repo restic.Repository) error {
Verbosef("counting files in repo\n")
var packs uint64
for range repo.List(restic.DataFile, ctx.Done()) {
packs++
}
bar := newProgressMax(!globalOptions.Quiet, packs, "packs")
idx, err := index.New(repo, bar)
if err != nil {
return err
}
Verbosef("finding old index files\n")
var supersedes restic.IDs
for id := range repo.List(restic.IndexFile, ctx.Done()) {
supersedes = append(supersedes, id)
}
id, err := idx.Save(repo, supersedes)
if err != nil {
return err
}
Verbosef("saved new index as %v\n", id.Str())
Verbosef("remove %d old index files\n", len(supersedes))
for _, id := range supersedes {
if err := repo.Backend().Remove(restic.Handle{
Type: restic.IndexFile,
Name: id.String(),
}); err != nil {
Warnf("error removing old index %v: %v\n", id.Str(), err)
}
}
return nil
}
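
The ordering in rebuildIndex matters: the superseding index is saved before any old index file is removed, so a crash in between leaves redundant but complete index data. A compressed, self-contained sketch of that sequence, with hypothetical callbacks standing in for the repository API:

package main

import "fmt"

// rebuild saves the new index (recording what it supersedes) first and only
// then deletes the old files; removal errors are reported but non-fatal.
func rebuild(old []string, saveNew func(supersedes []string) error, remove func(id string) error) error {
    if err := saveNew(old); err != nil {
        return err // old indexes are untouched on failure
    }
    for _, id := range old {
        if err := remove(id); err != nil {
            fmt.Printf("error removing old index %v: %v\n", id, err)
        }
    }
    return nil
}

func main() {
    _ = rebuild([]string{"idx1", "idx2"},
        func(s []string) error { fmt.Println("saved new index superseding", s); return nil },
        func(id string) error { fmt.Println("removed", id); return nil },
    )
}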


@@ -1,62 +1,77 @@
package main
import (
"errors"
"fmt"
"restic"
"restic/backend"
"restic/debug"
"restic/errors"
"restic/filter"
"github.com/spf13/cobra"
)
type CmdRestore struct {
Exclude []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
Include []string `short:"i" long:"include" description:"Include a pattern, exclude everything else (can be specified multiple times)"`
Target string `short:"t" long:"target" description:"Directory to restore to"`
Host string `short:"h" long:"host" description:"Source Filter (for id=latest)"`
Paths []string `short:"p" long:"path" description:"Path Filter (absolute path;for id=latest) (can be specified multiple times)"`
global *GlobalOptions
var cmdRestore = &cobra.Command{
Use: "restore [flags] snapshotID",
Short: "extract the data from a snapshot",
Long: `
The "restore" command extracts the data from a snapshot from the repository to
a directory.
The special snapshot "latest" can be used to restore the latest snapshot in the
repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runRestore(restoreOptions, globalOptions, args)
},
}
// RestoreOptions collects all options for the restore command.
type RestoreOptions struct {
Exclude []string
Include []string
Target string
Host string
Paths []string
Tags []string
}
var restoreOptions RestoreOptions
func init() {
_, err := parser.AddCommand("restore",
"restore a snapshot",
"The restore command restores a snapshot to a directory",
&CmdRestore{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(cmdRestore)
flags := cmdRestore.Flags()
flags.StringSliceVarP(&restoreOptions.Exclude, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
flags.StringSliceVarP(&restoreOptions.Include, "include", "i", nil, "include a `pattern`, exclude everything else (can be specified multiple times)")
flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
flags.StringVarP(&restoreOptions.Host, "host", "H", "", `only consider snapshots for this host when the snapshot ID is "latest"`)
flags.StringSliceVar(&restoreOptions.Tags, "tag", nil, "only consider snapshots which include this `tag` for snapshot ID \"latest\"")
flags.StringSliceVar(&restoreOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path` for snapshot ID \"latest\"")
}
func (cmd CmdRestore) Usage() string {
return "snapshot-ID"
}
func (cmd CmdRestore) Execute(args []string) error {
func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
if len(args) != 1 {
return fmt.Errorf("wrong number of arguments, Usage: %s", cmd.Usage())
return errors.Fatal("no snapshot ID specified")
}
if cmd.Target == "" {
return errors.New("please specify a directory to restore to (--target)")
if opts.Target == "" {
return errors.Fatal("please specify a directory to restore to (--target)")
}
if len(cmd.Exclude) > 0 && len(cmd.Include) > 0 {
return errors.New("exclude and include patterns are mutually exclusive")
if len(opts.Exclude) > 0 && len(opts.Include) > 0 {
return errors.Fatal("exclude and include patterns are mutually exclusive")
}
snapshotIDString := args[0]
debug.Log("restore", "restore %v to %v", snapshotIDString, cmd.Target)
debug.Log("restore %v to %v", snapshotIDString, opts.Target)
repo, err := cmd.global.OpenRepository()
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if !cmd.global.NoLock {
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
@@ -69,60 +84,55 @@ func (cmd CmdRestore) Execute(args []string) error {
return err
}
var id backend.ID
var id restic.ID
if snapshotIDString == "latest" {
id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host)
id, err = restic.FindLatestSnapshot(repo, opts.Paths, opts.Tags, opts.Host)
if err != nil {
cmd.global.Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, cmd.Paths, cmd.Host)
Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, opts.Paths, opts.Host)
}
} else {
id, err = restic.FindSnapshot(repo, snapshotIDString)
if err != nil {
cmd.global.Exitf(1, "invalid id %q: %v", snapshotIDString, err)
Exitf(1, "invalid id %q: %v", snapshotIDString, err)
}
}
res, err := restic.NewRestorer(repo, id)
if err != nil {
cmd.global.Exitf(2, "creating restorer failed: %v\n", err)
Exitf(2, "creating restorer failed: %v\n", err)
}
res.Error = func(dir string, node *restic.Node, err error) error {
cmd.global.Warnf("error for %s: %+v\n", dir, err)
Warnf("error for %s: %+v\n", dir, err)
return nil
}
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) bool {
matched, err := filter.List(cmd.Exclude, item)
matched, err := filter.List(opts.Exclude, item)
if err != nil {
cmd.global.Warnf("error for exclude pattern: %v", err)
Warnf("error for exclude pattern: %v", err)
}
return !matched
}
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) bool {
matched, err := filter.List(cmd.Include, item)
matched, err := filter.List(opts.Include, item)
if err != nil {
cmd.global.Warnf("error for include pattern: %v", err)
Warnf("error for include pattern: %v", err)
}
return matched
}
if len(cmd.Exclude) > 0 {
if len(opts.Exclude) > 0 {
res.SelectFilter = selectExcludeFilter
} else if len(cmd.Include) > 0 {
} else if len(opts.Include) > 0 {
res.SelectFilter = selectIncludeFilter
}
cmd.global.Verbosef("restoring %s to %s\n", res.Snapshot(), cmd.Target)
Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
err = res.RestoreTo(cmd.Target)
if err != nil {
return err
}
return nil
return res.RestoreTo(opts.Target)
}
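
The two SelectFilter closures above implement opposite semantics over the same match test: --exclude restores everything except matches, --include restores only matches, and runRestore rejects combining them. A toy equivalent using filepath.Match instead of restic's filter package:

package main

import (
    "fmt"
    "path/filepath"
)

// selected reports whether an item survives the filter: with include
// semantics a match is required, with exclude semantics a match disqualifies.
func selected(patterns []string, include bool, item string) bool {
    matched := false
    for _, p := range patterns {
        if ok, _ := filepath.Match(p, item); ok {
            matched = true
            break
        }
    }
    if include {
        return matched
    }
    return !matched
}

func main() {
    fmt.Println(selected([]string{"*.tmp"}, false, "notes.txt")) // true: restored
    fmt.Println(selected([]string{"*.tmp"}, false, "junk.tmp"))  // false: skipped
    fmt.Println(selected([]string{"*.go"}, true, "main.go"))     // true: restored
}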


@@ -1,140 +1,170 @@
package main
import (
"encoding/hex"
"context"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"github.com/spf13/cobra"
"restic"
"restic/backend"
)
type Table struct {
Header string
Rows [][]interface{}
RowFormat string
var cmdSnapshots = &cobra.Command{
Use: "snapshots [snapshotID ...]",
Short: "list all snapshots",
Long: `
The "snapshots" command lists all snapshots stored in the repository.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runSnapshots(snapshotOptions, globalOptions, args)
},
}
func NewTable() Table {
return Table{
Rows: [][]interface{}{},
}
// SnapshotOptions bundles all options for the snapshots command.
type SnapshotOptions struct {
Host string
Tags []string
Paths []string
}
func (t Table) Write(w io.Writer) error {
_, err := fmt.Fprintln(w, t.Header)
if err != nil {
return err
}
_, err = fmt.Fprintln(w, strings.Repeat("-", 70))
var snapshotOptions SnapshotOptions
func init() {
cmdRoot.AddCommand(cmdSnapshots)
f := cmdSnapshots.Flags()
f.StringVarP(&snapshotOptions.Host, "host", "H", "", "only consider snapshots for this `host`")
f.StringSliceVar(&snapshotOptions.Tags, "tag", nil, "only consider snapshots which include this `tag` (can be specified multiple times)")
f.StringSliceVar(&snapshotOptions.Paths, "path", nil, "only consider snapshots for this `path` (can be specified multiple times)")
}
func runSnapshots(opts SnapshotOptions, gopts GlobalOptions, args []string) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
for _, row := range t.Rows {
_, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
if !gopts.NoLock {
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
var list restic.Snapshots
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
list = append(list, sn)
}
sort.Sort(sort.Reverse(list))
if gopts.JSON {
err := printSnapshotsJSON(gopts.stdout, list)
if err != nil {
Warnf("error printing snapshot: %v\n", err)
}
return nil
}
PrintSnapshots(gopts.stdout, list)
return nil
}
const TimeFormat = "2006-01-02 15:04:05"
// PrintSnapshots prints a text table of the snapshots in list to stdout.
func PrintSnapshots(stdout io.Writer, list restic.Snapshots) {
type CmdSnapshots struct {
Host string `short:"h" long:"host" description:"Host Filter"`
Paths []string `short:"p" long:"path" description:"Path Filter (absolute path) (can be specified multiple times)"`
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("snapshots",
"show snapshots",
"The snapshots command lists all snapshots stored in a repository",
&CmdSnapshots{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdSnapshots) Usage() string {
return ""
}
func (cmd CmdSnapshots) Execute(args []string) error {
if len(args) != 0 {
return fmt.Errorf("wrong number of arguments, usage: %s", cmd.Usage())
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
// Determine the max widths for host and tag.
maxHost, maxTag := 10, 6
for _, sn := range list {
if len(sn.Hostname) > maxHost {
maxHost = len(sn.Hostname)
}
for _, tag := range sn.Tags {
if len(tag) > maxTag {
maxTag = len(tag)
}
}
}
tab := NewTable()
tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Host", "Directory")
tab.RowFormat = "%-8s %-19s %-10s %s"
done := make(chan struct{})
defer close(done)
list := []*restic.Snapshot{}
for id := range repo.List(backend.Snapshot, done) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
continue
}
if restic.SamePaths(sn.Paths, cmd.Paths) && (cmd.Host == "" || cmd.Host == sn.Hostname) {
pos := sort.Search(len(list), func(i int) bool {
return list[i].Time.After(sn.Time)
})
if pos < len(list) {
list = append(list, nil)
copy(list[pos+1:], list[pos:])
list[pos] = sn
} else {
list = append(list, sn)
}
}
}
plen, err := repo.PrefixLength(backend.Snapshot)
if err != nil {
return err
}
tab.Header = fmt.Sprintf("%-8s %-19s %-*s %-*s %-3s %s", "ID", "Date", -maxHost, "Host", -maxTag, "Tags", "", "Directory")
tab.RowFormat = fmt.Sprintf("%%-8s %%-19s %%%ds %%%ds %%-3s %%s", -maxHost, -maxTag)
for _, sn := range list {
if len(sn.Paths) == 0 {
continue
}
id := sn.ID()
tab.Rows = append(tab.Rows, []interface{}{hex.EncodeToString(id[:plen/2]), sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]})
if len(sn.Paths) > 1 {
for _, path := range sn.Paths[1:] {
tab.Rows = append(tab.Rows, []interface{}{"", "", "", path})
firstTag := ""
if len(sn.Tags) > 0 {
firstTag = sn.Tags[0]
}
rows := len(sn.Paths)
if rows < len(sn.Tags) {
rows = len(sn.Tags)
}
treeElement := " "
if rows != 1 {
treeElement = "┌──"
}
tab.Rows = append(tab.Rows, []interface{}{sn.ID().Str(), sn.Time.Format(TimeFormat), sn.Hostname, firstTag, treeElement, sn.Paths[0]})
if len(sn.Tags) > rows {
rows = len(sn.Tags)
}
for i := 1; i < rows; i++ {
path := ""
if len(sn.Paths) > i {
path = sn.Paths[i]
}
tag := ""
if len(sn.Tags) > i {
tag = sn.Tags[i]
}
treeElement := "│"
if i == (rows - 1) {
treeElement = "└──"
}
tab.Rows = append(tab.Rows, []interface{}{"", "", "", tag, treeElement, path})
}
}
tab.Write(os.Stdout)
return nil
tab.Write(stdout)
}
// Snapshot helps to print Snapshots as JSON with their ID included.
type Snapshot struct {
*restic.Snapshot
ID *restic.ID `json:"id"`
}
// printSnapshotsJSON writes the JSON representation of list to stdout.
func printSnapshotsJSON(stdout io.Writer, list restic.Snapshots) error {
var snapshots []Snapshot
for _, sn := range list {
k := Snapshot{
Snapshot: sn,
ID: sn.ID(),
}
snapshots = append(snapshots, k)
}
return json.NewEncoder(stdout).Encode(snapshots)
}
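
printSnapshotsJSON relies on struct embedding: wrapping *restic.Snapshot in a struct that adds an ID field makes the id show up alongside the embedded fields in the JSON output without changing the inner type. The same trick in miniature:

package main

import (
    "encoding/json"
    "fmt"
)

type inner struct {
    Hostname string `json:"hostname"`
}

// withID embeds *inner, so inner's fields are flattened into the output
// next to the extra ID field.
type withID struct {
    *inner
    ID string `json:"id"`
}

func main() {
    out, _ := json.Marshal(withID{inner: &inner{Hostname: "box"}, ID: "c3ab8ff1"})
    fmt.Println(string(out)) // {"hostname":"box","id":"c3ab8ff1"}
}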

src/cmds/restic/cmd_tag.go (new file, 142 lines)

@@ -0,0 +1,142 @@
package main
import (
"context"
"github.com/spf13/cobra"
"restic"
"restic/debug"
"restic/errors"
"restic/repository"
)
var cmdTag = &cobra.Command{
Use: "tag [flags] [snapshot-ID ...]",
Short: "modifies tags on snapshots",
Long: `
The "tag" command allows you to modify tags on exiting snapshots.
You can either set/replace the entire set of tags on a snapshot, or
add tags to/remove tags from the existing set.
When no snapshot-ID is given, all snapshots matching the host, tag and path filter criteria are modified.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runTag(tagOptions, globalOptions, args)
},
}
// TagOptions bundles all options for the 'tag' command.
type TagOptions struct {
Host string
Paths []string
Tags []string
SetTags []string
AddTags []string
RemoveTags []string
}
var tagOptions TagOptions
func init() {
cmdRoot.AddCommand(cmdTag)
tagFlags := cmdTag.Flags()
tagFlags.StringSliceVar(&tagOptions.SetTags, "set", nil, "`tag` which will replace the existing tags (can be given multiple times)")
tagFlags.StringSliceVar(&tagOptions.AddTags, "add", nil, "`tag` which will be added to the existing tags (can be given multiple times)")
tagFlags.StringSliceVar(&tagOptions.RemoveTags, "remove", nil, "`tag` which will be removed from the existing tags (can be given multiple times)")
tagFlags.StringVarP(&tagOptions.Host, "host", "H", "", "only consider snapshots for this `host`, when no snapshot ID is given")
tagFlags.StringSliceVar(&tagOptions.Tags, "tag", nil, "only consider snapshots which include this `tag`, when no snapshot-ID is given")
tagFlags.StringSliceVar(&tagOptions.Paths, "path", nil, "only consider snapshots which include this (absolute) `path`, when no snapshot-ID is given")
}
func changeTags(repo *repository.Repository, sn *restic.Snapshot, setTags, addTags, removeTags []string) (bool, error) {
var changed bool
if len(setTags) != 0 {
// Setting the tag to an empty string really means no tags.
if len(setTags) == 1 && setTags[0] == "" {
setTags = nil
}
sn.Tags = setTags
changed = true
} else {
changed = sn.AddTags(addTags)
if sn.RemoveTags(removeTags) {
changed = true
}
}
if changed {
// Retain the original snapshot id over all tag changes.
if sn.Original == nil {
sn.Original = sn.ID()
}
// Save the new snapshot.
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return false, err
}
debug.Log("new snapshot saved as %v", id.Str())
if err = repo.Flush(); err != nil {
return false, err
}
// Remove the old snapshot.
h := restic.Handle{Type: restic.SnapshotFile, Name: sn.ID().String()}
if err = repo.Backend().Remove(h); err != nil {
return false, err
}
debug.Log("old snapshot %v removed", sn.ID())
}
return changed, nil
}
func runTag(opts TagOptions, gopts GlobalOptions, args []string) error {
if len(opts.SetTags) == 0 && len(opts.AddTags) == 0 && len(opts.RemoveTags) == 0 {
return errors.Fatal("nothing to do!")
}
if len(opts.SetTags) != 0 && (len(opts.AddTags) != 0 || len(opts.RemoveTags) != 0) {
return errors.Fatal("--set and --add/--remove cannot be given at the same time")
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
if !gopts.NoLock {
Verbosef("Create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
}
changeCnt := 0
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
changed, err := changeTags(repo, sn, opts.SetTags, opts.AddTags, opts.RemoveTags)
if err != nil {
Warnf("unable to modify the tags for snapshot ID %q, ignoring: %v\n", sn.ID(), err)
continue
}
if changed {
changeCnt++
}
}
if changeCnt == 0 {
Verbosef("No snapshots were modified\n")
} else {
Verbosef("Modified tags on %v snapshots\n", changeCnt)
}
return nil
}
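
changeTags rewrites the snapshot file (snapshots are content-addressed, so any edit produces a new ID) but pins Original to the first ID exactly once, so repeated retagging still points back to the snapshot that backup created. A toy model of that bookkeeping:

package main

import "fmt"

type snap struct {
    id       string
    original string
}

// retag gives the snapshot its new content-derived ID while recording the
// very first ID only once, as changeTags does with sn.Original.
func retag(s *snap, newID string) {
    if s.original == "" {
        s.original = s.id
    }
    s.id = newID
}

func main() {
    s := snap{id: "aaa111"}
    retag(&s, "bbb222")
    retag(&s, "ccc333")
    fmt.Println(s.id, "original:", s.original) // ccc333 original: aaa111
}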


@@ -1,35 +1,43 @@
package main
import "restic"
import (
"restic"
"github.com/spf13/cobra"
)
type CmdUnlock struct {
RemoveAll bool `long:"remove-all" description:"Remove all locks, even stale ones"`
global *GlobalOptions
var unlockCmd = &cobra.Command{
Use: "unlock",
Short: "remove locks other processes created",
Long: `
The "unlock" command removes stale locks that have been created by other restic processes.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runUnlock(unlockOptions, globalOptions)
},
}
// UnlockOptions collects all options for the unlock command.
type UnlockOptions struct {
RemoveAll bool
}
var unlockOptions UnlockOptions
func init() {
_, err := parser.AddCommand("unlock",
"remove locks",
"The unlock command checks for stale locks and removes them",
&CmdUnlock{global: &globalOpts})
if err != nil {
panic(err)
}
cmdRoot.AddCommand(unlockCmd)
unlockCmd.Flags().BoolVar(&unlockOptions.RemoveAll, "remove-all", false, "remove all locks, even non-stale ones")
}
func (cmd CmdUnlock) Usage() string {
return "[unlock-options]"
}
func (cmd CmdUnlock) Execute(args []string) error {
repo, err := cmd.global.OpenRepository()
func runUnlock(opts UnlockOptions, gopts GlobalOptions) error {
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
fn := restic.RemoveStaleLocks
if cmd.RemoveAll {
if opts.RemoveAll {
fn = restic.RemoveAllLocks
}
@@ -38,6 +46,6 @@ func (cmd CmdUnlock) Execute(args []string) error {
return err
}
cmd.global.Verbosef("successfully removed locks\n")
Verbosef("successfully removed locks\n")
return nil
}


@@ -3,23 +3,23 @@ package main
import (
"fmt"
"runtime"
"github.com/spf13/cobra"
)
type CmdVersion struct{}
var versionCmd = &cobra.Command{
Use: "version",
Short: "print version information",
Long: `
The "version" command prints detailed information about the build environment
and the version of this software.
`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("restic %s\ncompiled with %v on %v/%v\n",
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
},
}
func init() {
_, err := parser.AddCommand("version",
"display version",
"The version command displays detailed information about the version",
&CmdVersion{})
if err != nil {
panic(err)
}
}
func (cmd CmdVersion) Execute(args []string) error {
fmt.Printf("restic %s\ncompiled at %s with %v\n",
version, compiledAt, runtime.Version())
return nil
cmdRoot.AddCommand(versionCmd)
}

src/cmds/restic/find.go (new file, 78 lines)

@@ -0,0 +1,78 @@
package main
import (
"context"
"restic"
"restic/repository"
)
// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, host string, tags []string, paths []string, snapshotIDs []string) <-chan *restic.Snapshot {
out := make(chan *restic.Snapshot)
go func() {
defer close(out)
if len(snapshotIDs) != 0 {
var (
id restic.ID
usedFilter bool
err error
)
ids := make(restic.IDs, 0, len(snapshotIDs))
// Process all snapshot IDs given as arguments.
for _, s := range snapshotIDs {
if s == "latest" {
id, err = restic.FindLatestSnapshot(repo, paths, tags, host)
if err != nil {
Warnf("Ignoring %q, no snapshot matched given filter (Paths:%v Tags:%v Host:%v)\n", s, paths, tags, host)
usedFilter = true
continue
}
} else {
id, err = restic.FindSnapshot(repo, s)
if err != nil {
Warnf("Ignoring %q, it is not a snapshot id\n", s)
continue
}
}
ids = append(ids, id)
}
// Give the user some indication their filters are not used.
if !usedFilter && (host != "" || len(tags) != 0 || len(paths) != 0) {
Warnf("Ignoring filters as there are explicit snapshot ids given\n")
}
for _, id := range ids.Uniq() {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
continue
}
select {
case <-ctx.Done():
return
case out <- sn:
}
}
return
}
for id := range repo.List(restic.SnapshotFile, ctx.Done()) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
continue
}
if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
continue
}
select {
case <-ctx.Done():
return
case out <- sn:
}
}
}()
return out
}
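
For callers, the helper composes with a cancellable context. A minimal sketch, assuming `repo` and `gopts` are set up the way the commands above set them up:

ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()

// List the IDs of all snapshots from host "myhost"; nil tag, path and
// ID lists mean "no restriction".
for sn := range FindFilteredSnapshots(ctx, repo, "myhost", nil, nil, nil) {
    Verbosef("%v\n", sn.ID().Str())
}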

src/cmds/restic/format.go (new file)

@@ -0,0 +1,84 @@
package main
import (
"fmt"
"os"
"path/filepath"
"time"
"restic"
)
func formatBytes(c uint64) string {
b := float64(c)
switch {
case c > 1<<40:
return fmt.Sprintf("%.3f TiB", b/(1<<40))
case c > 1<<30:
return fmt.Sprintf("%.3f GiB", b/(1<<30))
case c > 1<<20:
return fmt.Sprintf("%.3f MiB", b/(1<<20))
case c > 1<<10:
return fmt.Sprintf("%.3f KiB", b/(1<<10))
default:
return fmt.Sprintf("%dB", c)
}
}
func formatSeconds(sec uint64) string {
hours := sec / 3600
sec -= hours * 3600
min := sec / 60
sec -= min * 60
if hours > 0 {
return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
}
return fmt.Sprintf("%d:%02d", min, sec)
}
func formatPercent(numerator uint64, denominator uint64) string {
if denominator == 0 {
return ""
}
percent := 100.0 * float64(numerator) / float64(denominator)
if percent > 100 {
percent = 100
}
return fmt.Sprintf("%3.2f%%", percent)
}
func formatRate(bytes uint64, duration time.Duration) string {
sec := float64(duration) / float64(time.Second)
rate := float64(bytes) / sec / (1 << 20)
return fmt.Sprintf("%.2fMiB/s", rate)
}
func formatDuration(d time.Duration) string {
sec := uint64(d / time.Second)
return formatSeconds(sec)
}
func formatNode(prefix string, n *restic.Node, long bool) string {
if !long {
return filepath.Join(prefix, n.Name)
}
switch n.Type {
case "file":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
n.Mode, n.UID, n.GID, n.Size, n.ModTime.Format(TimeFormat), filepath.Join(prefix, n.Name))
case "dir":
return fmt.Sprintf("%s %5d %5d %6d %s %s",
n.Mode|os.ModeDir, n.UID, n.GID, n.Size, n.ModTime.Format(TimeFormat), filepath.Join(prefix, n.Name))
case "symlink":
return fmt.Sprintf("%s %5d %5d %6d %s %s -> %s",
n.Mode|os.ModeSymlink, n.UID, n.GID, n.Size, n.ModTime.Format(TimeFormat), filepath.Join(prefix, n.Name), n.LinkTarget)
default:
return fmt.Sprintf("<Node(%s) %s>", n.Type, n.Name)
}
}
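
As a quick sanity check, the helpers above behave like this (a sketch; the values follow directly from the format verbs used):

fmt.Println(formatBytes(2048))                // 2.000 KiB
fmt.Println(formatSeconds(3661))              // 1:01:01
fmt.Println(formatPercent(50, 200))           // 25.00%
fmt.Println(formatDuration(90 * time.Second)) // 1:30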

@@ -1,14 +1,16 @@
package main
import (
"errors"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"restic"
"runtime"
"strings"
"syscall"
"restic/backend"
"restic/backend/local"
"restic/backend/rest"
"restic/backend/s3"
@@ -17,27 +19,52 @@ import (
"restic/location"
"restic/repository"
"github.com/jessevdk/go-flags"
"restic/errors"
"golang.org/x/crypto/ssh/terminal"
)
var version = "compiled manually"
var compiledAt = "unknown time"
// GlobalOptions holds all those options that can be set for every command.
// GlobalOptions holds all global options for restic.
type GlobalOptions struct {
Repo string `short:"r" long:"repo" description:"Repository directory to backup to/restore from"`
CacheDir string ` long:"cache-dir" description:"Directory to use as a local cache"`
Quiet bool `short:"q" long:"quiet" default:"false" description:"Do not output comprehensive progress report"`
NoLock bool ` long:"no-lock" default:"false" description:"Do not lock the repo, this allows some operations on read-only repos."`
Options []string `short:"o" long:"option" description:"Specify options in the form 'foo.key=value'"`
Repo string
PasswordFile string
Quiet bool
NoLock bool
JSON bool
ctx context.Context
password string
stdout io.Writer
stderr io.Writer
}
var globalOptions = GlobalOptions{
stdout: os.Stdout,
stderr: os.Stderr,
}
func init() {
pw := os.Getenv("RESTIC_PASSWORD")
if pw != "" {
globalOptions.password = pw
}
var cancel context.CancelFunc
globalOptions.ctx, cancel = context.WithCancel(context.Background())
AddCleanupHandler(func() error {
cancel()
return nil
})
f := cmdRoot.PersistentFlags()
f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", "", "read the repository password from a file")
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
restoreTerminal()
}
@@ -56,14 +83,30 @@ func checkErrno(err error) error {
return err
}
func stdinIsTerminal() bool {
return terminal.IsTerminal(int(os.Stdin.Fd()))
}
func stdoutIsTerminal() bool {
return terminal.IsTerminal(int(os.Stdout.Fd()))
}
func stdoutTerminalWidth() int {
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err != nil {
return 0
}
return w
}
// restoreTerminal installs a cleanup handler that restores the previous
// terminal state on exit.
func restoreTerminal() {
fd := int(os.Stdout.Fd())
if !terminal.IsTerminal(fd) {
if !stdoutIsTerminal() {
return
}
fd := int(os.Stdout.Fd())
state, err := terminal.GetState(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
@@ -79,58 +122,80 @@ func restoreTerminal() {
})
}
var globalOpts = GlobalOptions{stdout: os.Stdout, stderr: os.Stderr}
var parser = flags.NewParser(&globalOpts, flags.HelpFlag|flags.PassDoubleDash)
// ClearLine creates a platform-dependent string to clear the current
// line, so it can be overwritten. ANSI sequences are not supported on
// the current Windows cmd shell.
func ClearLine() string {
if runtime.GOOS == "windows" {
if w := stdoutTerminalWidth(); w > 0 {
return strings.Repeat(" ", w-1) + "\r"
}
return ""
}
return "\x1b[2K"
}
// Printf writes the message to the configured stdout stream.
func (o GlobalOptions) Printf(format string, args ...interface{}) {
_, err := fmt.Fprintf(o.stdout, format, args...)
func Printf(format string, args ...interface{}) {
_, err := fmt.Fprintf(globalOptions.stdout, format, args...)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err)
os.Exit(100)
Exit(100)
}
}
// Verbosef calls Printf to write the message when the verbose flag is set.
func (o GlobalOptions) Verbosef(format string, args ...interface{}) {
if o.Quiet {
func Verbosef(format string, args ...interface{}) {
if globalOptions.Quiet {
return
}
o.Printf(format, args...)
Printf(format, args...)
}
// ShowProgress returns true iff the progress status should be written, i.e.
// the quiet flag is not set and the output is a terminal.
func (o GlobalOptions) ShowProgress() bool {
if o.Quiet {
return false
// PrintProgress wraps fmt.Printf to handle the difference in writing progress
// information to terminals and non-terminal stdout.
func PrintProgress(format string, args ...interface{}) {
var (
message string
carriageControl string
)
message = fmt.Sprintf(format, args...)
if !(strings.HasSuffix(message, "\r") || strings.HasSuffix(message, "\n")) {
if stdoutIsTerminal() {
carriageControl = "\r"
} else {
carriageControl = "\n"
}
message = fmt.Sprintf("%s%s", message, carriageControl)
}
if !terminal.IsTerminal(int(os.Stdout.Fd())) {
return false
if stdoutIsTerminal() {
message = fmt.Sprintf("%s%s", ClearLine(), message)
}
return true
fmt.Print(message)
}
// Warnf writes the message to the configured stderr stream.
func (o GlobalOptions) Warnf(format string, args ...interface{}) {
_, err := fmt.Fprintf(o.stderr, format, args...)
func Warnf(format string, args ...interface{}) {
_, err := fmt.Fprintf(globalOptions.stderr, format, args...)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
os.Exit(100)
Exit(100)
}
}
// Exitf uses Warnf to write the message and then calls os.Exit(exitcode).
func (o GlobalOptions) Exitf(exitcode int, format string, args ...interface{}) {
// Exitf uses Warnf to write the message and then terminates the process with
// the given exit code.
func Exitf(exitcode int, format string, args ...interface{}) {
if format[len(format)-1] != '\n' {
format += "\n"
}
o.Warnf(format, args...)
os.Exit(exitcode)
Warnf(format, args...)
Exit(exitcode)
}
// readPassword reads the password from the given reader directly.
@@ -139,8 +204,8 @@ func readPassword(in io.Reader) (password string, err error) {
n, err := io.ReadFull(in, buf)
buf = buf[:n]
if err != nil && err != io.ErrUnexpectedEOF {
return "", err
if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
return "", errors.Wrap(err, "ReadFull")
}
return strings.TrimRight(string(buf), "\r\n"), nil
@@ -154,89 +219,113 @@ func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password s
buf, err := terminal.ReadPassword(int(in.Fd()))
fmt.Fprintln(out)
if err != nil {
return "", err
return "", errors.Wrap(err, "ReadPassword")
}
password = string(buf)
return password, nil
}
// ReadPassword reads the password from stdin.
func (o GlobalOptions) ReadPassword(prompt string) string {
// ReadPassword reads the password from a password file or the environment
// variable RESTIC_PASSWORD, or prompts the user for it.
func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
if opts.PasswordFile != "" {
s, err := ioutil.ReadFile(opts.PasswordFile)
return strings.TrimSpace(string(s)), errors.Wrap(err, "Readfile")
}
if pwd := os.Getenv("RESTIC_PASSWORD"); pwd != "" {
return pwd, nil
}
var (
password string
err error
)
if terminal.IsTerminal(int(os.Stdin.Fd())) {
if stdinIsTerminal() {
password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
} else {
password, err = readPassword(os.Stdin)
}
if err != nil {
o.Exitf(2, "unable to read password: %v", err)
return "", errors.Wrap(err, "unable to read password")
}
if len(password) == 0 {
o.Exitf(1, "an empty password is not a password")
return "", errors.Fatal("an empty password is not a password")
}
return password
return password, nil
}
// ReadPasswordTwice calls ReadPassword two times and returns an error when the
// passwords don't match.
func (o GlobalOptions) ReadPasswordTwice(prompt1, prompt2 string) string {
pw1 := o.ReadPassword(prompt1)
pw2 := o.ReadPassword(prompt2)
if pw1 != pw2 {
o.Exitf(1, "passwords do not match")
func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
pw1, err := ReadPassword(gopts, prompt1)
if err != nil {
return "", err
}
pw2, err := ReadPassword(gopts, prompt2)
if err != nil {
return "", err
}
return pw1
if pw1 != pw2 {
return "", errors.Fatal("passwords do not match")
}
return pw1, nil
}
const maxKeys = 20
// OpenRepository reads the password and opens the repository.
func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
if o.Repo == "" {
return nil, errors.New("Please specify repository location (-r)")
func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
if opts.Repo == "" {
return nil, errors.Fatal("Please specify repository location (-r)")
}
be, err := open(o.Repo)
be, err := open(opts.Repo)
if err != nil {
return nil, err
}
s := repository.New(be)
if o.password == "" {
o.password = o.ReadPassword("enter password for repository: ")
if opts.password == "" {
opts.password, err = ReadPassword(opts, "enter password for repository: ")
if err != nil {
return nil, err
}
}
err = s.SearchKey(o.password)
err = s.SearchKey(opts.password, maxKeys)
if err != nil {
return nil, fmt.Errorf("unable to open repo: %v", err)
return nil, errors.Fatalf("unable to open repo: %v", err)
}
return s, nil
}
// Open the backend specified by a location config.
func open(s string) (backend.Backend, error) {
debug.Log("open", "parsing location %v", s)
func open(s string) (restic.Backend, error) {
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
return nil, err
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}
var be restic.Backend
switch loc.Scheme {
case "local":
debug.Log("open", "opening local repository at %#v", loc.Config)
return local.Open(loc.Config.(string))
debug.Log("opening local repository at %#v", loc.Config)
be, err = local.Open(loc.Config.(string))
case "sftp":
debug.Log("open", "opening sftp repository at %#v", loc.Config)
return sftp.OpenWithConfig(loc.Config.(sftp.Config))
debug.Log("opening sftp repository at %#v", loc.Config)
be, err = sftp.OpenWithConfig(loc.Config.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
if cfg.KeyID == "" {
@@ -247,19 +336,24 @@ func open(s string) (backend.Backend, error) {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}
debug.Log("open", "opening s3 repository at %#v", cfg)
return s3.Open(cfg)
debug.Log("opening s3 repository at %#v", cfg)
be, err = s3.Open(cfg)
case "rest":
return rest.Open(loc.Config.(rest.Config))
be, err = rest.Open(loc.Config.(rest.Config))
default:
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}
debug.Log("open", "invalid repository location: %v", s)
return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
if err != nil {
return nil, errors.Fatalf("unable to open repo at %v: %v", s, err)
}
return be, nil
}
// Create the backend specified by URI.
func create(s string) (backend.Backend, error) {
debug.Log("open", "parsing location %v", s)
func create(s string) (restic.Backend, error) {
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
return nil, err
@@ -267,10 +361,10 @@ func create(s string) (backend.Backend, error) {
switch loc.Scheme {
case "local":
debug.Log("open", "create local repository at %#v", loc.Config)
debug.Log("create local repository at %#v", loc.Config)
return local.Create(loc.Config.(string))
case "sftp":
debug.Log("open", "create sftp repository at %#v", loc.Config)
debug.Log("create sftp repository at %#v", loc.Config)
return sftp.CreateWithConfig(loc.Config.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
@@ -282,12 +376,12 @@ func create(s string) (backend.Backend, error) {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}
debug.Log("open", "create s3 repository at %#v", loc.Config)
debug.Log("create s3 repository at %#v", loc.Config)
return s3.Open(cfg)
case "rest":
return rest.Open(loc.Config.(rest.Config))
}
debug.Log("open", "invalid repository scheme: %v", s)
return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
debug.Log("invalid repository scheme: %v", s)
return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
}
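
The ctx wiring in init above is the standard cancellation pattern: one root context is cancelled from the interrupt/cleanup path and every command derives from it. A self-contained sketch of the same idea using only the standard library:

package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Cancel the root context on the first interrupt (ctrl-C).
    ch := make(chan os.Signal, 1)
    signal.Notify(ch, os.Interrupt)
    go func() {
        <-ch
        cancel()
    }()

    <-ctx.Done() // real work would select on ctx.Done() instead of blocking
    fmt.Println("cancelled:", ctx.Err())
}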

@@ -0,0 +1,60 @@
// +build debug
package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"restic/errors"
"github.com/pkg/profile"
)
var (
listenMemoryProfile string
memProfilePath string
cpuProfilePath string
prof interface {
Stop()
}
)
func init() {
f := cmdRoot.PersistentFlags()
f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
}
func runDebug() error {
if listenMemoryProfile != "" {
fmt.Fprintf(os.Stderr, "running memory profile HTTP server on %v\n", listenMemoryProfile)
go func() {
err := http.ListenAndServe(listenMemoryProfile, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "memory profile listen failed: %v\n", err)
}
}()
}
if memProfilePath != "" && cpuProfilePath != "" {
return errors.Fatal("only one profile (memory or CPU) may be activated at the same time")
}
if memProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.MemProfile, profile.ProfilePath(memProfilePath))
} else if cpuProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
}
return nil
}
func shutdownDebug() {
if prof != nil {
prof.Stop()
}
}

@@ -0,0 +1,9 @@
// +build !debug
package main
// runDebug is a noop without the debug tag.
func runDebug() error { return nil }
// shutdownDebug is a noop without the debug tag.
func shutdownDebug() {}
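
Together with the file above, this forms the usual build-tag pair: the profiling hooks are compiled in only when building with the `debug` tag (e.g. `go build -tags debug`), while normal builds get these no-op stubs, so cmdRoot can call runDebug and shutdownDebug unconditionally.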

@@ -1,3 +1,4 @@
// +build ignore
// +build !openbsd
// +build !windows
@@ -12,7 +13,6 @@ import (
"time"
"restic"
"restic/backend"
"restic/repository"
. "restic/test"
)
@@ -23,45 +23,119 @@ const (
mountTestSubdir = "snapshots"
)
func snapshotsDirExists(t testing.TB, dir string) bool {
f, err := os.Open(filepath.Join(dir, mountTestSubdir))
if err != nil && os.IsNotExist(err) {
return false
}
if err != nil {
t.Error(err)
}
if err := f.Close(); err != nil {
t.Error(err)
}
return true
}
// waitForMount blocks (max mountWait * mountSleep) until the subdir
// "snapshots" appears in the dir.
func waitForMount(dir string) error {
func waitForMount(t testing.TB, dir string) {
for i := 0; i < mountWait; i++ {
f, err := os.Open(dir)
if err != nil {
return err
}
names, err := f.Readdirnames(-1)
if err != nil {
return err
}
if err = f.Close(); err != nil {
return err
}
for _, name := range names {
if name == mountTestSubdir {
return nil
}
if snapshotsDirExists(t, dir) {
t.Log("mounted directory is ready")
return
}
time.Sleep(mountSleep)
}
return fmt.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
}
func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) {
defer func() {
ready <- struct{}{}
}()
func mount(t testing.TB, global GlobalOptions, dir string) {
cmd := &CmdMount{global: &global}
OK(t, cmd.Mount(dir))
}
cmd := &CmdMount{global: &global, ready: ready, done: done}
OK(t, cmd.Execute([]string{dir}))
if TestCleanupTempDirs {
RemoveAll(t, dir)
func umount(t testing.TB, global GlobalOptions, dir string) {
cmd := &CmdMount{global: &global}
var err error
for i := 0; i < mountWait; i++ {
if err = cmd.Umount(dir); err == nil {
t.Logf("directory %v umounted", dir)
return
}
time.Sleep(mountSleep)
}
t.Errorf("unable to umount dir %v, last error was: %v", dir, err)
}
func listSnapshots(t testing.TB, dir string) []string {
snapshotsDir, err := os.Open(filepath.Join(dir, "snapshots"))
OK(t, err)
names, err := snapshotsDir.Readdirnames(-1)
OK(t, err)
OK(t, snapshotsDir.Close())
return names
}
func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
go mount(t, global, mountpoint)
waitForMount(t, mountpoint)
defer umount(t, global, mountpoint)
if !snapshotsDirExists(t, mountpoint) {
t.Fatal(`virtual directory "snapshots" doesn't exist`)
}
ids := listSnapshots(t, repodir)
t.Logf("found %v snapshots in repo: %v", len(ids), ids)
namesInSnapshots := listSnapshots(t, mountpoint)
t.Logf("found %v snapshots in fuse mount: %v", len(namesInSnapshots), namesInSnapshots)
Assert(t,
len(namesInSnapshots) == len(snapshotIDs),
"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))
namesMap := make(map[string]bool)
for _, name := range namesInSnapshots {
namesMap[name] = false
}
for _, id := range snapshotIDs {
snapshot, err := restic.LoadSnapshot(repo, id)
OK(t, err)
ts := snapshot.Time.Format(time.RFC3339)
present, ok := namesMap[ts]
if !ok {
t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
}
for i := 1; present; i++ {
ts = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
present, ok = namesMap[ts]
if !ok {
t.Errorf("Snapshot %v (%q) isn't present in fuse dir", id.Str(), ts)
}
if !present {
break
}
}
namesMap[ts] = true
}
for name, present := range namesMap {
Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
}
}
@@ -70,34 +144,8 @@ func TestMount(t *testing.T) {
t.Skip("Skipping fuse tests")
}
checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []backend.ID) {
snapshotsDir, err := os.Open(filepath.Join(mountpoint, "snapshots"))
OK(t, err)
namesInSnapshots, err := snapshotsDir.Readdirnames(-1)
OK(t, err)
Assert(t,
len(namesInSnapshots) == len(snapshotIDs),
"Invalid number of snapshots: expected %d, got %d", len(snapshotIDs), len(namesInSnapshots))
namesMap := make(map[string]bool)
for _, name := range namesInSnapshots {
namesMap[name] = false
}
for _, id := range snapshotIDs {
snapshot, err := restic.LoadSnapshot(repo, id)
OK(t, err)
_, ok := namesMap[snapshot.Time.Format(time.RFC3339)]
Assert(t, ok, "Snapshot %s isn't present in fuse dir", snapshot.Time.Format(time.RFC3339))
namesMap[snapshot.Time.Format(time.RFC3339)] = true
}
for name, present := range namesMap {
Assert(t, present, "Directory %s is present in fuse dir but is not a snapshot", name)
}
OK(t, snapshotsDir.Close())
}
withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
cmdInit(t, global)
repo, err := global.OpenRepository()
OK(t, err)
@@ -108,32 +156,9 @@ func TestMount(t *testing.T) {
// We remove the mountpoint now to check that cmdMount creates it
RemoveAll(t, mountpoint)
ready := make(chan struct{}, 2)
done := make(chan struct{})
go cmdMount(t, global, mountpoint, ready, done)
<-ready
defer close(done)
OK(t, waitForMount(mountpoint))
checkSnapshots(t, global, repo, mountpoint, env.repo, []restic.ID{})
mountpointDir, err := os.Open(mountpoint)
OK(t, err)
names, err := mountpointDir.Readdirnames(-1)
OK(t, err)
Assert(t, len(names) == 1 && names[0] == "snapshots", `The fuse virtual directory "snapshots" doesn't exist`)
OK(t, mountpointDir.Close())
checkSnapshots(repo, mountpoint, []backend.ID{})
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(err) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
OK(t, err)
OK(t, fd.Close())
SetupTarTestFixture(t, env.testdata, datafile)
SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
// first backup
cmdBackup(t, global, []string{env.testdata}, nil)
@@ -141,7 +166,7 @@ func TestMount(t *testing.T) {
Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
checkSnapshots(repo, mountpoint, snapshotIDs)
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
// second backup, implicit incremental
cmdBackup(t, global, []string{env.testdata}, nil)
@@ -149,7 +174,7 @@ func TestMount(t *testing.T) {
Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
checkSnapshots(repo, mountpoint, snapshotIDs)
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
// third backup, explicit incremental
cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
@@ -157,6 +182,30 @@ func TestMount(t *testing.T) {
Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)
checkSnapshots(repo, mountpoint, snapshotIDs)
checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
})
}
func TestMountSameTimestamps(t *testing.T) {
if !RunFuseTest {
t.Skip("Skipping fuse tests")
}
withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
repo, err := global.OpenRepository()
OK(t, err)
mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
OK(t, err)
ids := []restic.ID{
restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"),
restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
}
checkSnapshots(t, global, repo, mountpoint, env.repo, ids)
})
}

@@ -1,6 +1,7 @@
package main
import (
"context"
"fmt"
"io/ioutil"
"os"
@@ -8,12 +9,14 @@ import (
"runtime"
"testing"
"restic/repository"
. "restic/test"
)
type dirEntry struct {
path string
fi os.FileInfo
link uint64
}
func walkDir(dir string) <-chan *dirEntry {
@@ -35,6 +38,7 @@ func walkDir(dir string) <-chan *dirEntry {
ch <- &dirEntry{
path: name,
fi: info,
link: nlink(info),
}
return nil
@@ -165,27 +169,6 @@ type testEnvironment struct {
base, cache, repo, testdata string
}
func configureRestic(t testing.TB, cache, repo string) GlobalOptions {
return GlobalOptions{
CacheDir: cache,
Repo: repo,
Quiet: true,
password: TestPassword,
stdout: os.Stdout,
stderr: os.Stderr,
}
}
func cleanupTempdir(t testing.TB, tempdir string) {
if !TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir)
return
}
RemoveAll(t, tempdir)
}
// withTestEnvironment creates a test environment and calls f with it. After f has
// returned, the temporary directory is removed.
func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) {
@@ -193,6 +176,8 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
t.Skip("integration tests disabled")
}
repository.TestUseLowSecurityKDFParameters(t)
tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
OK(t, err)
@@ -207,7 +192,19 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
OK(t, os.MkdirAll(env.cache, 0700))
OK(t, os.MkdirAll(env.repo, 0700))
f(&env, configureRestic(t, env.cache, env.repo))
gopts := GlobalOptions{
Repo: env.repo,
Quiet: true,
ctx: context.Background(),
password: TestPassword,
stdout: os.Stdout,
stderr: os.Stderr,
}
// always overwrite global options
globalOptions = gopts
f(&env, gopts)
if !TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir)
@@ -216,13 +213,3 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
RemoveAll(t, tempdir)
}
// removeFile resets the read-only flag and then deletes the file.
func removeFile(fn string) error {
err := os.Chmod(fn, 0666)
if err != nil {
return err
}
return os.Remove(fn)
}

@@ -4,7 +4,9 @@ package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
)
@@ -37,5 +39,37 @@ func (e *dirEntry) equals(other *dirEntry) bool {
return false
}
if stat.Nlink != stat2.Nlink {
fmt.Fprintf(os.Stderr, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink)
return false
}
return true
}
func nlink(info os.FileInfo) uint64 {
stat, _ := info.Sys().(*syscall.Stat_t)
return uint64(stat.Nlink)
}
func inode(info os.FileInfo) uint64 {
stat, _ := info.Sys().(*syscall.Stat_t)
return uint64(stat.Ino)
}
func createFileSetPerHardlink(dir string) map[uint64][]string {
var stat syscall.Stat_t
linkTests := make(map[uint64][]string)
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil
}
for _, f := range files {
if err := syscall.Stat(filepath.Join(dir, f.Name()), &stat); err != nil {
return nil
}
linkTests[uint64(stat.Ino)] = append(linkTests[uint64(stat.Ino)], f.Name())
}
return linkTests
}

@@ -4,6 +4,7 @@ package main
import (
"fmt"
"io/ioutil"
"os"
)
@@ -25,3 +26,24 @@ func (e *dirEntry) equals(other *dirEntry) bool {
return true
}
func nlink(info os.FileInfo) uint64 {
return 1
}
func inode(info os.FileInfo) uint64 {
return uint64(0)
}
func createFileSetPerHardlink(dir string) map[uint64][]string {
linkTests := make(map[uint64][]string)
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil
}
for i, f := range files {
linkTests[uint64(i)] = append(linkTests[uint64(i)], f.Name())
}
return linkTests
}

File diff suppressed because it is too large.

@@ -36,10 +36,11 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
if err != nil {
return nil, err
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)
globalLocks.Lock()
if globalLocks.cancelRefresh == nil {
debug.Log("main.lockRepository", "start goroutine for lock refresh")
debug.Log("start goroutine for lock refresh")
globalLocks.cancelRefresh = make(chan struct{})
globalLocks.refreshWG = sync.WaitGroup{}
globalLocks.refreshWG.Add(1)
@@ -55,7 +56,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
var refreshInterval = 5 * time.Minute
func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("main.refreshLocks", "start")
debug.Log("start")
defer func() {
wg.Done()
globalLocks.Lock()
@@ -68,10 +69,10 @@ func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
for {
select {
case <-done:
debug.Log("main.refreshLocks", "terminate")
debug.Log("terminate")
return
case <-ticker.C:
debug.Log("main.refreshLocks", "refreshing locks")
debug.Log("refreshing locks")
globalLocks.Lock()
for _, lock := range globalLocks.locks {
err := lock.Refresh()
@@ -88,9 +89,9 @@ func unlockRepo(lock *restic.Lock) error {
globalLocks.Lock()
defer globalLocks.Unlock()
debug.Log("unlockRepo", "unlocking repository")
debug.Log("unlocking repository with lock %p", lock)
if err := lock.Unlock(); err != nil {
debug.Log("unlockRepo", "error while unlocking: %v", err)
debug.Log("error while unlocking: %v", err)
return err
}
@@ -108,13 +109,13 @@ func unlockAll() error {
globalLocks.Lock()
defer globalLocks.Unlock()
debug.Log("unlockAll", "unlocking %d locks", len(globalLocks.locks))
debug.Log("unlocking %d locks", len(globalLocks.locks))
for _, lock := range globalLocks.locks {
if err := lock.Unlock(); err != nil {
debug.Log("unlockAll", "error while unlocking: %v", err)
debug.Log("error while unlocking: %v", err)
return err
}
debug.Log("unlockAll", "successfully removed lock")
debug.Log("successfully removed lock")
}
return nil
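
The refresh goroutine follows the common ticker-plus-done pattern. A minimal standalone sketch, with the interval shortened and `refresh` standing in for lock.Refresh:

package main

import (
    "fmt"
    "time"
)

func refreshLoop(done <-chan struct{}, refresh func() error) {
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-done:
            return // terminate once the done channel is closed
        case <-ticker.C:
            if err := refresh(); err != nil {
                fmt.Println("refresh failed:", err)
            }
        }
    }
}

func main() {
    done := make(chan struct{})
    go refreshLoop(done, func() error { fmt.Println("refreshing locks"); return nil })
    time.Sleep(3 * time.Second)
    close(done)
}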

@@ -3,43 +3,52 @@ package main
import (
"fmt"
"os"
"runtime"
"github.com/jessevdk/go-flags"
"restic"
"restic/debug"
"github.com/spf13/cobra"
"restic/errors"
)
func init() {
// set GOMAXPROCS to number of CPUs
runtime.GOMAXPROCS(runtime.NumCPU())
// cmdRoot is the base command when no other command has been specified.
var cmdRoot = &cobra.Command{
Use: "restic",
Short: "backup and restore files",
Long: `
restic is a backup program which allows saving multiple revisions of files and
directories in an encrypted repository stored on different backends.
`,
SilenceErrors: true,
SilenceUsage: true,
// run the debug functions for all subcommands (if build tag "debug" is
// enabled)
PersistentPreRunE: func(*cobra.Command, []string) error {
return runDebug()
},
PersistentPostRun: func(*cobra.Command, []string) {
shutdownDebug()
},
}
func main() {
// defer profile.Start(profile.MemProfileRate(100000), profile.ProfilePath(".")).Stop()
// defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
globalOpts.Repo = os.Getenv("RESTIC_REPOSITORY")
globalOpts.password = os.Getenv("RESTIC_PASSWORD")
debug.Log("main %#v", os.Args)
err := cmdRoot.Execute()
debug.Log("restic", "main %#v", os.Args)
_, err := parser.Parse()
if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
parser.WriteHelp(os.Stdout)
os.Exit(0)
}
if err != nil {
switch {
case restic.IsAlreadyLocked(errors.Cause(err)):
fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
case errors.IsFatal(errors.Cause(err)):
fmt.Fprintf(os.Stderr, "%v\n", err)
case err != nil:
fmt.Fprintf(os.Stderr, "%+v\n", err)
}
if restic.IsAlreadyLocked(err) {
fmt.Fprintf(os.Stderr, "\nthe `unlock` command can be used to remove stale locks\n")
}
RunCleanupHandlers()
var exitCode int
if err != nil {
os.Exit(1)
exitCode = 1
}
Exit(exitCode)
}

src/cmds/restic/table.go (new file)

@@ -0,0 +1,46 @@
package main
import (
"fmt"
"io"
"strings"
)
// Table contains data for a table to be printed.
type Table struct {
Header string
Rows [][]interface{}
RowFormat string
}
// NewTable initializes a new Table.
func NewTable() Table {
return Table{
Rows: [][]interface{}{},
}
}
// Write prints the table to w.
func (t Table) Write(w io.Writer) error {
_, err := fmt.Fprintln(w, t.Header)
if err != nil {
return err
}
_, err = fmt.Fprintln(w, strings.Repeat("-", 70))
if err != nil {
return err
}
for _, row := range t.Rows {
_, err = fmt.Fprintf(w, t.RowFormat+"\n", row...)
if err != nil {
return err
}
}
return nil
}
// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"
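
A short sketch of how the Table type is meant to be used from a command (header text and row format are arbitrary here, and fmt/os are assumed to be imported by the caller):

tab := NewTable()
tab.Header = fmt.Sprintf("%-10s  %-19s  %s", "ID", "Date", "Directory")
tab.RowFormat = "%-10s  %-19s  %s"
tab.Rows = append(tab.Rows, []interface{}{"79766175", "2017-03-09 10:20:11", "/home/user"})
if err := tab.Write(os.Stdout); err != nil {
    Warnf("unable to print table: %v\n", err)
}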

src/cmds/restic/testdata/test.hl.tar.gz (new binary file, not shown)

@@ -1,122 +0,0 @@
package restic
import (
"encoding/json"
"io"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/repository"
"time"
"github.com/restic/chunker"
)
// saveTreeJSON stores a tree in the repository.
func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
data, err := json.Marshal(item)
if err != nil {
return backend.ID{}, err
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
if repo.Index().Has(id) {
return id, nil
}
return repo.SaveJSON(pack.Tree, item)
}
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
debug.Log("ArchiveReader", "start archiving %s", name)
sn, err := NewSnapshot([]string{name})
if err != nil {
return nil, backend.ID{}, err
}
p.Start()
defer p.Done()
chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)
var ids backend.IDs
var fileSize uint64
for {
chunk, err := chnker.Next(getBuf())
if err == io.EOF {
break
}
if err != nil {
return nil, backend.ID{}, err
}
id := backend.Hash(chunk.Data)
if !repo.Index().Has(id) {
_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
if err != nil {
return nil, backend.ID{}, err
}
debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
}
freeBuf(chunk.Data)
ids = append(ids, id)
p.Report(Stat{Bytes: uint64(chunk.Length)})
fileSize += uint64(chunk.Length)
}
tree := &Tree{
Nodes: []*Node{
&Node{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
Type: "file",
Mode: 0644,
Size: fileSize,
UID: sn.UID,
GID: sn.GID,
User: sn.Username,
Content: ids,
},
},
}
treeID, err := saveTreeJSON(repo, tree)
if err != nil {
return nil, backend.ID{}, err
}
sn.Tree = &treeID
debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
if err != nil {
return nil, backend.ID{}, err
}
sn.id = &id
debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
err = repo.Flush()
if err != nil {
return nil, backend.ID{}, err
}
err = repo.SaveIndex()
if err != nil {
return nil, backend.ID{}, err
}
return sn, id, nil
}

@@ -1,103 +0,0 @@
package restic
import (
"bytes"
"io"
"math/rand"
"restic/backend"
"restic/pack"
"restic/repository"
"testing"
"github.com/restic/chunker"
)
func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
buf, err := repo.LoadBlob(pack.Data, id, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}
return buf
}
func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) {
tree, err := LoadTree(repo, treeID)
if err != nil {
t.Fatalf("LoadTree() returned error %v", err)
}
if len(tree.Nodes) != 1 {
t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes))
}
node := tree.Nodes[0]
if node.Name != "fakefile" {
t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name)
}
if len(node.Content) == 0 {
t.Fatalf("node.Content has length 0")
}
// check blobs
buf := make([]byte, chunker.MaxSize)
buf2 := make([]byte, chunker.MaxSize)
for i, id := range node.Content {
buf = loadBlob(t, repo, id, buf)
buf2 = buf2[:len(buf)]
_, err = io.ReadFull(rd, buf2)
if !bytes.Equal(buf, buf2) {
t.Fatalf("blob %d (%v) is wrong", i, id.Str())
}
}
}
func TestArchiveReader(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
seed := rand.Int63()
size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
t.Logf("seed is 0x%016x, size is %v", seed, size)
f := fakeFile(t, seed, size)
sn, id, err := ArchiveReader(repo, nil, f, "fakefile")
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
}
func BenchmarkArchiveReader(t *testing.B) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
const size = 50 * 1024 * 1024
buf := make([]byte, size)
_, err := io.ReadFull(fakeFile(t, 23, size), buf)
if err != nil {
t.Fatal(err)
}
t.SetBytes(size)
t.ResetTimer()
for i := 0; i < t.N; i++ {
_, _, err := ArchiveReader(repo, nil, bytes.NewReader(buf), "fakefile")
if err != nil {
t.Fatal(err)
}
}
}

@@ -0,0 +1,115 @@
package archiver
import (
"io"
"restic"
"restic/debug"
"time"
"restic/errors"
"github.com/restic/chunker"
)
// Reader allows saving a stream of data to the repository.
type Reader struct {
restic.Repository
Tags []string
Hostname string
}
// Archive reads data from the reader and saves it to the repo.
func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
if name == "" {
return nil, restic.ID{}, errors.New("no filename given")
}
debug.Log("start archiving %s", name)
sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname)
if err != nil {
return nil, restic.ID{}, err
}
p.Start()
defer p.Done()
repo := r.Repository
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
ids := restic.IDs{}
var fileSize uint64
for {
chunk, err := chnker.Next(getBuf())
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
}
id := restic.Hash(chunk.Data)
if !repo.Index().Has(id, restic.DataBlob) {
_, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
debug.Log("blob %v already saved in the repo\n", id.Str())
}
freeBuf(chunk.Data)
ids = append(ids, id)
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
fileSize += uint64(chunk.Length)
}
tree := &restic.Tree{
Nodes: []*restic.Node{
&restic.Node{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
Type: "file",
Mode: 0644,
Size: fileSize,
UID: sn.UID,
GID: sn.GID,
User: sn.Username,
Content: ids,
},
},
}
treeID, err := repo.SaveTree(tree)
if err != nil {
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("snapshot saved as %v", id.Str())
err = repo.Flush()
if err != nil {
return nil, restic.ID{}, err
}
err = repo.SaveIndex()
if err != nil {
return nil, restic.ID{}, err
}
return sn, id, nil
}
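
Usage mirrors what the tests below exercise. A sketch from inside a command's run function, assuming `repo` is an open restic.Repository and stdin is the stream to back up:

r := &archiver.Reader{
    Repository: repo,
    Hostname:   "localhost",
    Tags:       []string{"stdin"},
}

sn, id, err := r.Archive("stdin", os.Stdin, nil)
if err != nil {
    return err
}
fmt.Printf("archived snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())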

@@ -0,0 +1,203 @@
package archiver
import (
"bytes"
"errors"
"io"
"math/rand"
"restic"
"restic/checker"
"restic/repository"
"testing"
)
func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
n, err := repo.LoadBlob(restic.DataBlob, id, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}
return n
}
func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
tree, err := repo.LoadTree(treeID)
if err != nil {
t.Fatalf("LoadTree() returned error %v", err)
}
if len(tree.Nodes) != 1 {
t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes))
}
node := tree.Nodes[0]
if node.Name != "fakefile" {
t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name)
}
if len(node.Content) == 0 {
t.Fatalf("node.Content has length 0")
}
// check blobs
for i, id := range node.Content {
size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
t.Fatal(err)
}
buf := restic.NewBlobBuffer(int(size))
n := loadBlob(t, repo, id, buf)
if n != len(buf) {
t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
}
buf2 := make([]byte, int(size))
_, err = io.ReadFull(rd, buf2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, buf2) {
t.Fatalf("blob %d (%v) is wrong", i, id.Str())
}
}
}
// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
}
func TestArchiveReader(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
seed := rand.Int63()
size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
t.Logf("seed is 0x%016x, size is %v", seed, size)
f := fakeFile(t, seed, size)
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive("fakefile", f, nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
checker.TestCheckRepo(t, repo)
}
func TestArchiveReaderNull(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive("fakefile", bytes.NewReader(nil), nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checker.TestCheckRepo(t, repo)
}
type errReader string
func (e errReader) Read([]byte) (int, error) {
return 0, errors.New(string(e))
}
func countSnapshots(t testing.TB, repo restic.Repository) int {
done := make(chan struct{})
defer close(done)
snapshots := 0
for range repo.List(restic.SnapshotFile, done) {
snapshots++
}
return snapshots
}
func TestArchiveReaderError(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive("fakefile", errReader("error returned by reading stdin"), nil)
if err == nil {
t.Errorf("expected error not returned")
}
if sn != nil {
t.Errorf("Snapshot should be nil, but isn't")
}
if !id.IsNull() {
t.Errorf("id should be null, but %v returned", id.Str())
}
n := countSnapshots(t, repo)
if n > 0 {
t.Errorf("expected zero snapshots, but got %d", n)
}
checker.TestCheckRepo(t, repo)
}
func BenchmarkArchiveReader(t *testing.B) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
const size = 50 * 1024 * 1024
buf := make([]byte, size)
_, err := io.ReadFull(fakeFile(t, 23, size), buf)
if err != nil {
t.Fatal(err)
}
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
t.SetBytes(size)
t.ResetTimer()
for i := 0; i < t.N; i++ {
_, _, err := r.Archive("fakefile", bytes.NewReader(buf), nil)
if err != nil {
t.Fatal(err)
}
}
}

@@ -1,4 +1,4 @@
package restic
package archiver
import (
"encoding/json"
@@ -6,20 +6,19 @@ import (
"io"
"os"
"path/filepath"
"restic"
"sort"
"sync"
"time"
"restic/backend"
"restic/errors"
"restic/walk"
"restic/debug"
"restic/fs"
"restic/pack"
"restic/pipe"
"restic/repository"
"github.com/restic/chunker"
"github.com/juju/errors"
)
const (
@@ -27,34 +26,36 @@ const (
maxConcurrency = 10
)
var archiverAbortOnAllErrors = func(str string, fi os.FileInfo, err error) error { return err }
var archiverPrintWarnings = func(path string, fi os.FileInfo, err error) {
fmt.Fprintf(os.Stderr, "warning for %v: %v", path, err)
}
var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
// Archiver is used to backup a set of directories.
type Archiver struct {
repo *repository.Repository
repo restic.Repository
knownBlobs struct {
backend.IDSet
restic.IDSet
sync.Mutex
}
blobToken chan struct{}
Error func(dir string, fi os.FileInfo, err error) error
Warn func(dir string, fi os.FileInfo, err error)
SelectFilter pipe.SelectFunc
Excludes []string
}
// NewArchiver returns a new archiver.
func NewArchiver(repo *repository.Repository) *Archiver {
// New returns a new archiver.
func New(repo restic.Repository) *Archiver {
arch := &Archiver{
repo: repo,
blobToken: make(chan struct{}, maxConcurrentBlobs),
knownBlobs: struct {
backend.IDSet
restic.IDSet
sync.Mutex
}{
IDSet: backend.NewIDSet(),
IDSet: restic.NewIDSet(),
},
}
@@ -62,7 +63,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
arch.blobToken <- struct{}{}
}
arch.Error = archiverAbortOnAllErrors
arch.Warn = archiverPrintWarnings
arch.SelectFilter = archiverAllowAllFiles
return arch
@@ -72,7 +73,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
// When the blob is not known, false is returned and the blob is added to the
// list. A caller that receives false is responsible for saving the blob
// to the backend.
func (arch *Archiver) isKnownBlob(id backend.ID) bool {
func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock()
@@ -82,7 +83,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {
arch.knownBlobs.Insert(id)
_, err := arch.repo.Index().Lookup(id)
_, err := arch.repo.Index().Lookup(id, t)
if err == nil {
return true
}
@@ -91,81 +92,78 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool {
}
// Save stores a blob in the repository.
func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id) {
debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
if arch.isKnownBlob(id, restic.DataBlob) {
debug.Log("blob %v is known\n", id.Str())
return nil
}
_, err := arch.repo.SaveAndEncrypt(t, data, &id)
_, err := arch.repo.SaveBlob(t, data, id)
if err != nil {
debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
return err
}
debug.Log("Archiver.Save", "Save(%v, %v): new blob\n", t, id.Str())
debug.Log("Save(%v, %v): new blob\n", t, id.Str())
return nil
}
// SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
data, err := json.Marshal(item)
func (arch *Archiver) SaveTreeJSON(tree *restic.Tree) (restic.ID, error) {
data, err := json.Marshal(tree)
if err != nil {
return backend.ID{}, err
return restic.ID{}, errors.Wrap(err, "Marshal")
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
if arch.isKnownBlob(id) {
id := restic.Hash(data)
if arch.isKnownBlob(id, restic.TreeBlob) {
return id, nil
}
return arch.repo.SaveJSON(pack.Tree, item)
return arch.repo.SaveBlob(restic.TreeBlob, data, id)
}
func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) {
func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
fi, err := file.Stat()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "restic.Stat")
}
if fi.ModTime() == node.ModTime {
return node, nil
}
err = arch.Error(node.path, fi, errors.New("file has changed"))
if err != nil {
return nil, err
}
arch.Warn(node.Path, fi, errors.New("file has changed"))
node, err = NodeFromFileInfo(node.path, fi)
node, err = restic.NodeFromFileInfo(node.Path, fi)
if err != nil {
debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
return nil, err
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
arch.Warn(node.Path, fi, err)
}
return node, nil
}
type saveResult struct {
id backend.ID
id restic.ID
bytes uint64
}
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
defer freeBuf(chunk.Data)
id := backend.Hash(chunk.Data)
err := arch.Save(pack.Data, chunk.Data, id)
id := restic.Hash(chunk.Data)
err := arch.Save(restic.DataBlob, chunk.Data, id)
// TODO handle error
if err != nil {
panic(err)
}
p.Report(Stat{Bytes: uint64(chunk.Length)})
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
arch.blobToken <- token
resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
}
@@ -178,59 +176,61 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
}
if len(results) != len(resultChannels) {
return nil, fmt.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
}
return results, nil
}
func updateNodeContent(node *Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.path)
func updateNodeContent(node *restic.Node, results []saveResult) error {
debug.Log("checking size for file %s", node.Path)
var bytes uint64
node.Content = make([]backend.ID, len(results))
node.Content = make([]restic.ID, len(results))
for i, b := range results {
node.Content[i] = b.id
bytes += b.bytes
debug.Log("Archiver.Save", " adding blob %s, %d bytes", b.id.Str(), b.bytes)
debug.Log(" adding blob %s, %d bytes", b.id.Str(), b.bytes)
}
if bytes != node.Size {
return fmt.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size)
fmt.Fprintf(os.Stderr, "warning for %v: expected %d bytes, saved %d bytes\n", node.Path, node.Size, bytes)
}
debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results))
debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results))
return nil
}
// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
file, err := fs.Open(node.path)
func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.Node, error) {
file, err := fs.Open(node.Path)
defer file.Close()
if err != nil {
return err
return node, errors.Wrap(err, "Open")
}
debug.RunHook("archiver.SaveFile", node.Path)
node, err = arch.reloadFileIfChanged(node, file)
if err != nil {
return err
return node, err
}
chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial)
chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
resultChannels := [](<-chan saveResult){}
for {
chunk, err := chnker.Next(getBuf())
if err == io.EOF {
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return errors.Annotate(err, "SaveFile() chunker.Next()")
return node, errors.Wrap(err, "chunker.Next")
}
resCh := make(chan saveResult, 1)
@@ -240,16 +240,16 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
results, err := waitForResults(resultChannels)
if err != nil {
return err
return node, err
}
err = updateNodeContent(node, results)
return err
return node, err
}
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
defer func() {
debug.Log("Archiver.fileWorker", "done")
debug.Log("done")
wg.Done()
}()
for {
@@ -260,38 +260,35 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
return
}
debug.Log("Archiver.fileWorker", "got job %v", e)
debug.Log("got job %v", e)
// check for errors
if e.Error() != nil {
debug.Log("Archiver.fileWorker", "job %v has errors: %v", e.Path(), e.Error())
debug.Log("job %v has errors: %v", e.Path(), e.Error())
// TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
// ignore this file
e.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
node, err := NodeFromFileInfo(e.Fullpath(), e.Info())
node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
if err != nil {
// TODO: integrate error reporting
debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err)
e.Result() <- nil
p.Report(Stat{Errors: 1})
continue
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
arch.Warn(e.Fullpath(), e.Info(), err)
}
// try to use old node, if present
if e.Node != nil {
debug.Log("Archiver.fileWorker", " %v use old data", e.Path())
debug.Log(" %v use old data", e.Path())
oldNode := e.Node.(*Node)
oldNode := e.Node.(*restic.Node)
// check if all content is still available in the repository
contentMissing := false
for _, blob := range oldNode.blobs {
if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil {
debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
for _, blob := range oldNode.Content {
if !arch.repo.Index().Has(blob, restic.DataBlob) {
debug.Log(" %v not using old data, %v is missing", e.Path(), blob.Str())
contentMissing = true
break
}
@@ -299,33 +296,32 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
if !contentMissing {
node.Content = oldNode.Content
node.blobs = oldNode.blobs
debug.Log("Archiver.fileWorker", " %v content is complete", e.Path())
debug.Log(" %v content is complete", e.Path())
}
} else {
debug.Log("Archiver.fileWorker", " %v no old data", e.Path())
debug.Log(" %v no old data", e.Path())
}
// otherwise read file normally
if node.Type == "file" && len(node.Content) == 0 {
debug.Log("Archiver.fileWorker", " read and save %v, content: %v", e.Path(), node.Content)
err = arch.SaveFile(p, node)
debug.Log(" read and save %v", e.Path())
node, err = arch.SaveFile(p, node)
if err != nil {
// TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err)
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
arch.Warn(e.Path(), nil, err)
// ignore this file
e.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
} else {
// report old data size
p.Report(Stat{Bytes: node.Size})
p.Report(restic.Stat{Bytes: node.Size})
}
debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs))
debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content))
e.Result() <- node
p.Report(Stat{Files: 1})
p.Report(restic.Stat{Files: 1})
case <-done:
// pipeline was cancelled
return
@@ -333,10 +329,10 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
}
}
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
debug.Log("Archiver.dirWorker", "start")
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
debug.Log("start")
defer func() {
debug.Log("Archiver.dirWorker", "done")
debug.Log("done")
wg.Done()
}()
for {
@@ -346,51 +342,54 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
// channel is closed
return
}
debug.Log("Archiver.dirWorker", "save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
// ignore dir nodes with errors
if dir.Error() != nil {
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
dir.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
tree := NewTree()
tree := restic.NewTree()
// wait for all content
for _, ch := range dir.Entries {
debug.Log("Archiver.dirWorker", "receiving result from %v", ch)
debug.Log("receiving result from %v", ch)
res := <-ch
// if we get a nil pointer here, an error has happened while
// processing this entry. Ignore it for now.
if res == nil {
debug.Log("Archiver.dirWorker", "got nil result?")
debug.Log("got nil result?")
continue
}
// else insert node
node := res.(*Node)
tree.Insert(node)
node := res.(*restic.Node)
if node.Type == "dir" {
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree)
debug.Log("got tree node for %s: %v", node.Path, node.Subtree)
if node.Subtree == nil {
debug.Log("subtree is nil for node %v", node.Path)
continue
}
if node.Subtree.IsNull() {
panic("invalid null subtree ID")
panic("invalid null subtree restic.ID")
}
}
tree.Insert(node)
}
node := &Node{}
node := &restic.Node{}
if dir.Path() != "" && dir.Info() != nil {
n, err := NodeFromFileInfo(dir.Path(), dir.Info())
n, err := restic.NodeFromFileInfo(dir.Fullpath(), dir.Info())
if err != nil {
n.Error = err.Error()
dir.Result() <- n
continue
arch.Warn(dir.Path(), dir.Info(), err)
}
node = n
}
@@ -403,18 +402,18 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
if err != nil {
panic(err)
}
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
debug.Log("save tree for %s: %v", dir.Path(), id.Str())
if id.IsNull() {
panic("invalid null subtree ID return from SaveTreeJSON()")
panic("invalid null subtree restic.ID return from SaveTreeJSON()")
}
node.Subtree = &id
debug.Log("Archiver.dirWorker", "sending result to %v", dir.Result())
debug.Log("sending result to %v", dir.Result())
dir.Result() <- node
if dir.Path() != "" {
p.Report(Stat{Dirs: 1})
p.Report(restic.Stat{Dirs: 1})
}
case <-done:
// pipeline was cancelled
@@ -424,7 +423,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
}
type archivePipe struct {
Old <-chan WalkTreeJob
Old <-chan walk.TreeJob
New <-chan pipe.Job
}
@@ -445,7 +444,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
case job, ok = <-inCh:
if !ok {
// input channel closed, we're done
debug.Log("copyJobs", "input channel closed, we're done")
debug.Log("input channel closed, we're done")
return
}
inCh = nil
@@ -459,21 +458,21 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
type archiveJob struct {
hasOld bool
old WalkTreeJob
old walk.TreeJob
new pipe.Job
}
func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
defer func() {
close(out)
debug.Log("ArchivePipe.compare", "done")
debug.Log("done")
}()
debug.Log("ArchivePipe.compare", "start")
debug.Log("start")
var (
loadOld, loadNew bool = true, true
ok bool
oldJob WalkTreeJob
oldJob walk.TreeJob
newJob pipe.Job
)
@@ -482,7 +481,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
oldJob, ok = <-a.Old
// if the old channel is closed, just pass through the new jobs
if !ok {
debug.Log("ArchivePipe.compare", "old channel is closed, copy from new channel")
debug.Log("old channel is closed, copy from new channel")
// handle remaining newJob
if !loadNew {
@@ -500,15 +499,15 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
newJob, ok = <-a.New
// if the new channel is closed, there are no more files in the current snapshot, return
if !ok {
debug.Log("ArchivePipe.compare", "new channel is closed, we're done")
debug.Log("new channel is closed, we're done")
return
}
loadNew = false
}
debug.Log("ArchivePipe.compare", "old job: %v", oldJob.Path)
debug.Log("ArchivePipe.compare", "new job: %v", newJob.Path())
debug.Log("old job: %v", oldJob.Path)
debug.Log("new job: %v", newJob.Path())
// at this point we have received an old job as well as a new job, compare paths
file1 := oldJob.Path
@@ -518,7 +517,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
dir2 := filepath.Dir(file2)
if file1 == file2 {
debug.Log("ArchivePipe.compare", " same filename %q", file1)
debug.Log(" same filename %q", file1)
// send job
out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy()
@@ -526,19 +525,19 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
loadNew = true
continue
} else if dir1 < dir2 {
debug.Log("ArchivePipe.compare", " %q < %q, file %q added", dir1, dir2, file2)
debug.Log(" %q < %q, file %q added", dir1, dir2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
continue
} else if dir1 == dir2 {
if file1 < file2 {
debug.Log("ArchivePipe.compare", " %q < %q, file %q removed", file1, file2, file1)
debug.Log(" %q < %q, file %q removed", file1, file2, file1)
// file has been removed, load new old
loadOld = true
continue
} else {
debug.Log("ArchivePipe.compare", " %q > %q, file %q added", file1, file2, file2)
debug.Log(" %q > %q, file %q added", file1, file2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
@@ -546,7 +545,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
}
}
debug.Log("ArchivePipe.compare", " %q > %q, file %q removed", file1, file2, file1)
debug.Log(" %q > %q, file %q removed", file1, file2, file1)
// file has been removed, throw away old job and load new
loadOld = true
}
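The loop above is effectively a merge join over two path-sorted streams: equal paths are forwarded as old/new pairs, paths present only in the new stream become additions, and paths present only in the old stream are dropped. A minimal standalone sketch of that ordering logic (simplified to plain lexicographic order, whereas the code above compares directories before filenames; mergePaths is a hypothetical helper, not part of restic):
package main
import "fmt"
// mergePaths walks two sorted path lists and classifies each entry the
// way the compare loop does: matched, added, or removed.
func mergePaths(old, new []string) {
	i, j := 0, 0
	for j < len(new) {
		switch {
		case i >= len(old) || old[i] > new[j]:
			fmt.Printf("%s: added, no old data\n", new[j])
			j++
		case old[i] == new[j]:
			fmt.Printf("%s: matched, old blobs may be reused\n", new[j])
			i++
			j++
		default: // old[i] < new[j]
			fmt.Printf("%s: removed, old entry discarded\n", old[i])
			i++
		}
	}
}
func main() {
	mergePaths(
		[]string{"a/1", "a/2", "b/1"},
		[]string{"a/1", "a/3", "b/1", "c/1"},
	)
}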
@@ -559,7 +558,7 @@ func (j archiveJob) Copy() pipe.Job {
// handle files
if isRegularFile(j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is file", j.new.Path())
debug.Log(" job %v is file", j.new.Path())
// if type has changed, return new job directly
if j.old.Node == nil {
@@ -567,12 +566,12 @@ func (j archiveJob) Copy() pipe.Job {
}
// if file is newer, return the new job
if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log(" job %v is newer", j.new.Path())
return j.new
}
debug.Log("archiveJob.Copy", " job %v add old data", j.new.Path())
debug.Log(" job %v add old data", j.new.Path())
// otherwise annotate job with old data
e := j.new.(pipe.Entry)
e.Node = j.old.Node
@@ -597,10 +596,10 @@ func (arch *Archiver) saveIndexes(wg *sync.WaitGroup, done <-chan struct{}) {
case <-done:
return
case <-ticker.C:
debug.Log("Archiver.saveIndexes", "saving full indexes")
debug.Log("saving full indexes")
err := arch.repo.SaveFullIndex()
if err != nil {
debug.Log("Archiver.saveIndexes", "save indexes returned an error: %v", err)
debug.Log("save indexes returned an error: %v", err)
fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err)
}
}
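The select-plus-ticker shape used by saveIndexes is a reusable pattern: flush periodically until the done channel closes. A hedged sketch of the same pattern in isolation (flushEvery is a hypothetical helper):
package main
import (
	"fmt"
	"os"
	"time"
)
// flushEvery calls fn once per interval until done is closed; errors are
// reported but do not stop the loop, mirroring saveIndexes above.
func flushEvery(done <-chan struct{}, interval time.Duration, fn func() error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			if err := fn(); err != nil {
				fmt.Fprintf(os.Stderr, "periodic flush: %v\n", err)
			}
		}
	}
}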
@@ -632,14 +631,14 @@ func (p baseNameSlice) Len() int { return len(p) }
func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) {
func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
paths = unique(paths)
sort.Sort(baseNameSlice(paths))
debug.Log("Archiver.Snapshot", "start for %v", paths)
debug.Log("start for %v", paths)
debug.RunHook("Archiver.Snapshot", nil)
@@ -651,9 +650,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
defer p.Done()
// create new snapshot
sn, err := NewSnapshot(paths)
sn, err := restic.NewSnapshot(paths, tags, hostname)
if err != nil {
return nil, backend.ID{}, err
return nil, restic.ID{}, err
}
sn.Excludes = arch.Excludes
@@ -664,18 +663,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
sn.Parent = parentID
// load parent snapshot
parent, err := LoadSnapshot(arch.repo, *parentID)
parent, err := restic.LoadSnapshot(arch.repo, *parentID)
if err != nil {
return nil, backend.ID{}, err
return nil, restic.ID{}, err
}
// start walker on old tree
ch := make(chan WalkTreeJob)
go WalkTree(arch.repo, *parent.Tree, done, ch)
ch := make(chan walk.TreeJob)
go walk.Tree(arch.repo, *parent.Tree, done, ch)
jobs.Old = ch
} else {
// use closed channel
ch := make(chan WalkTreeJob)
ch := make(chan walk.TreeJob)
close(ch)
jobs.Old = ch
}
@@ -685,7 +684,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
resCh := make(chan pipe.Result, 1)
go func() {
pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
debug.Log("Archiver.Snapshot", "pipe.Walk done")
debug.Log("pipe.Walk done")
}()
jobs.New = pipeCh
@@ -700,7 +699,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
wg.Add(1)
go func() {
pipe.Split(ch, dirCh, entCh)
debug.Log("Archiver.Snapshot", "split done")
debug.Log("split done")
close(dirCh)
close(entCh)
wg.Done()
@@ -720,44 +719,52 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)
// wait for all workers to terminate
debug.Log("Archiver.Snapshot", "wait for workers")
debug.Log("wait for workers")
wg.Wait()
// stop index saver
close(stopIndexSaver)
wgIndexSaver.Wait()
debug.Log("Archiver.Snapshot", "workers terminated")
// receive the top-level tree
root := (<-resCh).(*Node)
debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree
// save snapshot
id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
if err != nil {
return nil, backend.ID{}, err
}
// store ID in snapshot struct
sn.id = &id
debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
debug.Log("workers terminated")
// flush repository
err = arch.repo.Flush()
if err != nil {
return nil, backend.ID{}, err
return nil, restic.ID{}, err
}
// receive the top-level tree
root := (<-resCh).(*restic.Node)
debug.Log("root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree
// load top-level tree again to see if it is empty
toptree, err := arch.repo.LoadTree(*root.Subtree)
if err != nil {
return nil, restic.ID{}, err
}
if len(toptree.Nodes) == 0 {
return nil, restic.ID{}, errors.Fatal("no files/dirs saved, refusing to create empty snapshot")
}
// save index
err = arch.repo.SaveIndex()
if err != nil {
debug.Log("Archiver.Snapshot", "error saving index: %v", err)
return nil, backend.ID{}, err
debug.Log("error saving index: %v", err)
return nil, restic.ID{}, err
}
debug.Log("Archiver.Snapshot", "saved indexes")
debug.Log("saved indexes")
// save snapshot
id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("saved snapshot %v", id.Str())
return sn, id, nil
}
@@ -770,17 +777,17 @@ func isRegularFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// Scan traverses the dirs to collect Stat information while emitting progress
// Scan traverses the dirs to collect restic.Stat information while emitting progress
// information with p.
func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
p.Start()
defer p.Done()
var stat Stat
var stat restic.Stat
for _, dir := range dirs {
debug.Log("Scan", "Start for %v", dir)
err := filepath.Walk(dir, func(str string, fi os.FileInfo, err error) error {
debug.Log("Start for %v", dir)
err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error {
// TODO: integrate error reporting
if err != nil {
fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err)
@@ -792,14 +799,14 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
}
if !filter(str, fi) {
debug.Log("Scan.Walk", "path %v excluded", str)
debug.Log("path %v excluded", str)
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
s := Stat{}
s := restic.Stat{}
if fi.IsDir() {
s.Dirs++
} else {
@@ -817,9 +824,9 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
return nil
})
debug.Log("Scan", "Done for %v, err: %v", dir, err)
debug.Log("Done for %v, err: %v", dir, err)
if err != nil {
return Stat{}, err
return restic.Stat{}, errors.Wrap(err, "fs.Walk")
}
}
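A caller only needs a directory list, a filter, and an optional progress; a hedged sketch, assuming Scan is exported from the archiver package as this diff suggests (restic.Progress methods tolerate a nil receiver, so nil disables progress reporting):
// countEverything runs Scan with a pass-through filter and no progress.
func countEverything(dir string) (restic.Stat, error) {
	all := func(item string, fi os.FileInfo) bool { return true }
	return archiver.Scan([]string{dir}, all, nil)
}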

View File

@@ -1,17 +1,18 @@
package restic_test
package archiver_test
import (
"crypto/rand"
"errors"
"io"
mrand "math/rand"
"sync"
"testing"
"time"
"restic/errors"
"restic"
"restic/backend"
"restic/pack"
"restic/archiver"
"restic/mock"
"restic/repository"
)
@@ -19,14 +20,14 @@ const parallelSaves = 50
const testSaveIndexTime = 100 * time.Millisecond
const testTimeout = 2 * time.Second
var DupID backend.ID
var DupID restic.ID
func randomID() backend.ID {
func randomID() restic.ID {
if mrand.Float32() < 0.5 {
return DupID
}
id := backend.ID{}
id := restic.ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
@@ -35,30 +36,30 @@ func randomID() backend.ID {
}
// forgetfulBackend returns a backend that forgets everything.
func forgetfulBackend() backend.Backend {
be := &backend.MockBackend{}
func forgetfulBackend() restic.Backend {
be := &mock.Backend{}
be.TestFn = func(t backend.Type, name string) (bool, error) {
be.TestFn = func(h restic.Handle) (bool, error) {
return false, nil
}
be.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) {
return 0, errors.New("not found")
be.LoadFn = func(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
return nil, errors.New("not found")
}
be.SaveFn = func(h backend.Handle, p []byte) error {
be.SaveFn = func(h restic.Handle, rd io.Reader) error {
return nil
}
be.StatFn = func(h backend.Handle) (backend.BlobInfo, error) {
return backend.BlobInfo{}, errors.New("not found")
be.StatFn = func(h restic.Handle) (restic.FileInfo, error) {
return restic.FileInfo{}, errors.New("not found")
}
be.RemoveFn = func(t backend.Type, name string) error {
be.RemoveFn = func(h restic.Handle) error {
return nil
}
be.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string {
be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
close(ch)
return ch
@@ -84,7 +85,7 @@ func testArchiverDuplication(t *testing.T) {
t.Fatal(err)
}
arch := restic.NewArchiver(repo)
arch := archiver.New(repo)
wg := &sync.WaitGroup{}
done := make(chan struct{})
@@ -101,13 +102,13 @@ func testArchiverDuplication(t *testing.T) {
id := randomID()
if repo.Index().Has(id) {
if repo.Index().Has(id, restic.DataBlob) {
continue
}
buf := make([]byte, 50)
err := arch.Save(pack.Data, buf, id)
err := arch.Save(restic.DataBlob, buf, id)
if err != nil {
t.Fatal(err)
}
@@ -141,6 +142,11 @@ func testArchiverDuplication(t *testing.T) {
close(done)
wg.Wait()
err = repo.Flush()
if err != nil {
t.Fatal(err)
}
}
func TestArchiverDuplication(t *testing.T) {

View File

@@ -1,10 +1,11 @@
package restic
package archiver
import (
"os"
"testing"
"restic/pipe"
"restic/walk"
)
var treeJobs = []string{
@@ -82,12 +83,12 @@ func (j testPipeJob) Error() error { return j.err }
func (j testPipeJob) Info() os.FileInfo { return j.fi }
func (j testPipeJob) Result() chan<- pipe.Result { return j.res }
func testTreeWalker(done <-chan struct{}, out chan<- WalkTreeJob) {
func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) {
for _, e := range treeJobs {
select {
case <-done:
return
case out <- WalkTreeJob{Path: e}:
case out <- walk.TreeJob{Path: e}:
}
}
@@ -109,7 +110,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) {
func TestArchivePipe(t *testing.T) {
done := make(chan struct{})
treeCh := make(chan WalkTreeJob)
treeCh := make(chan walk.TreeJob)
pipeCh := make(chan pipe.Job)
go testTreeWalker(done, treeCh)

View File

@@ -1,4 +1,4 @@
package restic_test
package archiver_test
import (
"bytes"
@@ -7,13 +7,14 @@ import (
"time"
"restic"
"restic/backend"
"restic/archiver"
"restic/checker"
"restic/crypto"
"restic/pack"
"restic/repository"
. "restic/test"
"restic/errors"
"github.com/restic/chunker"
)
@@ -31,7 +32,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
for {
chunk, err := ch.Next(buf)
if err == io.EOF {
if errors.Cause(err) == io.EOF {
break
}
@@ -47,8 +48,8 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
}
func BenchmarkChunkEncrypt(b *testing.B) {
repo := SetupRepo()
defer TeardownRepo(repo)
repo, cleanup := repository.TestRepository(b)
defer cleanup()
data := Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data)
@@ -69,7 +70,7 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
for {
chunk, err := ch.Next(buf)
if err == io.EOF {
if errors.Cause(err) == io.EOF {
break
}
@@ -79,8 +80,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
}
func BenchmarkChunkEncryptParallel(b *testing.B) {
repo := SetupRepo()
defer TeardownRepo(repo)
repo, cleanup := repository.TestRepository(b)
defer cleanup()
data := Random(23, 10<<20) // 10MiB
@@ -98,12 +99,12 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
}
func archiveDirectory(b testing.TB) {
repo := SetupRepo()
defer TeardownRepo(repo)
repo, cleanup := repository.TestRepository(b)
defer cleanup()
arch := restic.NewArchiver(repo)
arch := archiver.New(repo)
_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, "localhost", nil)
OK(b, err)
b.Logf("snapshot archived as %v", id)
@@ -127,9 +128,17 @@ func BenchmarkArchiveDirectory(b *testing.B) {
}
}
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
for range repo.Backend().List(t, nil) {
n++
}
return n
}
func archiveWithDedup(t testing.TB) {
repo := SetupRepo()
defer TeardownRepo(repo)
repo, cleanup := repository.TestRepository(t)
defer cleanup()
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup")
@@ -142,24 +151,24 @@ func archiveWithDedup(t testing.TB) {
}
// archive a few files
sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats
cnt.before.packs = repo.Count(backend.Data)
cnt.before.dataBlobs = repo.Index().Count(pack.Data)
cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
cnt.before.packs = countPacks(repo, restic.DataFile)
cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
// archive the same files again, without parent snapshot
sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil)
sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again
cnt.after.packs = repo.Count(backend.Data)
cnt.after.dataBlobs = repo.Index().Count(pack.Data)
cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
cnt.after.packs = countPacks(repo, restic.DataFile)
cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
@@ -170,13 +179,13 @@ func archiveWithDedup(t testing.TB) {
}
// archive the same files again, with a parent snapshot
sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID())
sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again
cnt.after2.packs = repo.Count(backend.Data)
cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
cnt.after2.packs = countPacks(repo, restic.DataFile)
cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
@@ -191,48 +200,6 @@ func TestArchiveDedup(t *testing.T) {
archiveWithDedup(t)
}
func BenchmarkLoadTree(t *testing.B) {
repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup")
}
// archive a few files
arch := restic.NewArchiver(repo)
sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(t, err)
t.Logf("archived snapshot %v", sn.ID())
list := make([]backend.ID, 0, 10)
done := make(chan struct{})
for _, idx := range repo.Index().All() {
for blob := range idx.Each(done) {
if blob.Type != pack.Tree {
continue
}
list = append(list, blob.ID)
if len(list) == cap(list) {
close(done)
break
}
}
}
// start benchmark
t.ResetTimer()
for i := 0; i < t.N; i++ {
for _, id := range list {
_, err := restic.LoadTree(repo, id)
OK(t, err)
}
}
}
// Saves several identical chunks concurrently and later checks that there are no
// unreferenced packs in the repository. See also #292 and #358.
func TestParallelSaveWithDuplication(t *testing.T) {
@@ -242,18 +209,18 @@ func TestParallelSaveWithDuplication(t *testing.T) {
}
func testParallelSaveWithDuplication(t *testing.T, seed int) {
repo := SetupRepo()
defer TeardownRepo(repo)
repo, cleanup := repository.TestRepository(t)
defer cleanup()
dataSizeMb := 128
duplication := 7
arch := restic.NewArchiver(repo)
arch := archiver.New(repo)
chunks := getRandomData(seed, dataSizeMb*1024*1024)
errChannels := [](<-chan error){}
// interweaved processing of subsequent chunks
// interwoven processing of subsequent chunks
maxParallel := 2*duplication - 1
barrier := make(chan struct{}, maxParallel)
@@ -265,9 +232,9 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
go func(c chunker.Chunk, errChan chan<- error) {
barrier <- struct{}{}
id := backend.Hash(c.Data)
id := restic.Hash(c.Data)
time.Sleep(time.Duration(id[0]))
err := arch.Save(pack.Data, c.Data, id)
err := arch.Save(restic.DataBlob, c.Data, id)
<-barrier
errChan <- err
}(c, errChan)
@@ -292,7 +259,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
for {
c, err := chunker.Next(nil)
if err == io.EOF {
if errors.Cause(err) == io.EOF {
break
}
chunks = append(chunks, c)
@@ -301,7 +268,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
return chunks
}
func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker {
func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
@@ -327,3 +294,23 @@ func assertNoUnreferencedPacks(t *testing.T, chkr *checker.Checker) {
OK(t, err)
}
}
func TestArchiveEmptySnapshot(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := archiver.New(repo)
sn, id, err := arch.Snapshot(nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil)
if err == nil {
t.Errorf("expected error for empty snapshot, got nil")
}
if !id.IsNull() {
t.Errorf("expected null ID for empty snapshot, got %v", id.Str())
}
if sn != nil {
t.Errorf("expected null snapshot for empty snapshot, got %v", sn)
}
}

View File

@@ -0,0 +1,21 @@
package archiver
import (
"sync"
"github.com/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}
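The pool hands out chunker-sized buffers so the file workers do not allocate one per chunk. A hedged sketch of the intended calling pattern (borrowBuf is a hypothetical wrapper):
// borrowBuf takes a buffer from the pool, lends it to consume, and
// returns it afterwards; callers must not retain buf past freeBuf.
func borrowBuf(consume func([]byte)) {
	buf := getBuf()
	defer freeBuf(buf)
	consume(buf)
}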

View File

@@ -0,0 +1,16 @@
package archiver
import (
"restic"
"testing"
)
// TestSnapshot creates a new snapshot of path.
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := New(repo)
sn, _, err := arch.Snapshot(nil, []string{path}, []string{"test"}, "localhost", parent)
if err != nil {
t.Fatal(err)
}
return sn
}
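A test in another package can then stand up a repository and snapshot a directory in a couple of lines; a hedged sketch, assuming a populated testdata directory:
func TestSnapshotDir(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()
	// nil parent: take a fresh snapshot with no deduplication hint
	sn := archiver.TestSnapshot(t, repo, "testdata", nil)
	t.Logf("created snapshot %v", sn.ID().Str())
}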

src/restic/backend.go (new file, 40 lines)
View File

@@ -0,0 +1,40 @@
package restic
import "io"
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test returns whether a File with the given name and type exists.
Test(h Handle) (bool, error)
// Remove removes a File with type t and name.
Remove(h Handle) error
// Close the backend
Close() error
// Save stores the data in the backend under the given handle.
Save(h Handle, rd io.Reader) error
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is larger than zero, only a portion of the file
// is returned. rd must be closed after use. If an error is returned, the
// ReadCloser must be nil.
Load(h Handle, length int, offset int64) (io.ReadCloser, error)
// Stat returns information about the File identified by h.
Stat(h Handle) (FileInfo, error)
// List returns a channel that yields all names of files of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t FileType, done <-chan struct{}) <-chan string
}
// FileInfo is returned by Stat() and contains information about a file in the
// backend.
type FileInfo struct{ Size int64 }
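Together these methods form a small key-value contract keyed by Handle. A hedged round-trip sketch against any implementation (the handle name is made up for illustration):
package main
import (
	"io"
	"os"
	"strings"
	"restic"
)
// roundTrip stores a blob under a handle and streams it back out.
func roundTrip(be restic.Backend) error {
	h := restic.Handle{Type: restic.DataFile, Name: "aa11"} // hypothetical name
	if err := be.Save(h, strings.NewReader("payload")); err != nil {
		return err
	}
	rd, err := be.Load(h, 0, 0) // length 0: everything from offset 0
	if err != nil {
		return err
	}
	defer rd.Close()
	_, err = io.Copy(os.Stdout, rd)
	return err
}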

View File

@@ -1,5 +1,4 @@
// Package backend provides local and remote storage for restic repositories.
// All backends need to implement the Backend interface. There is a
// MockBackend, which can be used for mocking in tests, and a MemBackend, which
// stores all data in a hash internally.
// All backends need to implement the Backend interface. There is a MemBackend,
// which stores all data in a map internally and can be used for testing.
package backend
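A hedged sketch of the testing use this comment describes, against the mem backend that appears later in this diff:
func TestMemBackendRoundTrip(t *testing.T) {
	be := mem.New()
	defer be.Close()
	h := restic.Handle{Type: restic.ConfigFile}
	if err := be.Save(h, strings.NewReader("cfg")); err != nil {
		t.Fatal(err)
	}
	ok, err := be.Test(h)
	if err != nil || !ok {
		t.Fatalf("config file should exist, ok=%v err=%v", ok, err)
	}
}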

View File

@@ -1,61 +0,0 @@
package backend_test
import (
"testing"
"restic/backend"
. "restic/test"
)
type mockBackend struct {
list func(backend.Type, <-chan struct{}) <-chan string
}
func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
return m.list(t, done)
}
var samples = backend.IDs{
ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}
func TestPrefixLength(t *testing.T) {
list := samples
m := mockBackend{}
m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for _, id := range list {
select {
case ch <- id.String():
case <-done:
return
}
}
}()
return ch
}
l, err := backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 19, l)
list = samples[:3]
l, err = backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 19, l)
list = samples[3:]
l, err = backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 8, l)
}

View File

@@ -1,48 +0,0 @@
package backend
import (
"errors"
"fmt"
)
// Handle is used to store and access data in a backend.
type Handle struct {
Type Type
Name string
}
func (h Handle) String() string {
name := h.Name
if len(name) > 10 {
name = name[:10]
}
return fmt.Sprintf("<%s/%s>", h.Type, name)
}
// Valid returns an error if h is not valid.
func (h Handle) Valid() error {
if h.Type == "" {
return errors.New("type is empty")
}
switch h.Type {
case Data:
case Key:
case Lock:
case Snapshot:
case Index:
case Config:
default:
return fmt.Errorf("invalid Type %q", h.Type)
}
if h.Type == Config {
return nil
}
if h.Name == "" {
return errors.New("invalid Name")
}
return nil
}

View File

@@ -1,58 +0,0 @@
package backend_test
import (
"reflect"
"testing"
"restic/backend"
. "restic/test"
)
var uniqTests = []struct {
before, after backend.IDs
}{
{
backend.IDs{
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
},
},
{
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
{
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
}
func TestUniqIDs(t *testing.T) {
for i, test := range uniqTests {
uniq := test.before.Uniq()
if !reflect.DeepEqual(uniq, test.after) {
t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq)
}
}
}

View File

@@ -1,35 +0,0 @@
package backend_test
import (
"testing"
"restic/backend"
. "restic/test"
)
var idsetTests = []struct {
id backend.ID
seen bool
}{
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
}
func TestIDSet(t *testing.T) {
set := backend.NewIDSet()
for i, test := range idsetTests {
seen := set.Has(test.id)
if seen != test.seen {
t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen)
}
set.Insert(test.id)
}
}

View File

@@ -1,61 +0,0 @@
package backend
// Type is the type of a Blob.
type Type string
// These are the different data types a backend can store.
const (
Data Type = "data"
Key = "key"
Lock = "lock"
Snapshot = "snapshot"
Index = "index"
Config = "config"
)
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test returns whether a Blob with the given name and type exists.
Test(t Type, name string) (bool, error)
// Remove removes a Blob with type t and name.
Remove(t Type, name string) error
// Close the backend
Close() error
Lister
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
Load(h Handle, p []byte, off int64) (int, error)
// Save stores the data in the backend under the given handle.
Save(h Handle, p []byte) error
// Stat returns information about the blob identified by h.
Stat(h Handle) (BlobInfo, error)
}
// Lister implements listing data items stored in a backend.
type Lister interface {
// List returns a channel that yields all names of blobs of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t Type, done <-chan struct{}) <-chan string
}
// Deleter describes backends that can delete all content stored in them.
type Deleter interface {
// Delete the complete repository.
Delete() error
}
// BlobInfo is returned by Stat() and contains information about a stored blob.
type BlobInfo struct {
Size int64
}

View File

@@ -1,8 +1,9 @@
package local
import (
"errors"
"strings"
"restic/errors"
)
// ParseConfig parses a local backend config.

View File

@@ -1,12 +1,13 @@
package local
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"restic"
"restic/errors"
"restic/backend"
"restic/debug"
@@ -18,6 +19,8 @@ type Local struct {
p string
}
var _ restic.Backend = &Local{}
func paths(dir string) []string {
return []string{
dir,
@@ -34,8 +37,8 @@ func paths(dir string) []string {
func Open(dir string) (*Local, error) {
// test if all necessary dirs are there
for _, d := range paths(dir) {
if _, err := os.Stat(d); err != nil {
return nil, fmt.Errorf("%s does not exist", d)
if _, err := fs.Stat(d); err != nil {
return nil, errors.Wrap(err, "Open")
}
}
@@ -46,16 +49,16 @@ func Open(dir string) (*Local, error) {
// backend at dir. Afterwards a new config blob should be created.
func Create(dir string) (*Local, error) {
// test if config file already exists
_, err := os.Lstat(filepath.Join(dir, backend.Paths.Config))
_, err := fs.Lstat(filepath.Join(dir, backend.Paths.Config))
if err == nil {
return nil, errors.New("config file already exists")
}
// create paths for data, refs and temp
for _, d := range paths(dir) {
err := os.MkdirAll(d, backend.Modes.Dir)
err := fs.MkdirAll(d, backend.Modes.Dir)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "MkdirAll")
}
}
@@ -69,8 +72,8 @@ func (b *Local) Location() string {
}
// Construct path for given Type and name.
func filename(base string, t backend.Type, name string) string {
if t == backend.Config {
func filename(base string, t restic.FileType, name string) string {
if t == restic.ConfigFile {
return filepath.Join(base, "config")
}
@@ -78,167 +81,169 @@ func filename(base string, t backend.Type, name string) string {
}
// Construct directory for given Type.
func dirname(base string, t backend.Type, name string) string {
func dirname(base string, t restic.FileType, name string) string {
var n string
switch t {
case backend.Data:
case restic.DataFile:
n = backend.Paths.Data
if len(name) > 2 {
n = filepath.Join(n, name[:2])
}
case backend.Snapshot:
case restic.SnapshotFile:
n = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
n = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
n = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
n = backend.Paths.Keys
}
return filepath.Join(base, n)
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
if err := h.Valid(); err != nil {
return 0, err
}
f, err := os.Open(filename(b.p, h.Type, h.Name))
if err != nil {
return 0, err
}
defer func() {
e := f.Close()
if err == nil && e != nil {
err = e
}
}()
if off > 0 {
_, err = f.Seek(off, 0)
if err != nil {
return 0, err
}
}
return io.ReadFull(f, p)
}
// writeToTempfile saves p into a tempfile in tempdir.
func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
// copyToTempfile saves the data from rd into a tempfile in tempdir.
func copyToTempfile(tempdir string, rd io.Reader) (filename string, err error) {
tmpfile, err := ioutil.TempFile(tempdir, "temp-")
if err != nil {
return "", err
return "", errors.Wrap(err, "TempFile")
}
n, err := tmpfile.Write(p)
_, err = io.Copy(tmpfile, rd)
if err != nil {
return "", err
}
if n != len(p) {
return "", errors.New("not all bytes writen")
return "", errors.Wrap(err, "Write")
}
if err = tmpfile.Sync(); err != nil {
return "", err
}
err = fs.ClearCache(tmpfile)
if err != nil {
return "", err
return "", errors.Wrap(err, "Syncn")
}
err = tmpfile.Close()
if err != nil {
return "", err
return "", errors.Wrap(err, "Close")
}
return tmpfile.Name(), nil
}
// Save stores data in the backend at the handle.
func (b *Local) Save(h backend.Handle, p []byte) (err error) {
func (b *Local) Save(h restic.Handle, rd io.Reader) (err error) {
debug.Log("Save %v", h)
if err := h.Valid(); err != nil {
return err
}
tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p)
debug.Log("local.Save", "saved %v (%d bytes) to %v", h, len(p), tmpfile)
tmpfile, err := copyToTempfile(filepath.Join(b.p, backend.Paths.Temp), rd)
debug.Log("saved %v to %v", h, tmpfile)
if err != nil {
return err
}
filename := filename(b.p, h.Type, h.Name)
// test if new path already exists
if _, err := os.Stat(filename); err == nil {
return fmt.Errorf("Rename(): file %v already exists", filename)
if _, err := fs.Stat(filename); err == nil {
return errors.Errorf("Rename(): file %v already exists", filename)
}
// create directories if necessary, ignore errors
if h.Type == backend.Data {
err = os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if h.Type == restic.DataFile {
err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if err != nil {
return err
return errors.Wrap(err, "MkdirAll")
}
}
err = os.Rename(tmpfile, filename)
debug.Log("local.Save", "save %v: rename %v -> %v: %v",
err = fs.Rename(tmpfile, filename)
debug.Log("save %v: rename %v -> %v: %v",
h, filepath.Base(tmpfile), filepath.Base(filename), err)
if err != nil {
return err
return errors.Wrap(err, "Rename")
}
// set mode to read-only
fi, err := os.Stat(filename)
fi, err := fs.Stat(filename)
if err != nil {
return err
return errors.Wrap(err, "Stat")
}
return setNewFileMode(filename, fi)
}
// Stat returns information about a blob.
func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (b *Local) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return nil, err
}
fi, err := os.Stat(filename(b.p, h.Type, h.Name))
if offset < 0 {
return nil, errors.New("offset is negative")
}
f, err := os.Open(filename(b.p, h.Type, h.Name))
if err != nil {
return backend.BlobInfo{}, err
return nil, err
}
return backend.BlobInfo{Size: fi.Size()}, nil
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
f.Close()
return nil, err
}
}
if length > 0 {
return backend.LimitReadCloser(f, int64(length)), nil
}
return f, nil
}
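For callers, a partial read is just a Load with a nonzero length; the LimitReadCloser caps the stream, so the reader never yields more than requested. A hedged sketch (readPrefix is a hypothetical helper; assumes io/ioutil is imported):
// readPrefix returns up to n leading bytes of the file at h. It works
// against any restic.Backend, not just the local one.
func readPrefix(be restic.Backend, h restic.Handle, n int) ([]byte, error) {
	rd, err := be.Load(h, n, 0)
	if err != nil {
		return nil, err
	}
	defer rd.Close()
	return ioutil.ReadAll(rd)
}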
// Stat returns information about a blob.
func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("Stat %v", h)
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
fi, err := fs.Stat(filename(b.p, h.Type, h.Name))
if err != nil {
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return restic.FileInfo{Size: fi.Size()}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t backend.Type, name string) (bool, error) {
_, err := os.Stat(filename(b.p, t, name))
func (b *Local) Test(h restic.Handle) (bool, error) {
debug.Log("Test %v", h)
_, err := fs.Stat(filename(b.p, h.Type, h.Name))
if err != nil {
if os.IsNotExist(err) {
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}
return false, err
return false, errors.Wrap(err, "Stat")
}
return true, nil
}
// Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error {
fn := filename(b.p, t, name)
func (b *Local) Remove(h restic.Handle) error {
debug.Log("Remove %v", h)
fn := filename(b.p, h.Type, h.Name)
// reset read-only flag
err := os.Chmod(fn, 0666)
err := fs.Chmod(fn, 0666)
if err != nil {
return err
return errors.Wrap(err, "Chmod")
}
return os.Remove(fn)
return fs.Remove(fn)
}
func isFile(fi os.FileInfo) bool {
@@ -246,15 +251,15 @@ func isFile(fi os.FileInfo) bool {
}
func readdir(d string) (fileInfos []os.FileInfo, err error) {
f, e := os.Open(d)
f, e := fs.Open(d)
if e != nil {
return nil, e
return nil, errors.Wrap(e, "Open")
}
defer func() {
e := f.Close()
if err == nil {
err = e
err = errors.Wrap(e, "Close")
}
}()
@@ -303,9 +308,10 @@ func listDirs(dir string) (filenames []string, err error) {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("List %v", t)
lister := listDir
if t == backend.Data {
if t == restic.DataFile {
lister = listDirs
}
@@ -336,11 +342,13 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
// Delete removes the repository and all files.
func (b *Local) Delete() error {
return os.RemoveAll(b.p)
debug.Log("Delete()")
return fs.RemoveAll(b.p)
}
// Close closes all open files.
func (b *Local) Close() error {
debug.Log("Close()")
// this does not need to do anything, all open files are closed within the
// same function.
return nil

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"io/ioutil"
"os"
"restic"
"restic/backend"
"restic/backend/local"
"restic/backend/test"
)
@@ -30,7 +30,7 @@ func createTempdir() error {
}
func init() {
test.CreateFn = func() (backend.Backend, error) {
test.CreateFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err
@@ -38,7 +38,7 @@ func init() {
return local.Create(tempBackendDir)
}
test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err

View File

@@ -4,9 +4,10 @@ package local
import (
"os"
"restic/fs"
)
// set file to readonly
func setNewFileMode(f string, fi os.FileInfo) error {
return os.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
return fs.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
}

View File

@@ -1,28 +1,28 @@
package mem
import (
"errors"
"bytes"
"io"
"io/ioutil"
"restic"
"sync"
"restic/backend"
"restic/errors"
"restic/debug"
)
type entry struct {
Type backend.Type
Name string
}
type memMap map[restic.Handle][]byte
type memMap map[entry][]byte
// make sure that MemoryBackend implements backend.Backend
var _ restic.Backend = &MemoryBackend{}
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
data memMap
m sync.Mutex
backend.MockBackend
}
// New returns a new backend that saves all data in a map in memory.
@@ -31,107 +31,27 @@ func New() *MemoryBackend {
data: make(memMap),
}
be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) {
return memTest(be, t, name)
}
be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) {
return memLoad(be, h, p, off)
}
be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error {
return memSave(be, h, p)
}
be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) {
return memStat(be, h)
}
be.MockBackend.RemoveFn = func(t backend.Type, name string) error {
return memRemove(be, t, name)
}
be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string {
return memList(be, t, done)
}
be.MockBackend.DeleteFn = func() error {
be.m.Lock()
defer be.m.Unlock()
be.data = make(memMap)
return nil
}
be.MockBackend.LocationFn = func() string {
return "Memory Backend"
}
debug.Log("MemoryBackend.New", "created new memory backend")
debug.Log("created new memory backend")
return be
}
func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error {
// Test returns whether a file exists.
func (be *MemoryBackend) Test(h restic.Handle) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
if _, ok := be.data[entry{t, name}]; ok {
return errors.New("already present")
}
debug.Log("Test %v", h)
be.data[entry{t, name}] = data
return nil
}
func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
debug.Log("MemoryBackend.Test", "test %v %v", t, name)
if _, ok := be.data[entry{t, name}]; ok {
if _, ok := be.data[h]; ok {
return true, nil
}
return false, nil
}
func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) {
if err := h.Valid(); err != nil {
return 0, err
}
be.m.Lock()
defer be.m.Unlock()
if h.Type == backend.Config {
h.Name = ""
}
debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))
if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
return 0, errors.New("no such data")
}
buf := be.data[entry{h.Type, h.Name}]
if off > int64(len(buf)) {
return 0, errors.New("offset beyond end of file")
}
buf = buf[off:]
n := copy(p, buf)
if len(p) > len(buf) {
return n, io.ErrUnexpectedEOF
}
return n, nil
}
func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
// Save adds new Data to the backend.
func (be *MemoryBackend) Save(h restic.Handle, rd io.Reader) error {
if err := h.Valid(); err != nil {
return err
}
@@ -139,60 +59,104 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
be.m.Lock()
defer be.m.Unlock()
if h.Type == backend.Config {
if h.Type == restic.ConfigFile {
h.Name = ""
}
if _, ok := be.data[entry{h.Type, h.Name}]; ok {
if _, ok := be.data[h]; ok {
return errors.New("file already exists")
}
debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)
buf := make([]byte, len(p))
copy(buf, p)
be.data[entry{h.Type, h.Name}] = buf
buf, err := ioutil.ReadAll(rd)
if err != nil {
return err
}
be.data[h] = buf
debug.Log("saved %v bytes at %v", len(buf), h)
return nil
}
func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) {
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *MemoryBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
if err := h.Valid(); err != nil {
return nil, err
}
be.m.Lock()
defer be.m.Unlock()
if h.Type == restic.ConfigFile {
h.Name = ""
}
debug.Log("Load %v offset %v len %v", h, offset, length)
if offset < 0 {
return nil, errors.New("offset is negative")
}
if _, ok := be.data[h]; !ok {
return nil, errors.New("no such data")
}
buf := be.data[h]
if offset > int64(len(buf)) {
return nil, errors.New("offset beyond end of file")
}
buf = buf[offset:]
if length > 0 && len(buf) > length {
buf = buf[:length]
}
return backend.Closer{Reader: bytes.NewReader(buf)}, nil
}
// Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
be.m.Lock()
defer be.m.Unlock()
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, err
}
if h.Type == backend.Config {
if h.Type == restic.ConfigFile {
h.Name = ""
}
debug.Log("MemoryBackend.Stat", "stat %v", h)
debug.Log("stat %v", h)
e, ok := be.data[entry{h.Type, h.Name}]
e, ok := be.data[h]
if !ok {
return backend.BlobInfo{}, errors.New("no such data")
return restic.FileInfo{}, errors.New("no such data")
}
return backend.BlobInfo{Size: int64(len(e))}, nil
return restic.FileInfo{Size: int64(len(e))}, nil
}
func memRemove(be *MemoryBackend, t backend.Type, name string) error {
// Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(h restic.Handle) error {
be.m.Lock()
defer be.m.Unlock()
debug.Log("MemoryBackend.Remove", "get %v %v", t, name)
debug.Log("Remove %v", h)
if _, ok := be.data[entry{t, name}]; !ok {
if _, ok := be.data[h]; !ok {
return errors.New("no such data")
}
delete(be.data, entry{t, name})
delete(be.data, h)
return nil
}
func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string {
// List returns a channel which yields entries from the backend.
func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
be.m.Lock()
defer be.m.Unlock()
@@ -206,7 +170,7 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str
ids = append(ids, entry.Name)
}
debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
debug.Log("list %v: %v", t, ids)
go func() {
defer close(ch)
@@ -221,3 +185,22 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str
return ch
}
// Location returns the location of the backend (RAM).
func (be *MemoryBackend) Location() string {
return "RAM"
}
// Delete removes all data in the backend.
func (be *MemoryBackend) Delete() error {
be.m.Lock()
defer be.m.Unlock()
be.data = make(memMap)
return nil
}
// Close closes the backend.
func (be *MemoryBackend) Close() error {
return nil
}

View File

@@ -1,19 +1,20 @@
package mem_test
import (
"errors"
"restic"
"restic/errors"
"restic/backend"
"restic/backend/mem"
"restic/backend/test"
)
var be backend.Backend
var be restic.Backend
//go:generate go run ../test/generate_backend_tests.go
func init() {
test.CreateFn = func() (backend.Backend, error) {
test.CreateFn = func() (restic.Backend, error) {
if be != nil {
return nil, errors.New("temporary memory backend dir already exists")
}
@@ -23,7 +24,7 @@ func init() {
return be, nil
}
test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
if be == nil {
return nil, errors.New("repository not initialized")
}

View File

@@ -1,103 +0,0 @@
package backend
import "errors"
// MockBackend implements a backend whose functions can be specified. This
// should only be used for tests.
type MockBackend struct {
CloseFn func() error
LoadFn func(h Handle, p []byte, off int64) (int, error)
SaveFn func(h Handle, p []byte) error
StatFn func(h Handle) (BlobInfo, error)
ListFn func(Type, <-chan struct{}) <-chan string
RemoveFn func(Type, string) error
TestFn func(Type, string) (bool, error)
DeleteFn func() error
LocationFn func() string
}
// Close the backend.
func (m *MockBackend) Close() error {
if m.CloseFn == nil {
return nil
}
return m.CloseFn()
}
// Location returns a location string.
func (m *MockBackend) Location() string {
if m.LocationFn == nil {
return ""
}
return m.LocationFn()
}
// Load loads data from the backend.
func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) {
if m.LoadFn == nil {
return 0, errors.New("not implemented")
}
return m.LoadFn(h, p, off)
}
// Save data in the backend.
func (m *MockBackend) Save(h Handle, p []byte) error {
if m.SaveFn == nil {
return errors.New("not implemented")
}
return m.SaveFn(h, p)
}
// Stat an object in the backend.
func (m *MockBackend) Stat(h Handle) (BlobInfo, error) {
if m.StatFn == nil {
return BlobInfo{}, errors.New("not implemented")
}
return m.StatFn(h)
}
// List items of type t.
func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string {
if m.ListFn == nil {
ch := make(chan string)
close(ch)
return ch
}
return m.ListFn(t, done)
}
// Remove data from the backend.
func (m *MockBackend) Remove(t Type, name string) error {
if m.RemoveFn == nil {
return errors.New("not implemented")
}
return m.RemoveFn(t, name)
}
// Test for the existence of a specific item.
func (m *MockBackend) Test(t Type, name string) (bool, error) {
if m.TestFn == nil {
return false, errors.New("not implemented")
}
return m.TestFn(t, name)
}
// Delete all data.
func (m *MockBackend) Delete() error {
if m.DeleteFn == nil {
return errors.New("not implemented")
}
return m.DeleteFn()
}
// Make sure that MockBackend implements the backend interface.
var _ Backend = &MockBackend{}

View File

@@ -1,63 +0,0 @@
package backend
import (
"errors"
"io"
)
type readSeeker struct {
be Backend
h Handle
t Type
name string
offset int64
size int64
}
// NewReadSeeker returns an io.ReadSeeker for the given object in the backend.
func NewReadSeeker(be Backend, h Handle) io.ReadSeeker {
return &readSeeker{be: be, h: h}
}
func (rd *readSeeker) Read(p []byte) (int, error) {
n, err := rd.be.Load(rd.h, p, rd.offset)
rd.offset += int64(n)
return n, err
}
func (rd *readSeeker) Seek(offset int64, whence int) (n int64, err error) {
switch whence {
case 0:
rd.offset = offset
case 1:
rd.offset += offset
case 2:
if rd.size == 0 {
rd.size, err = rd.getSize()
if err != nil {
return 0, err
}
}
pos := rd.size + offset
if pos < 0 {
return 0, errors.New("invalid offset, before start of blob")
}
rd.offset = pos
return rd.offset, nil
default:
return 0, errors.New("invalid value for parameter whence")
}
return rd.offset, nil
}
func (rd *readSeeker) getSize() (int64, error) {
stat, err := rd.be.Stat(rd.h)
if err != nil {
return 0, err
}
return stat.Size, nil
}

View File

@@ -1,114 +0,0 @@
package backend_test
import (
"bytes"
"io"
"math/rand"
"restic/backend"
"restic/backend/mem"
"testing"
. "restic/test"
)
func abs(a int) int {
if a < 0 {
return -a
}
return a
}
func loadAndCompare(t testing.TB, rd io.ReadSeeker, size int, offset int64, expected []byte) {
var (
pos int64
err error
)
if offset >= 0 {
pos, err = rd.Seek(offset, 0)
} else {
pos, err = rd.Seek(offset, 2)
}
if err != nil {
t.Errorf("Seek(%d, 0) returned error: %v", offset, err)
return
}
if offset >= 0 && pos != offset {
t.Errorf("pos after seek is wrong, want %d, got %d", offset, pos)
} else if offset < 0 && pos != int64(size)+offset {
t.Errorf("pos after relative seek is wrong, want %d, got %d", int64(size)+offset, pos)
}
buf := make([]byte, len(expected))
n, err := rd.Read(buf)
// if we requested data beyond the end of the file, ignore
// ErrUnexpectedEOF error
if offset > 0 && len(buf) > size && err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:size]
}
if offset < 0 && len(buf) > abs(int(offset)) && err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:abs(int(offset))]
}
if n != len(buf) {
t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
len(buf), offset, len(buf), n)
return
}
if err != nil {
t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), offset, err)
return
}
buf = buf[:n]
if !bytes.Equal(buf, expected) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), offset)
return
}
}
func TestReadSeeker(t *testing.T) {
b := mem.New()
length := rand.Intn(1<<24) + 2000
data := Random(23, length)
id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
err := b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
}
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
if rand.Float32() > 0.5 {
o = -o
}
d := data
if o > 0 && o < len(d) {
d = d[o:]
} else {
o = len(d)
d = d[:0]
}
if l > 0 && l < len(d) {
d = d[:l]
}
rd := backend.NewReadSeeker(b, handle)
loadAndCompare(t, rd, len(data), int64(o), d)
}
}

View File

@@ -1,9 +1,10 @@
package rest
import (
"errors"
"net/url"
"strings"
"restic/errors"
)
// Config contains all configuration necessary to connect to a REST server.
@@ -21,7 +22,7 @@ func ParseConfig(s string) (interface{}, error) {
u, err := url.Parse(s)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "url.Parse")
}
cfg := Config{URL: u}

View File

@@ -1,40 +1,46 @@
package rest
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"restic"
"strings"
"restic/debug"
"restic/errors"
"restic/backend"
)
const connLimit = 10
const connLimit = 40
// make sure the rest backend implements restic.Backend
var _ restic.Backend = &restBackend{}
// restPath returns the path to the given resource.
func restPath(url *url.URL, h backend.Handle) string {
func restPath(url *url.URL, h restic.Handle) string {
u := *url
var dir string
switch h.Type {
case backend.Config:
case restic.ConfigFile:
dir = ""
h.Name = "config"
case backend.Data:
case restic.DataFile:
dir = backend.Paths.Data
case backend.Snapshot:
case restic.SnapshotFile:
dir = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
dir = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
dir = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
dir = backend.Paths.Keys
default:
dir = string(h.Type)
@@ -52,12 +58,12 @@ type restBackend struct {
}
// Open opens the REST backend with the given config.
func Open(cfg Config) (backend.Backend, error) {
func Open(cfg Config) (restic.Backend, error) {
connChan := make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ {
connChan <- struct{}{}
}
tr := &http.Transport{}
tr := &http.Transport{MaxIdleConnsPerHost: connLimit}
client := http.Client{Transport: tr}
return &restBackend{url: cfg.URL, connChan: connChan, client: client}, nil
@@ -68,99 +74,119 @@ func (b *restBackend) Location() string {
return b.url.String()
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
if err := h.Valid(); err != nil {
return 0, err
}
req, err := http.NewRequest("GET", restPath(b.url, h), nil)
if err != nil {
return 0, err
}
req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(len(p))))
<-b.connChan
resp, err := b.client.Do(req)
b.connChan <- struct{}{}
if resp != nil {
defer func() {
e := resp.Body.Close()
if err == nil {
err = e
}
}()
}
if err != nil {
return 0, err
}
if resp.StatusCode != 200 && resp.StatusCode != 206 {
return 0, fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
return io.ReadFull(resp.Body, p)
}
// Save stores data in the backend at the handle.
func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
func (b *restBackend) Save(h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
return err
}
// make sure that client.Post() cannot close the reader by wrapping it in
// backend.Closer, which has a no-op Close method.
rd = backend.Closer{Reader: rd}
<-b.connChan
resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", bytes.NewReader(p))
resp, err := b.client.Post(restPath(b.url, h), "binary/octet-stream", rd)
b.connChan <- struct{}{}
if resp != nil {
defer func() {
io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = e
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {
return err
return errors.Wrap(err, "client.Post")
}
if resp.StatusCode != 200 {
return fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
return errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
return nil
}
// Stat returns information about a blob.
func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (b *restBackend) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
req, err := http.NewRequest("GET", restPath(b.url, h), nil)
if err != nil {
return nil, errors.Wrap(err, "http.NewRequest")
}
byteRange := fmt.Sprintf("bytes=%d-", offset)
if length > 0 {
byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
}
req.Header.Add("Range", byteRange)
debug.Log("Load(%v) send range %v", h, byteRange)
<-b.connChan
resp, err := b.client.Do(req)
b.connChan <- struct{}{}
if err != nil {
if resp != nil {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
return nil, errors.Wrap(err, "client.Do")
}
if resp.StatusCode != 200 && resp.StatusCode != 206 {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
return nil, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
return resp.Body, nil
}
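The Range header built above uses the closed-interval syntax from RFC 7233, so a request for length bytes at offset ends at offset+length-1, and length == 0 requests everything from offset onward. The computation in isolation:
func byteRange(offset int64, length int) string {
	if length > 0 {
		// closed interval: e.g. offset 100, length 50 -> "bytes=100-149"
		return fmt.Sprintf("bytes=%d-%d", offset, offset+int64(length)-1)
	}
	return fmt.Sprintf("bytes=%d-", offset) // open-ended: offset to EOF
}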
// Stat returns information about a blob.
func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
<-b.connChan
resp, err := b.client.Head(restPath(b.url, h))
b.connChan <- struct{}{}
if err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, errors.Wrap(err, "client.Head")
}
io.Copy(ioutil.Discard, resp.Body)
if err = resp.Body.Close(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, errors.Wrap(err, "Close")
}
if resp.StatusCode != 200 {
return backend.BlobInfo{}, fmt.Errorf("unexpected HTTP response code %v", resp.StatusCode)
return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
}
if resp.ContentLength < 0 {
return backend.BlobInfo{}, errors.New("negative content length")
return restic.FileInfo{}, errors.New("negative content length")
}
bi := backend.BlobInfo{
bi := restic.FileInfo{
Size: resp.ContentLength,
}
@@ -168,8 +194,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
_, err := b.Stat(backend.Handle{Type: t, Name: name})
func (b *restBackend) Test(h restic.Handle) (bool, error) {
_, err := b.Stat(h)
if err != nil {
return false, nil
}
@@ -178,38 +204,38 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
func (b *restBackend) Remove(t backend.Type, name string) error {
h := backend.Handle{Type: t, Name: name}
func (b *restBackend) Remove(h restic.Handle) error {
if err := h.Valid(); err != nil {
return err
}
req, err := http.NewRequest("DELETE", restPath(b.url, h), nil)
if err != nil {
return err
return errors.Wrap(err, "http.NewRequest")
}
<-b.connChan
resp, err := b.client.Do(req)
b.connChan <- struct{}{}
if err != nil {
return err
return errors.Wrap(err, "client.Do")
}
if resp.StatusCode != 200 {
return errors.New("blob not removed")
}
io.Copy(ioutil.Discard, resp.Body)
return resp.Body.Close()
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
url := restPath(b.url, backend.Handle{Type: t})
url := restPath(b.url, restic.Handle{Type: t})
if !strings.HasSuffix(url, "/") {
url += "/"
}
@@ -219,7 +245,14 @@ func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
b.connChan <- struct{}{}
if resp != nil {
defer resp.Body.Close()
defer func() {
io.Copy(ioutil.Discard, resp.Body)
e := resp.Body.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
}
if err != nil {


@@ -2,35 +2,35 @@ package rest
import (
"net/url"
"restic/backend"
"restic"
"testing"
)
var restPathTests = []struct {
Handle backend.Handle
Handle restic.Handle
URL *url.URL
Result string
}{
{
URL: parseURL("https://hostname.foo"),
Handle: backend.Handle{
Type: backend.Data,
Handle: restic.Handle{
Type: restic.DataFile,
Name: "foobar",
},
Result: "https://hostname.foo/data/foobar",
},
{
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{
Type: backend.Lock,
Handle: restic.Handle{
Type: restic.LockFile,
Name: "foobar",
},
Result: "https://hostname.foo:1234/prefix/repo/locks/foobar",
},
{
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{
Type: backend.Config,
Handle: restic.Handle{
Type: restic.ConfigFile,
Name: "foobar",
},
Result: "https://hostname.foo:1234/prefix/repo/config",


@@ -1,12 +1,13 @@
package rest_test
import (
"errors"
"fmt"
"net/url"
"os"
"restic"
"restic/errors"
"restic/backend"
"restic/backend/rest"
"restic/backend/test"
. "restic/test"
@@ -30,13 +31,13 @@ func init() {
URL: url,
}
test.CreateFn = func() (backend.Backend, error) {
test.CreateFn = func() (restic.Backend, error) {
be, err := rest.Open(cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(backend.Config, "")
exists, err := be.Test(restic.Handle{Type: restic.ConfigFile, Name: ""})
if err != nil {
return nil, err
}
@@ -48,7 +49,7 @@ func init() {
return be, nil
}
test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
return rest.Open(cfg)
}
}


@@ -1,10 +1,11 @@
package s3
import (
"errors"
"net/url"
"path"
"strings"
"restic/errors"
)
// Config contains all configuration necessary to connect to an s3 compatible
@@ -31,7 +32,7 @@ func ParseConfig(s string) (interface{}, error) {
// bucket name and prefix
url, err := url.Parse(s[3:])
if err != nil {
return nil, err
return nil, errors.Wrap(err, "url.Parse")
}
if url.Path == "" {


@@ -2,17 +2,21 @@ package s3
import (
"bytes"
"errors"
"io"
"net/http"
"path"
"restic"
"strings"
"restic/backend"
"restic/errors"
"github.com/minio/minio-go"
"restic/backend"
"restic/debug"
)
const connLimit = 10
const connLimit = 40
// s3 is a backend which stores the data on an S3 endpoint.
type s3 struct {
@@ -24,43 +28,43 @@ type s3 struct {
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (backend.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg)
func Open(cfg Config) (restic.Backend, error) {
debug.Log("open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "minio.New")
}
be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
tr := &http.Transport{MaxIdleConnsPerHost: connLimit}
client.SetCustomTransport(tr)
be.createConnections()
if err := client.BucketExists(cfg.Bucket); err != nil {
debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
found, err := client.BucketExists(cfg.Bucket)
if err != nil {
debug.Log("BucketExists(%v) returned err %v", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
if !found {
// create new bucket with default ACL in default region
err = client.MakeBucket(cfg.Bucket, "")
if err != nil {
return nil, err
return nil, errors.Wrap(err, "client.MakeBucket")
}
}
return be, nil
}
func (be *s3) s3path(t backend.Type, name string) string {
var path string
if be.prefix != "" {
path = be.prefix + "/"
func (be *s3) s3path(h restic.Handle) string {
if h.Type == restic.ConfigFile {
return path.Join(be.prefix, string(h.Type))
}
path += string(t)
if t == backend.Config {
return path
}
return path + "/" + name
return path.Join(be.prefix, string(h.Type), h.Name)
}
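The rewritten s3path collapses the old string concatenation into path.Join. Assuming FileType's underlying string values are "data", "config", and so on, it behaves like:
be := &s3{prefix: "repo1"}
be.s3path(restic.Handle{Type: restic.DataFile, Name: "ab12"}) // "repo1/data/ab12"
be.s3path(restic.Handle{Type: restic.ConfigFile})             // "repo1/config"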
func (be *s3) createConnections() {
@@ -75,55 +79,20 @@ func (be *s3) Location() string {
return be.bucketname
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
path := be.s3path(h.Type, h.Name)
obj, err := be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.GetReader", " err %v", err)
return 0, err
}
if off > 0 {
_, err = obj.Seek(off, 0)
if err != nil {
return 0, err
}
}
<-be.connChan
defer func() {
be.connChan <- struct{}{}
}()
// This may not read the whole object, so ensure object
// is closed to avoid duplicate connections.
n, err := io.ReadFull(obj, p)
if err != nil {
obj.Close()
} else {
err = obj.Close()
}
return n, err
}
// Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) {
func (be *s3) Save(h restic.Handle, rd io.Reader) (err error) {
if err := h.Valid(); err != nil {
return err
}
debug.Log("s3.Save", "%v bytes at %d", len(p), h)
debug.Log("Save %v", h)
path := be.s3path(h.Type, h.Name)
objName := be.s3path(h)
// Check key does not already exist
_, err = be.client.StatObject(be.bucketname, path)
_, err = be.client.StatObject(be.bucketname, objName)
if err == nil {
debug.Log("s3.blob.Finalize()", "%v already exists", h)
debug.Log("%v already exists", h)
return errors.New("key already exists")
}
@@ -132,38 +101,159 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
be.connChan <- struct{}{}
}()
debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
be.bucketname, path, int64(len(p)), "binary/octet-stream")
n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)
debug.Log("PutObject(%v, %v)",
be.bucketname, objName)
n, err := be.client.PutObject(be.bucketname, objName, rd, "binary/octet-stream")
debug.Log("%v -> %v bytes, err %#v", objName, n, err)
return errors.Wrap(err, "client.PutObject")
}
// wrapReader wraps an io.ReadCloser to run an additional function on Close.
type wrapReader struct {
io.ReadCloser
f func()
}
func (wr wrapReader) Close() error {
err := wr.ReadCloser.Close()
wr.f()
return err
}
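wrapReader is a tiny decorator: Close first closes the wrapped ReadCloser, then runs the callback. Load below uses it to hand the semaphore token back only when the caller closes the object. A self-contained illustration of the pattern (names here are hypothetical):
done := false
rd := wrapReader{
	ReadCloser: ioutil.NopCloser(strings.NewReader("payload")),
	f:          func() { done = true }, // runs after the inner Close
}
_, _ = ioutil.ReadAll(rd)
_ = rd.Close() // done is now true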
// Stat returns information about a blob.
func (be s3) Stat(h backend.Handle) (backend.BlobInfo, error) {
debug.Log("s3.Stat", "%v", h)
path := be.s3path(h.Type, h.Name)
obj, err := be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err)
return backend.BlobInfo{}, err
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (be *s3) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
if length < 0 {
return nil, errors.Errorf("invalid length %d", length)
}
var obj *minio.Object
objName := be.s3path(h)
// get token for connection
<-be.connChan
obj, err := be.client.GetObject(be.bucketname, objName)
if err != nil {
debug.Log(" err %v", err)
// return token
be.connChan <- struct{}{}
return nil, errors.Wrap(err, "client.GetObject")
}
// if we're going to read the whole object, just pass it on.
if length == 0 {
debug.Log("Load %v: pass on object", h)
_, err = obj.Seek(offset, 0)
if err != nil {
_ = obj.Close()
// return token
be.connChan <- struct{}{}
return nil, errors.Wrap(err, "obj.Seek")
}
rd := wrapReader{
ReadCloser: obj,
f: func() {
debug.Log("Close()")
// return token
be.connChan <- struct{}{}
},
}
return rd, nil
}
defer func() {
// return token
be.connChan <- struct{}{}
}()
// otherwise use a buffer with ReadAt
info, err := obj.Stat()
if err != nil {
_ = obj.Close()
return nil, errors.Wrap(err, "obj.Stat")
}
if offset > info.Size {
_ = obj.Close()
return nil, errors.New("offset larger than file size")
}
l := int64(length)
if offset+l > info.Size {
l = info.Size - offset
}
buf := make([]byte, l)
n, err := obj.ReadAt(buf, offset)
debug.Log("Load %v: use buffer with ReadAt: %v, %v", h, n, err)
if err == io.EOF {
debug.Log("Load %v: shorten buffer %v -> %v", h, len(buf), n)
buf = buf[:n]
err = nil
}
if err != nil {
_ = obj.Close()
return nil, errors.Wrap(err, "obj.ReadAt")
}
return backend.Closer{Reader: bytes.NewReader(buf)}, nil
}
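The buffered branch clamps the requested length to the object size before allocating. Worked through: for a 100-byte object, Load(h, 20, 90) yields l = 10 and a 10-byte buffer. The same arithmetic as an isolated, hypothetical helper:
func clampLength(size, offset int64, length int) int64 {
	l := int64(length)
	if offset+l > size {
		l = size - offset // never read past EOF
	}
	return l
}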
// Stat returns information about a blob.
func (be *s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("%v", h)
objName := be.s3path(h)
var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, objName)
if err != nil {
debug.Log("GetObject() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
defer func() {
e := obj.Close()
if err == nil {
err = errors.Wrap(e, "Close")
}
}()
fi, err := obj.Stat()
if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err)
return backend.BlobInfo{}, err
debug.Log("Stat() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return backend.BlobInfo{Size: fi.Size}, nil
return restic.FileInfo{Size: fi.Size}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t backend.Type, name string) (bool, error) {
func (be *s3) Test(h restic.Handle) (bool, error) {
found := false
path := be.s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path)
objName := be.s3path(h)
_, err := be.client.StatObject(be.bucketname, objName)
if err == nil {
found = true
}
@@ -173,21 +263,21 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
func (be *s3) Remove(t backend.Type, name string) error {
path := be.s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
return err
func (be *s3) Remove(h restic.Handle) error {
objName := be.s3path(h)
err := be.client.RemoveObject(be.bucketname, objName)
debug.Log("Remove(%v) -> err %v", h, err)
return errors.Wrap(err, "client.RemoveObject")
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t)
func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("listing %v", t)
ch := make(chan string)
prefix := be.s3path(t, "")
prefix := be.s3path(restic.Handle{Type: t}) + "/"
listresp := be.client.ListObjects(be.bucketname, prefix, true, done)
@@ -211,11 +301,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
}
// Remove keys for a specified backend type.
func (be *s3) removeKeys(t backend.Type) error {
func (be *s3) removeKeys(t restic.FileType) error {
done := make(chan struct{})
defer close(done)
for key := range be.List(backend.Data, done) {
err := be.Remove(backend.Data, key)
for key := range be.List(restic.DataFile, done) {
err := be.Remove(restic.Handle{Type: restic.DataFile, Name: key})
if err != nil {
return err
}
@@ -226,12 +316,12 @@ func (be *s3) removeKeys(t backend.Type) error {
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error {
alltypes := []backend.Type{
backend.Data,
backend.Key,
backend.Lock,
backend.Snapshot,
backend.Index}
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(t)
@@ -240,7 +330,7 @@ func (be *s3) Delete() error {
}
}
return be.Remove(backend.Config, "")
return be.Remove(restic.Handle{Type: restic.ConfigFile})
}
// Close does nothing


@@ -1,12 +1,13 @@
package s3_test
import (
"errors"
"fmt"
"net/url"
"os"
"restic"
"restic/errors"
"restic/backend"
"restic/backend/s3"
"restic/backend/test"
. "restic/test"
@@ -37,13 +38,13 @@ func init() {
cfg.UseHTTP = true
}
test.CreateFn = func() (backend.Backend, error) {
test.CreateFn = func() (restic.Backend, error) {
be, err := s3.Open(cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(backend.Config, "")
exists, err := be.Test(restic.Handle{Type: restic.ConfigFile})
if err != nil {
return nil, err
}
@@ -55,7 +56,7 @@ func init() {
return be, nil
}
test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
return s3.Open(cfg)
}


@@ -1,10 +1,11 @@
package sftp
import (
"errors"
"net/url"
"path"
"strings"
"restic/errors"
)
// Config collects all information required to connect to an sftp server.
@@ -25,13 +26,18 @@ func ParseConfig(s string) (interface{}, error) {
// parse the "sftp://user@host/path" url format
url, err := url.Parse(s)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "url.Parse")
}
if url.User != nil {
user = url.User.Username()
}
host = url.Host
dir = url.Path[1:]
dir = url.Path
if dir == "" {
return nil, errors.Errorf("invalid backend %q, no directory specified", s)
}
dir = dir[1:]
case strings.HasPrefix(s, "sftp:"):
// parse the sftp:user@host:path format, which means we'll get
// "user@host:path" in s


@@ -74,3 +74,17 @@ func TestParseConfig(t *testing.T) {
}
}
}
var configTestsInvalid = []string{
"sftp://host:dir",
}
func TestParseConfigInvalid(t *testing.T) {
for i, test := range configTestsInvalid {
_, err := ParseConfig(test)
if err == nil {
t.Errorf("test %d: invalid config %s did not return an error", i, test)
continue
}
}
}


@@ -1,20 +1,24 @@
package sftp
import (
"bufio"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"path"
"restic"
"strings"
"time"
"restic/errors"
"github.com/juju/errors"
"github.com/pkg/sftp"
"restic/backend"
"restic/debug"
"github.com/pkg/sftp"
)
const (
@@ -26,16 +30,29 @@ type SFTP struct {
c *sftp.Client
p string
cmd *exec.Cmd
cmd *exec.Cmd
result <-chan error
}
var _ restic.Backend = &SFTP{}
func startClient(program string, args ...string) (*SFTP, error) {
// Connect to a remote host and request the sftp subsystem via the 'ssh'
// command. This assumes that passwordless login is correctly configured.
cmd := exec.Command(program, args...)
// send errors from ssh to stderr
cmd.Stderr = os.Stderr
// prefix the errors with the program name
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, errors.Wrap(err, "cmd.StderrPipe")
}
go func() {
sc := bufio.NewScanner(stderr)
for sc.Scan() {
fmt.Fprintf(os.Stderr, "subprocess %v: %v\n", program, sc.Text())
}
}()
// ignore signals sent to the parent (e.g. SIGINT)
cmd.SysProcAttr = ignoreSigIntProcAttr()
@@ -43,25 +60,33 @@ func startClient(program string, args ...string) (*SFTP, error) {
// get stdin and stdout
wr, err := cmd.StdinPipe()
if err != nil {
log.Fatal(err)
return nil, errors.Wrap(err, "cmd.StdinPipe")
}
rd, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
return nil, errors.Wrap(err, "cmd.StdoutPipe")
}
// start the process
if err := cmd.Start(); err != nil {
log.Fatal(err)
return nil, errors.Wrap(err, "cmd.Start")
}
// wait in a different goroutine
ch := make(chan error, 1)
go func() {
err := cmd.Wait()
debug.Log("ssh command exited, err %v", err)
ch <- errors.Wrap(err, "cmd.Wait")
}()
// open the SFTP session
client, err := sftp.NewClientPipe(rd, wr)
if err != nil {
log.Fatal(err)
return nil, errors.Errorf("unable to start the sftp session, error: %v", err)
}
return &SFTP{c: client, cmd: cmd}, nil
return &SFTP{c: client, cmd: cmd, result: ch}, nil
}
func paths(dir string) []string {
@@ -76,19 +101,35 @@ func paths(dir string) []string {
}
}
// clientError returns an error if the client has exited. Otherwise, nil is
// returned immediately.
func (r *SFTP) clientError() error {
select {
case err := <-r.result:
debug.Log("client has exited with err %v", err)
return err
default:
}
return nil
}
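clientError relies on the standard non-blocking receive: a select with a default case returns immediately while the subprocess is still running. Reduced to its core (illustrative helper, not from the diff):
func exited(result <-chan error) error {
	select {
	case err := <-result:
		return err // ssh has terminated; report why
	default:
		return nil // still running
	}
}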
// Open opens an sftp backend. When the command is started via
// exec.Command, it is expected to speak sftp on stdin/stdout. The backend
// is expected at the given path.
// is expected at the given path. `dir` must be delimited by forward slashes
// ("/"), which is required by sftp.
func Open(dir string, program string, args ...string) (*SFTP, error) {
debug.Log("open backend with program %v, %v at %v", program, args, dir)
sftp, err := startClient(program, args...)
if err != nil {
debug.Log("unable to start program: %v", err)
return nil, err
}
// test if all necessary dirs and files are there
for _, d := range paths(dir) {
if _, err := sftp.c.Lstat(d); err != nil {
return nil, fmt.Errorf("%s does not exist", d)
return nil, errors.Errorf("%s does not exist", d)
}
}
@@ -112,14 +153,17 @@ func buildSSHCommand(cfg Config) []string {
}
// OpenWithConfig opens an sftp backend as described by the config by running
// "ssh" with the appropiate arguments.
// "ssh" with the appropriate arguments.
func OpenWithConfig(cfg Config) (*SFTP, error) {
debug.Log("open with config %v", cfg)
return Open(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
}
// Create creates all the necessary files and directories for a new sftp
// backend at dir. Afterwards a new config blob should be created.
// backend at dir. Afterwards a new config blob should be created. `dir` must
// be delimited by forward slashes ("/"), which is required by sftp.
func Create(dir string, program string, args ...string) (*SFTP, error) {
debug.Log("%v %v", program, args)
sftp, err := startClient(program, args...)
if err != nil {
return nil, err
@@ -139,14 +183,9 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
}
}
err = sftp.c.Close()
err = sftp.Close()
if err != nil {
return nil, err
}
err = sftp.cmd.Wait()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Close")
}
// open backend
@@ -154,8 +193,9 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
}
// CreateWithConfig creates an sftp backend as described by the config by running
// "ssh" with the appropiate arguments.
// "ssh" with the appropriate arguments.
func CreateWithConfig(cfg Config) (*SFTP, error) {
debug.Log("config %v", cfg)
return Create(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
}
@@ -170,9 +210,8 @@ func (r *SFTP) tempFile() (string, *sftp.File, error) {
buf := make([]byte, tempfileRandomSuffixLength)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
return "", nil, errors.Annotatef(err,
"unable to read %d random bytes for tempfile name",
tempfileRandomSuffixLength)
return "", nil, errors.Errorf("unable to read %d random bytes for tempfile name: %v",
tempfileRandomSuffixLength, err)
}
// construct tempfile name
@@ -181,7 +220,7 @@ func (r *SFTP) tempFile() (string, *sftp.File, error) {
// create file in temp dir
f, err := r.c.Create(name)
if err != nil {
return "", nil, errors.Annotatef(err, "creating tempfile %q failed", name)
return "", nil, errors.Errorf("creating tempfile %q failed: %v", name, err)
}
return name, f, nil
@@ -195,11 +234,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
return nil
}
return fmt.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
}
// create parent directories
errMkdirAll := r.mkdirAll(filepath.Dir(dir), backend.Modes.Dir)
errMkdirAll := r.mkdirAll(path.Dir(dir), backend.Modes.Dir)
// create directory
errMkdir := r.c.Mkdir(dir)
@@ -208,11 +247,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
fi, err = r.c.Lstat(dir)
if err != nil {
// return previous errors
return fmt.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir)
return errors.Errorf("mkdirAll(%s): unable to create directories: %v, %v", dir, errMkdirAll, errMkdir)
}
if !fi.IsDir() {
return fmt.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
return errors.Errorf("mkdirAll(%s): entry exists but is not a directory", dir)
}
// set mode
@@ -220,12 +259,12 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
}
// Rename temp file to final name according to type and name.
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
filename := r.filename(t, name)
func (r *SFTP) renameFile(oldname string, h restic.Handle) error {
filename := r.filename(h)
// create directories if necessary
if t == backend.Data {
err := r.mkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if h.Type == restic.DataFile {
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
if err != nil {
return err
}
@@ -233,165 +272,188 @@ func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
// test if new file exists
if _, err := r.c.Lstat(filename); err == nil {
return fmt.Errorf("Close(): file %v already exists", filename)
return errors.Errorf("Close(): file %v already exists", filename)
}
err := r.c.Rename(oldname, filename)
if err != nil {
return err
return errors.Wrap(err, "Rename")
}
// set mode to read-only
fi, err := r.c.Lstat(filename)
if err != nil {
return err
return errors.Wrap(err, "Lstat")
}
return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
err = r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
return errors.Wrap(err, "Chmod")
}
// Join joins the given paths and cleans them afterwards.
// Join joins the given paths and cleans them afterwards. This always uses
// forward slashes, which is required by sftp.
func Join(parts ...string) string {
return filepath.Clean(strings.Join(parts, "/"))
return path.Clean(path.Join(parts...))
}
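The switch from filepath to path matters on Windows: path always uses forward slashes, which is what the sftp protocol expects, while filepath uses the host's separator. For instance:
path.Join("repo", "data", "ab") // "repo/data/ab" on every platform
path.Clean("repo//data/./ab")   // "repo/data/ab"
filepath.Join("repo", "data")   // "repo\data" on Windows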
// Construct path for given backend.Type and name.
func (r *SFTP) filename(t backend.Type, name string) string {
if t == backend.Config {
// Construct path for given restic.FileType and name.
func (r *SFTP) filename(h restic.Handle) string {
if h.Type == restic.ConfigFile {
return Join(r.p, "config")
}
return Join(r.dirname(t, name), name)
return Join(r.dirname(h), h.Name)
}
// Construct directory for given backend.Type.
func (r *SFTP) dirname(t backend.Type, name string) string {
func (r *SFTP) dirname(h restic.Handle) string {
var n string
switch t {
case backend.Data:
switch h.Type {
case restic.DataFile:
n = backend.Paths.Data
if len(name) > 2 {
n = Join(n, name[:2])
if len(h.Name) > 2 {
n = Join(n, h.Name[:2])
}
case backend.Snapshot:
case restic.SnapshotFile:
n = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
n = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
n = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
n = backend.Paths.Keys
}
return Join(r.p, n)
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
if err := h.Valid(); err != nil {
return 0, err
}
f, err := r.c.Open(r.filename(h.Type, h.Name))
if err != nil {
return 0, err
}
defer func() {
e := f.Close()
if err == nil && e != nil {
err = e
}
}()
if off > 0 {
_, err = f.Seek(off, 0)
if err != nil {
return 0, err
}
}
return io.ReadFull(f, p)
}
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
func (r *SFTP) Save(h restic.Handle, rd io.Reader) (err error) {
debug.Log("save to %v", h)
if err := r.clientError(); err != nil {
return err
}
if err := h.Valid(); err != nil {
return err
}
filename, tmpfile, err := r.tempFile()
debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)
n, err := tmpfile.Write(p)
if err != nil {
return err
}
if n != len(p) {
return errors.New("not all bytes writen")
n, err := io.Copy(tmpfile, rd)
if err != nil {
return errors.Wrap(err, "Write")
}
debug.Log("saved %v (%d bytes) to %v", h, n, filename)
err = tmpfile.Close()
if err != nil {
return err
return errors.Wrap(err, "Close")
}
err = r.renameFile(filename, h.Type, h.Name)
debug.Log("sftp.Save", "save %v: rename %v: %v",
h, filepath.Base(filename), err)
err = r.renameFile(filename, h)
debug.Log("save %v: rename %v: %v",
h, path.Base(filename), err)
return err
}
// Load returns a reader that yields the contents of the file at h at the
// given offset. If length is nonzero, only a portion of the file is
// returned. rd must be closed after use.
func (r *SFTP) Load(h restic.Handle, length int, offset int64) (io.ReadCloser, error) {
debug.Log("Load %v, length %v, offset %v", h, length, offset)
if err := h.Valid(); err != nil {
return nil, err
}
if offset < 0 {
return nil, errors.New("offset is negative")
}
f, err := r.c.Open(r.filename(h))
if err != nil {
return fmt.Errorf("sftp: renameFile: %v", err)
return nil, err
}
return nil
if offset > 0 {
_, err = f.Seek(offset, 0)
if err != nil {
_ = f.Close()
return nil, err
}
}
if length > 0 {
return backend.LimitReadCloser(f, int64(length)), nil
}
return f, nil
}
// Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("Stat(%v)", h)
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, err
}
fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
fi, err := r.c.Lstat(r.filename(h))
if err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, errors.Wrap(err, "Lstat")
}
return backend.BlobInfo{Size: fi.Size()}, nil
return restic.FileInfo{Size: fi.Size()}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
_, err := r.c.Lstat(r.filename(t, name))
if os.IsNotExist(err) {
func (r *SFTP) Test(h restic.Handle) (bool, error) {
debug.Log("Test(%v)", h)
if err := r.clientError(); err != nil {
return false, err
}
_, err := r.c.Lstat(r.filename(h))
if os.IsNotExist(errors.Cause(err)) {
return false, nil
}
if err != nil {
return false, err
return false, errors.Wrap(err, "Lstat")
}
return true, nil
}
// Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error {
return r.c.Remove(r.filename(t, name))
func (r *SFTP) Remove(h restic.Handle) error {
debug.Log("Remove(%v)", h)
if err := r.clientError(); err != nil {
return err
}
return r.c.Remove(r.filename(h))
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("list all %v", t)
ch := make(chan string)
go func() {
defer close(ch)
if t == backend.Data {
if t == restic.DataFile {
// read first level
basedir := r.dirname(t, "")
basedir := r.dirname(restic.Handle{Type: t})
list1, err := r.c.ReadDir(basedir)
if err != nil {
@@ -424,7 +486,7 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
}
}
} else {
entries, err := r.c.ReadDir(r.dirname(t, ""))
entries, err := r.c.ReadDir(r.dirname(restic.Handle{Type: t}))
if err != nil {
return
}
@@ -448,18 +510,30 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
}
var closeTimeout = 2 * time.Second
// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
debug.Log("")
if r == nil {
return nil
}
err := r.c.Close()
debug.Log("sftp.Close", "Close returned error %v", err)
debug.Log("Close returned error %v", err)
// wait for closeTimeout before killing the process
select {
case err := <-r.result:
return err
case <-time.After(closeTimeout):
}
if err := r.cmd.Process.Kill(); err != nil {
return err
}
return r.cmd.Wait()
// get the error, but ignore it
<-r.result
return nil
}
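Close implements the usual graceful-then-forceful shutdown: wait up to closeTimeout for the ssh subprocess to exit on its own, then kill it and drain the result channel so the waiting goroutine can finish. The same flow as a hypothetical standalone helper:
func waitOrKill(result <-chan error, kill func() error, timeout time.Duration) error {
	select {
	case err := <-result:
		return err // exited on its own
	case <-time.After(timeout):
	}
	if err := kill(); err != nil {
		return err
	}
	<-result // reap the exit status, deliberately discarded
	return nil
}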


@@ -1,13 +1,14 @@
package sftp_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"restic"
"strings"
"restic/backend"
"restic/errors"
"restic/backend/sftp"
"restic/backend/test"
@@ -28,7 +29,6 @@ func createTempdir() error {
return err
}
fmt.Printf("created new test backend at %v\n", tempdir)
tempBackendDir = tempdir
return nil
}
@@ -39,7 +39,7 @@ func init() {
for _, dir := range strings.Split(TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
_, err := os.Stat(testpath)
if !os.IsNotExist(err) {
if !os.IsNotExist(errors.Cause(err)) {
sftpserver = testpath
break
}
@@ -50,21 +50,23 @@ func init() {
return
}
test.CreateFn = func() (backend.Backend, error) {
args := []string{"-e"}
test.CreateFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err
}
return sftp.Create(tempBackendDir, sftpserver)
return sftp.Create(tempBackendDir, sftpserver, args...)
}
test.OpenFn = func() (backend.Backend, error) {
test.OpenFn = func() (restic.Backend, error) {
err := createTempdir()
if err != nil {
return nil, err
}
return sftp.Open(tempBackendDir, sftpserver)
return sftp.Open(tempBackendDir, sftpserver, args...)
}
test.CleanupFn = func() error {
@@ -72,7 +74,6 @@ func init() {
return nil
}
fmt.Printf("removing test backend at %v\n", tempBackendDir)
err := os.RemoveAll(tempBackendDir)
tempBackendDir = ""
return err


@@ -6,27 +6,32 @@ import (
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"restic"
"restic/errors"
"sort"
"strings"
"testing"
"restic/test"
"restic/backend"
. "restic/test"
)
// CreateFn is a function that creates a temporary repository for the tests.
var CreateFn func() (backend.Backend, error)
var CreateFn func() (restic.Backend, error)
// OpenFn is a function that opens a previously created temporary repository.
var OpenFn func() (backend.Backend, error)
var OpenFn func() (restic.Backend, error)
// CleanupFn removes temporary files and directories created during the tests.
var CleanupFn func() error
var but backend.Backend // backendUnderTest
var but restic.Backend // backendUnderTest
var butInitialized bool
func open(t testing.TB) backend.Backend {
func open(t testing.TB) restic.Backend {
if OpenFn == nil {
t.Fatal("OpenFn not set")
}
@@ -116,7 +121,7 @@ func TestCreateWithConfig(t testing.TB) {
defer close(t)
// save a config
store(t, b, backend.Config, []byte("test config"))
store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail
_, err := CreateFn()
@@ -125,7 +130,7 @@ func TestCreateWithConfig(t testing.TB) {
}
// remove config
err = b.Remove(backend.Config, "")
err = b.Remove(restic.Handle{Type: restic.ConfigFile, Name: ""})
if err != nil {
t.Fatalf("unexpected error removing config: %v", err)
}
@@ -150,12 +155,12 @@ func TestConfig(t testing.TB) {
var testString = "Config"
// create config and read it back
_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil)
_, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile})
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}
err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString))
err = b.Save(restic.Handle{Type: restic.ConfigFile}, strings.NewReader(testString))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
@@ -163,8 +168,8 @@ func TestConfig(t testing.TB) {
// try accessing the config with different names, should all return the
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.Config, Name: name}
buf, err := backend.LoadAll(b, h, nil)
h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(b, h)
if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err)
}
@@ -180,27 +185,36 @@ func TestLoad(t testing.TB) {
b := open(t)
defer close(t)
_, err := b.Load(backend.Handle{}, nil, 0)
_, err := b.Load(restic.Handle{}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for invalid handle")
}
_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0)
_, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0)
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
length := rand.Intn(1<<24) + 2000
data := Random(23, length)
id := backend.Hash(data)
data := test.Random(23, length)
id := restic.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
err = b.Save(handle, data)
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(handle, bytes.NewReader(data))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
rd, err := b.Load(handle, 100, -1)
if err == nil {
t.Fatalf("Load() returned no error for negative offset!")
}
if rd != nil {
t.Fatalf("Load() returned a non-nil reader for negative offset!")
}
for i := 0; i < 50; i++ {
l := rand.Intn(length + 2000)
o := rand.Intn(length + 2000)
@@ -213,73 +227,87 @@ func TestLoad(t testing.TB) {
d = d[:0]
}
getlen := l
if l >= len(d) && rand.Float32() >= 0.5 {
getlen = 0
}
if l > 0 && l < len(d) {
d = d[:l]
}
buf := make([]byte, l)
n, err := b.Load(handle, buf, int64(o))
// if we requested data beyond the end of the file, ignore
// ErrUnexpectedEOF error
if l > len(d) && err == io.ErrUnexpectedEOF {
err = nil
buf = buf[:len(d)]
}
rd, err := b.Load(handle, getlen, int64(o))
if err != nil {
t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
t.Errorf("Load(%d, %d) returned unexpected error: %v", l, o, err)
continue
}
if n != len(buf) {
t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
len(buf), int64(o), len(buf), n)
buf, err := ioutil.ReadAll(rd)
if err != nil {
t.Errorf("Load(%d, %d) ReadAll() returned unexpected error: %v", l, o, err)
rd.Close()
continue
}
if l <= len(d) && len(buf) != l {
t.Errorf("Load(%d, %d) wrong number of bytes read: want %d, got %d", l, o, l, len(buf))
rd.Close()
continue
}
if l > len(d) && len(buf) != len(d) {
t.Errorf("Load(%d, %d) wrong number of bytes read for overlong read: want %d, got %d", l, o, l, len(buf))
rd.Close()
continue
}
buf = buf[:n]
if !bytes.Equal(buf, d) {
t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
t.Errorf("Load(%d, %d) returned wrong bytes", l, o)
rd.Close()
continue
}
err = rd.Close()
if err != nil {
t.Errorf("Load(%d, %d) rd.Close() returned unexpected error: %v", l, o, err)
continue
}
}
// load with a too-large buffer, this should return io.ErrUnexpectedEOF
buf := make([]byte, length+100)
n, err := b.Load(handle, buf, 0)
if n != length {
t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
}
test.OK(t, b.Remove(handle))
}
if err != io.ErrUnexpectedEOF {
t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
}
type errorCloser struct {
io.Reader
t testing.TB
}
OK(t, b.Remove(backend.Data, id.String()))
func (ec errorCloser) Close() error {
ec.t.Error("forbidden method close was called")
return errors.New("forbidden method close was called")
}
// TestSave tests saving data in the backend.
func TestSave(t testing.TB) {
b := open(t)
defer close(t)
var id backend.ID
var id restic.ID
for i := 0; i < 10; i++ {
length := rand.Intn(1<<23) + 200000
data := Random(23, length)
data := test.Random(23, length)
// use the first 32 byte as the ID
copy(id[:], data)
h := backend.Handle{
Type: backend.Data,
h := restic.Handle{
Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i),
}
err := b.Save(h, data)
OK(t, err)
err := b.Save(h, bytes.NewReader(data))
test.OK(t, err)
buf, err := backend.LoadAll(b, h, nil)
OK(t, err)
buf, err := backend.LoadAll(b, h)
test.OK(t, err)
if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
}
@@ -289,17 +317,57 @@ func TestSave(t testing.TB) {
}
fi, err := b.Stat(h)
OK(t, err)
test.OK(t, err)
if fi.Size != int64(len(data)) {
t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
}
err = b.Remove(h.Type, h.Name)
err = b.Remove(h)
if err != nil {
t.Fatalf("error removing item: %v", err)
}
}
// test saving from a tempfile
tmpfile, err := ioutil.TempFile("", "restic-backend-save-test-")
if err != nil {
t.Fatal(err)
}
length := rand.Intn(1<<23) + 200000
data := test.Random(23, length)
copy(id[:], data)
if _, err = tmpfile.Write(data); err != nil {
t.Fatal(err)
}
if _, err = tmpfile.Seek(0, 0); err != nil {
t.Fatal(err)
}
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
// wrap the tempfile in an errorCloser, so we can detect if the backend
// closes the reader
err = b.Save(h, errorCloser{t: t, Reader: tmpfile})
if err != nil {
t.Fatal(err)
}
if err = tmpfile.Close(); err != nil {
t.Fatal(err)
}
if err = os.Remove(tmpfile.Name()); err != nil {
t.Fatal(err)
}
err = b.Remove(h)
if err != nil {
t.Fatalf("error removing item: %v", err)
}
}
var filenameTests = []struct {
@@ -320,14 +388,14 @@ func TestSaveFilenames(t testing.TB) {
defer close(t)
for i, test := range filenameTests {
h := backend.Handle{Name: test.name, Type: backend.Data}
err := b.Save(h, []byte(test.data))
h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(h, strings.NewReader(test.data))
if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err)
continue
}
buf, err := backend.LoadAll(b, h, nil)
buf, err := backend.LoadAll(b, h)
if err != nil {
t.Errorf("test %d failed: Load() returned %v", i, err)
continue
@@ -337,7 +405,7 @@ func TestSaveFilenames(t testing.TB) {
t.Errorf("test %d: returned wrong bytes", i)
}
err = b.Remove(h.Type, h.Name)
err = b.Remove(h)
if err != nil {
t.Errorf("test %d failed: Remove() returned %v", i, err)
continue
@@ -355,18 +423,12 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data)
OK(t, err)
}
func read(t testing.TB, rd io.Reader, expectedData []byte) {
buf, err := ioutil.ReadAll(rd)
OK(t, err)
if expectedData != nil {
Equals(t, expectedData, buf)
}
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) restic.Handle {
id := restic.Hash(data)
h := restic.Handle{Name: id.String(), Type: tpe}
err := b.Save(h, bytes.NewReader(data))
test.OK(t, err)
return h
}
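The reworked store helper names each file after the SHA-256 hash of its contents via restic.Hash, i.e. storage is content-addressed, and now returns the handle so callers can reuse it. In short:
data := []byte("example")
id := restic.Hash(data) // SHA-256 of the content
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
// h.Name is the hex digest, so identical content always maps to the same name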
// TestBackend tests all functions of the backend.
@@ -374,90 +436,100 @@ func TestBackend(t testing.TB) {
b := open(t)
defer close(t)
for _, tpe := range []backend.Type{
backend.Data, backend.Key, backend.Lock,
backend.Snapshot, backend.Index,
for _, tpe := range []restic.FileType{
restic.DataFile, restic.KeyFile, restic.LockFile,
restic.SnapshotFile, restic.IndexFile,
} {
// detect non-existing files
for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
// test if blob is already in repository
ret, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "blob was found to exist before creating")
h := restic.Handle{Type: tpe, Name: id.String()}
ret, err := b.Test(h)
test.OK(t, err)
test.Assert(t, !ret, "blob was found to exist before creating")
// try to stat a not existing blob
h := backend.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h)
Assert(t, err != nil, "blob data could be extracted before creation")
test.Assert(t, err != nil, "blob data could be extracted before creation")
// try to read not existing blob
_, err = b.Load(h, nil, 0)
Assert(t, err != nil, "blob reader could be obtained before creation")
_, err = b.Load(h, 0, 0)
test.Assert(t, err != nil, "blob reader could be obtained before creation")
// try to get string out, should fail
ret, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "id %q was found (but should not have)", test.id)
ret, err = b.Test(h)
test.OK(t, err)
test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
}
// add files
for _, test := range testStrings {
store(t, b, tpe, []byte(test.data))
for _, ts := range testStrings {
store(t, b, tpe, []byte(ts.data))
// test Load()
h := backend.Handle{Type: tpe, Name: test.id}
buf, err := backend.LoadAll(b, h, nil)
OK(t, err)
Equals(t, test.data, string(buf))
h := restic.Handle{Type: tpe, Name: ts.id}
buf, err := backend.LoadAll(b, h)
test.OK(t, err)
test.Equals(t, ts.data, string(buf))
// try to read it out with an offset and a length
start := 1
end := len(test.data) - 2
end := len(ts.data) - 2
length := end - start
buf2 := make([]byte, length)
n, err := b.Load(h, buf2, int64(start))
OK(t, err)
Equals(t, length, n)
Equals(t, test.data[start:end], string(buf2))
rd, err := b.Load(h, len(buf2), int64(start))
test.OK(t, err)
n, err := io.ReadFull(rd, buf2)
test.OK(t, err)
test.Equals(t, len(buf2), n)
remaining, err := io.Copy(ioutil.Discard, rd)
test.OK(t, err)
test.Equals(t, int64(0), remaining)
test.OK(t, rd.Close())
test.Equals(t, ts.data[start:end], string(buf2))
}
// test adding the first file again
test := testStrings[0]
ts := testStrings[0]
// create blob
err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
Assert(t, err != nil, "expected error, got %v", err)
err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, strings.NewReader(ts.data))
test.Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate
err = b.Remove(tpe, test.id)
OK(t, err)
h := restic.Handle{Type: tpe, Name: ts.id}
err = b.Remove(h)
test.OK(t, err)
// test that the blob is gone
ok, err := b.Test(tpe, test.id)
OK(t, err)
Assert(t, ok == false, "removed blob still present")
ok, err := b.Test(h)
test.OK(t, err)
test.Assert(t, ok == false, "removed blob still present")
// create blob
err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
OK(t, err)
err = b.Save(h, strings.NewReader(ts.data))
test.OK(t, err)
// list items
IDs := backend.IDs{}
IDs := restic.IDs{}
for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
IDs = append(IDs, id)
}
list := backend.IDs{}
list := restic.IDs{}
for s := range b.List(tpe, nil) {
list = append(list, ParseID(s))
list = append(list, restic.TestParseID(s))
}
if len(IDs) != len(list) {
@@ -472,19 +544,21 @@ func TestBackend(t testing.TB) {
}
// remove content if requested
if TestCleanupTempDirs {
for _, test := range testStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
if test.TestCleanupTempDirs {
for _, ts := range testStrings {
id, err := restic.ParseID(ts.id)
test.OK(t, err)
found, err := b.Test(tpe, id.String())
OK(t, err)
h := restic.Handle{Type: tpe, Name: id.String()}
OK(t, b.Remove(tpe, id.String()))
found, err := b.Test(h)
test.OK(t, err)
found, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
test.OK(t, b.Remove(h))
found, err = b.Test(h)
test.OK(t, err)
test.Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
}
}
}
@@ -495,7 +569,7 @@ func TestDelete(t testing.TB) {
b := open(t)
defer close(t)
be, ok := b.(backend.Deleter)
be, ok := b.(restic.Deleter)
if !ok {
return
}
@@ -513,7 +587,7 @@ func TestCleanup(t testing.TB) {
return
}
if !TestCleanupTempDirs {
if !test.TestCleanupTempDirs {
t.Logf("not cleaning up backend")
return
}

Some files were not shown because too many files have changed in this diff.