Compare commits

...

575 Commits

Author SHA1 Message Date
Alexander Neumann
c7f5ac22eb add VERSION file for 0.2.0 2016-07-30 11:24:52 +02:00
Alexander Neumann
959df5cc14 Merge pull request #554 from restic/debug-fuse-panic
Fix fuse panic with empty files
2016-07-29 21:51:58 +02:00
Alexander Neumann
e575494353 Correct goreportcard badge URLs 2016-07-29 21:35:06 +02:00
Alexander Neumann
c0fb2c306d Merge pull request #553 from restic/update-minio-go
Update minio-go
2016-07-29 21:23:39 +02:00
Alexander Neumann
8418fed18e Handle empty files correctly 2016-07-29 21:18:32 +02:00
Alexander Neumann
3de989b7bb Fix panic with empty files 2016-07-29 21:05:36 +02:00
Alexander Neumann
5afda94a3c Handle reads with large offsets 2016-07-29 20:55:09 +02:00
Alexander Neumann
56dd4c0595 Update minio-go 2016-07-29 20:28:44 +02:00
Alexander Neumann
a9729eeb1b Merge pull request #552 from benagricola/fix-connection-leak
Explicitly Close() obj after ReadFull()
2016-07-29 20:28:22 +02:00
Ben Agricola
edb1843f24 Explicitly Close() obj after ReadFull()
Signed-off-by: Ben Agricola <bagricola@squiz.co.uk>
2016-07-29 14:18:02 +01:00
Alexander Neumann
e1960cadb2 Merge pull request #548 from mappu/patch-1
idset.go: micro-optimise away redundant scan
2016-07-28 20:05:23 +02:00
mappu
32985f7904 idset.go: micro-optimise away redundant scan 2016-07-26 09:36:31 +12:00
Alexander Neumann
e8e45fe2e3 Merge pull request #545 from restic/fix-528
Don't report valid types as invalid
2016-07-20 21:17:43 +02:00
Alexander Neumann
6b7ddf1b03 Don't report valid types as invalid
Closes #528
2016-07-20 20:46:57 +02:00
Alexander Neumann
b8c7622a8a Merge pull request #543 from mappu/master
Updates for build.go
2016-07-17 21:32:13 +02:00
mappu
983e509388 build.go: gofmt 2016-07-16 10:14:41 +12:00
mappu
c9400d5c61 build.go: Support cross-compilation via new --goos and --goarch flags 2016-07-16 09:42:41 +12:00
mappu
f967e90a96 build.go: Strip harder (add -w flag) 2016-07-16 09:42:22 +12:00
mappu
38d4522ea5 build.go: Add go1.7 to list of linkers requiring -Xfoo=bar syntax 2016-07-16 09:42:02 +12:00
Alexander Neumann
c9ab75a44c Fix build tag in run_integration_tests.go 2016-06-29 09:36:40 +02:00
Alexander Neumann
8b47ca5f98 Merge pull request #537 from xet7/master
Fix typo in Manual.md
2016-06-26 13:58:12 +02:00
Lauri Ojansivu
4e98c951e0 Fix typo in Manual.md 2016-06-26 14:35:48 +03:00
Alexander Neumann
814424fa6e Merge pull request #531 from restic/update-minio-go
Update minio-go
2016-06-08 22:00:16 +02:00
Alexander Neumann
902f619a06 Fix call to minio.New()
The last parameter changed semantics from `insecure` to `secure`.
2016-06-08 21:33:18 +02:00
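
A note on the change above: flipping the last parameter of minio.New() from
`insecure` to `secure` silently inverts the meaning of existing calls. A
minimal before/after sketch (signatures abbreviated; the exact minio-go API
of that era may differ in detail):

    // Before the update: the last argument meant "insecure",
    // so `false` requested TLS:
    //   client, err := minio.New("s3.amazonaws.com", accessKey, secretKey, false)

    // After the update: the last argument means "secure",
    // so the same intent now reads:
    //   client, err := minio.New("s3.amazonaws.com", accessKey, secretKey, true)
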
Alexander Neumann
d66a98c2db Update minio-go
This fixes #520.
2016-06-08 21:11:48 +02:00
Alexander Neumann
e1b5593e07 Merge pull request #525 from koolhead17/miniorestic
Added Minio.io configuration steps to run as a backend for restic.
2016-05-31 22:32:33 +02:00
koolhead17
789b8c8b49 Added a minor modification to the doc as suggested by the author. 2016-05-25 02:26:00 +05:30
Alexander Neumann
795e3d5b6c Merge pull request #503 from gerdus/restore-latest
Add option to restore latest snapshot with optional path and source filters
2016-05-11 20:48:56 +02:00
Gerdus van Zyl
73e9cac5c4 gofmt + small doc fix 2016-05-10 22:20:03 +02:00
Gerdus van Zyl
8010a0d90c fix 2016-05-10 21:57:30 +02:00
Gerdus van Zyl
3cb68ddb0d Add option to restore latest snapshot with optional path and source filters
e.g. restic -r r1 restore latest --target restore2 --path "D:\dev\restic\bin\s1"
path and source filters also added to the snapshots cmd
e.g. restic -r r1 snapshots --source nucore --path="D:\dev\restic\bin\s1"
2016-05-10 21:41:26 +02:00
Gerdus van Zyl
49f82f54b0 rebase, change source to host and add description to manual 2016-05-10 21:40:32 +02:00
Alexander Neumann
6bc7a71e55 Merge pull request #516 from restic/fix-flaky-test
Fix flaky worker cancel test
2016-05-09 22:12:39 +02:00
Alexander Neumann
2c1e590e47 Merge pull request #509 from restic/read-from-stdin
Allow reading data from stdin
2016-05-09 22:12:22 +02:00
Alexander Neumann
84f7d28abf Merge pull request #515 from viric/fix_traverse_order
Traverse paths in the same order as parent snapshot
2016-05-09 22:10:36 +02:00
Alexander Neumann
cb75737770 Merge pull request #514 from viric/fix_parent_search
Better backup parent snapshot search. Part of #513
2016-05-09 22:10:32 +02:00
Alexander Neumann
fb45ea139d Add barrier 2016-05-09 21:29:13 +02:00
Alexander Neumann
bce0bbeda2 Add Benchmark for ArchiveReader 2016-05-09 21:16:59 +02:00
Alexander Neumann
c6d934a685 Fix flaky worker cancel test 2016-05-09 20:41:55 +02:00
Alexander Neumann
143fde66bc Add documentation 2016-05-09 20:11:32 +02:00
Alexander Neumann
4146c09a04 Add test for ArchiveReader() 2016-05-09 20:11:32 +02:00
Alexander Neumann
43f9c2d36e backup: Save file size when reading from stdin 2016-05-09 20:11:32 +02:00
Alexander Neumann
5e0813ca04 fuse: Use correct file size in case it's zero 2016-05-09 20:11:32 +02:00
Alexander Neumann
6ee9baa9c5 fuse: Add debug logs 2016-05-09 20:11:32 +02:00
Alexander Neumann
7c76ff3aaf Allow reading backups from stdin 2016-05-09 20:11:32 +02:00
Alexander Neumann
deae1e7e29 Merge pull request #511 from restic/cleanups
Cleanups and test functions
2016-05-09 20:10:39 +02:00
Lluís Batlle i Rossell
4818a8e356 Fix gofmt 2016-05-09 16:31:59 +02:00
Lluís Batlle i Rossell
83aa63365a Not exporting baseNameSlice. No one else wants it. 2016-05-09 14:46:14 +02:00
Lluís Batlle i Rossell
aed73be93d Improve comment according to hound guidelines 2016-05-09 14:44:03 +02:00
Lluís Batlle i Rossell
4ea62ecbcc Traverse paths in the same order as parent snapshot
This is the 2nd partial fix to #513.

The archivePipe requires the snapshot paths and the backup paths to be
traversed in the same order, but they were sorted differently: the backup
paths by full path, and the snapshot paths by basename.
2016-05-09 14:32:17 +02:00
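
To illustrate the fix above: both path lists must be sorted by the same key
before being fed to the archive pipeline. A minimal sketch with a
hypothetical sortPaths helper (not restic's actual code):

    package main

    import (
        "fmt"
        "sort"
    )

    // sortPaths sorts a copy of paths by full path; applying the same
    // ordering to the backup paths and the parent-snapshot paths keeps
    // the two traversals in lockstep.
    func sortPaths(paths []string) []string {
        sorted := append([]string(nil), paths...)
        sort.Strings(sorted) // sort by full path, not by basename
        return sorted
    }

    func main() {
        backup := sortPaths([]string{"/srv/data", "/home/user"})
        parent := sortPaths([]string{"/home/user", "/srv/data"})
        fmt.Println(backup) // [/home/user /srv/data]
        fmt.Println(parent) // [/home/user /srv/data]
    }
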
Lluís Batlle i Rossell
60c8c90d35 Better backup parent snapshot search. Part of #513
I look for the newest snapshot that contains all supplied paths to backup.
2016-05-09 12:42:12 +02:00
Alexander Neumann
c523b38abb Remove darwin/arm 2016-05-08 23:42:13 +02:00
Alexander Neumann
f928b30caa CI: Use gox with -osarch
This allows not to cross-compile to darwin/arm, which fails at the
moment.
2016-05-08 23:25:30 +02:00
Alexander Neumann
20afed4058 Checker: handle symlinks 2016-05-08 23:16:17 +02:00
Alexander Neumann
a2224e380b Address style issues identified by Hound 2016-05-08 22:38:38 +02:00
Alexander Neumann
31030baca3 Add comment 2016-05-08 13:51:33 +02:00
Alexander Neumann
173940cbdf Add repository.ListPack 2016-05-08 13:51:21 +02:00
Alexander Neumann
6fc3590838 Remove repository.SaveFrom() 2016-05-08 13:13:29 +02:00
Alexander Neumann
43f7a1fcd9 Correct log statement 2016-05-08 13:09:36 +02:00
Alexander Neumann
7faf272996 Progress: Use reference to sync.Once 2016-05-08 13:04:58 +02:00
Alexander Neumann
514a43f74b Add more tests 2016-05-08 12:25:01 +02:00
Alexander Neumann
6655511ab8 checker: test file mode 2016-05-08 12:25:01 +02:00
Alexander Neumann
168cfc2f6d Add testing helper functions 2016-05-08 12:25:01 +02:00
Alexander Neumann
6cfa0d502d Add LoadAllSnapshots() 2016-05-08 12:25:01 +02:00
Alexander Neumann
a996dbb9d6 check: Add more checks for nodes 2016-05-08 12:25:01 +02:00
Alexander Neumann
7572586ded Fix image URL for readthedocs.org 2016-05-08 12:24:09 +02:00
Alexander Neumann
a0ab9f2fdf Merge pull request #507 from restic/debug-minio-on-darwin
Update minio-go
2016-05-08 12:20:25 +02:00
Alexander Neumann
1b50d55d0c Update minio-go 2016-05-08 11:24:24 +02:00
Alexander Neumann
7dc7f0d295 Vagrantfile: Fix network for darwin 2016-05-07 23:38:41 +02:00
Alexander Neumann
3f8da47a0c Fix restic s3 backend for new minio-go version 2016-05-07 23:38:41 +02:00
Alexander Neumann
72fdd0bc09 Update minio-go 2016-05-07 23:38:41 +02:00
Alexander Neumann
be04a3b683 Travis: Update Go version, set ulimit 2016-05-07 23:38:30 +02:00
Alexander Neumann
60f1fbe35b Update domain for readthedocs 2016-05-05 13:25:06 +02:00
Alexander Neumann
4531456be5 Merge pull request #497 from Thor77/excludeFileExpandEnv
Expand environment-variables in exclude-files
2016-04-18 21:40:30 +02:00
Alexander Neumann
59ec393be1 Merge pull request #502 from restic/update-poly1305
Update crypto library
2016-04-18 21:40:16 +02:00
Alexander Neumann
306c0fea16 Update crypto library
This allows building restic with Go 1.3 on ARM.

Closes #501
2016-04-18 21:23:00 +02:00
Alexander Neumann
039019689a Merge pull request #500 from restic/fix-499
Fix exclude filters with trailing slash
2016-04-18 21:02:27 +02:00
Alexander Neumann
1eb896ae6f Merge pull request #498 from restic/fix-travis-drawin
Fix CI tests
2016-04-18 20:58:45 +02:00
Alexander Neumann
22338903bf Fix golint 2016-04-18 20:33:45 +02:00
Alexander Neumann
baece5eeb3 Add error checking for CI tests 2016-04-18 20:24:12 +02:00
Alexander Neumann
b2846ea49d Add error handling 2016-04-18 20:24:12 +02:00
Alexander Neumann
aa43b69651 Better error reporting for CI tests 2016-04-18 20:24:12 +02:00
Alexander Neumann
6fe25548bd Add another filter test 2016-04-17 22:04:42 +02:00
Alexander Neumann
9002eaa259 Fix exclude filters with trailing slash 2016-04-17 21:54:12 +02:00
Alexander Neumann
2e3c541237 Rework Go program to run CI tests 2016-04-17 17:55:13 +02:00
Alexander Neumann
ead6d11ecf Backend tests: remove debug 2016-04-17 17:39:14 +02:00
Alexander Neumann
41e3e12f4b Travis: correct exclude for darwin 2016-04-17 13:08:57 +02:00
Alexander Neumann
d4b202243a Update Go versions 2016-04-16 22:50:36 +02:00
Alexander Neumann
87250c4489 Call minio server with env variables 2016-04-16 22:49:23 +02:00
Alexander Neumann
4a576af855 Fix CI tests on darwin 2016-04-16 22:27:34 +02:00
Thor77
4fb6669196 add documentation for environment-var expanding in exclude-files 2016-04-16 22:07:36 +02:00
Thor77
9644399074 add environment-var expanding for exclude-files 2016-04-16 22:04:29 +02:00
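
The expansion above maps naturally onto the standard library. A minimal
sketch (not restic's actual implementation) of reading an exclude file and
expanding environment variables in each pattern via os.ExpandEnv:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // readExcludeFile returns the patterns from an exclude file, with
    // environment variables expanded, so "$HOME/tmp" becomes e.g.
    // "/home/user/tmp". Blank lines and #-comments are skipped.
    func readExcludeFile(name string) ([]string, error) {
        f, err := os.Open(name)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        var patterns []string
        sc := bufio.NewScanner(f)
        for sc.Scan() {
            line := strings.TrimSpace(sc.Text())
            if line == "" || strings.HasPrefix(line, "#") {
                continue
            }
            patterns = append(patterns, os.ExpandEnv(line))
        }
        return patterns, sc.Err()
    }

    func main() {
        patterns, err := readExcludeFile("excludes.txt")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println(patterns)
    }
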
Alexander Neumann
5b33a7a903 Merge pull request #496 from jzacsh/jzacsh-nit-on-doc-goget
doc: point to correct `gb` arg for `go get`
2016-04-14 08:58:12 +02:00
Jonathan Zacsh
1fe0e30d71 doc: point to correct gb arg for go get
per side-chat in https://github.com/restic/restic/issues/494#issuecomment-209591073
2016-04-13 15:52:08 -04:00
Alexander Neumann
75dd9e0fee Merge pull request #495 from restic/fix-494
Umount fuse in tests
2016-04-13 20:48:50 +02:00
Alexander Neumann
23d7464306 Umount fuse in tests
This corrects the order when the fuse mount is terminated by closing the
done channel: Before, restic would close the fuse connection and only
afterwards try to remove the mount, which does not work.

Closes #494
2016-04-13 20:18:54 +02:00
Alexander Neumann
32a5778602 Merge pull request #490 from Thor77/backupExcludeFile
add backup --exclude-file
2016-04-06 00:09:46 +02:00
Thor77
b4493b4640 add documentation for --exclude[-file] and patterns 2016-04-01 15:56:52 +02:00
Thor77
1c1eacfc94 add backup --exclude-file 2016-04-01 13:53:22 +02:00
Alexander Neumann
aac2405e95 Merge pull request #489 from xmaka/xmaka-patch-1
add debian stable help
2016-03-31 22:01:14 +02:00
xmaka
90765a7dac add debian stable help
add a little clue for Debian stable users to install 'go' from the repositories
2016-03-31 21:02:05 +02:00
Alexander Neumann
008337aad4 Merge pull request #487 from restic/use-fadvise
Purge read and written data from OS cache
2016-03-31 19:27:24 +02:00
Alexander Neumann
380e9b8119 Fix Makefile 2016-03-31 19:20:57 +02:00
Alexander Neumann
ddfadae6f6 Fix compilation for Go 1.3 2016-03-28 16:09:28 +02:00
Alexander Neumann
c8f46ce81d fs: Require Go1.4 for Linux 2016-03-28 15:48:18 +02:00
Alexander Neumann
c30f4a9134 fs: remove unneeded code 2016-03-28 15:33:10 +02:00
Alexander Neumann
b7713d2d34 local backend: Drop file content from cache after write 2016-03-28 15:31:25 +02:00
Alexander Neumann
5b5bb070b9 fs: Split out ClearCache from File 2016-03-28 15:31:25 +02:00
Alexander Neumann
feb664620a Use fadvise() to not cache the content of files read 2016-03-28 15:26:46 +02:00
Alexander Neumann
d2df2ad92d Add dep: golang.org/x/sys/unix 2016-03-28 15:25:45 +02:00
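
The fadvise() technique from the commits above, as a minimal Linux-only
sketch (written for a current Go toolchain) using the newly vendored
golang.org/x/sys/unix; it mirrors the idea, not restic's exact code:

    package main

    import (
        "io"
        "log"
        "os"

        "golang.org/x/sys/unix"
    )

    // readWithoutCaching reads a file, then advises the kernel that the
    // cached pages will not be needed again (FADV_DONTNEED), so a long
    // backup run does not evict hotter data from the page cache.
    func readWithoutCaching(name string) ([]byte, error) {
        f, err := os.Open(name)
        if err != nil {
            return nil, err
        }
        defer f.Close()

        buf, err := io.ReadAll(f)
        if err != nil {
            return nil, err
        }

        // Length 0 means "from offset to end of file"; the call is
        // advisory, so a failure is only logged.
        if err := unix.Fadvise(int(f.Fd()), 0, 0, unix.FADV_DONTNEED); err != nil {
            log.Printf("fadvise %v: %v", name, err)
        }
        return buf, nil
    }

    func main() {
        buf, err := readWithoutCaching("/etc/hostname")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("read %d bytes", len(buf))
    }
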
Alexander Neumann
9e81b158bf Add hints on how to get started 2016-03-12 11:28:10 +01:00
Alexander Neumann
49eb55c457 Merge pull request #484 from mholt/patch-1
Change ErrNoKeyFound message
2016-03-11 21:26:19 +01:00
Matt Holt
e6ba9e5849 Change ErrNoKeyFound message
For #438. I was just going to change it to "wrong password" but then I saw that it might actually be the case that no key could be found, so I changed it to what I did. Let me know if you'd like something different!
2016-03-11 08:21:01 -07:00
Alexander Neumann
a747cf994e Merge pull request #482 from restic/update-chunker
Update chunker
2016-03-10 11:02:19 +01:00
Alexander Neumann
afd0eb7f67 Merge pull request #481 from restic/fix-memory-usage
Use tempfiles for not-yet-uploaded pack files
2016-03-06 16:28:44 +01:00
Alexander Neumann
e4a6dd8c8c Use newRandReader instead of rand.New()
This needs to be done since for Go < 1.6 rand.Rand does not implement
io.Reader.
2016-03-06 14:21:02 +01:00
Alexander Neumann
18c3024171 Unexport NewPackerManager 2016-03-06 14:20:48 +01:00
Alexander Neumann
1e1368eea3 Add randReader for tests
This can be removed once we require Go 1.6.
2016-03-06 13:59:06 +01:00
Alexander Neumann
cda7616c82 Remove tempdir for packerManager 2016-03-06 13:14:06 +01:00
Alexander Neumann
015cea0c50 PackerManager: Remove debug comment 2016-03-06 12:35:21 +01:00
Alexander Neumann
c0b5f5a8af Fix all code which uses repository.New() 2016-03-06 12:34:23 +01:00
Alexander Neumann
f956f60f9f PackerManager: use tempfiles instead of memory buffers 2016-03-06 12:26:25 +01:00
Alexander Neumann
f893ec57cb Add test and benchmark for PackerManager 2016-03-05 15:58:39 +01:00
Alexander Neumann
9e24238cdd Update chunker 2016-03-05 13:46:20 +01:00
Alexander Neumann
4dac6d45fd Merge pull request #459 from restic/debug-434
Debug issue #434
2016-02-27 14:02:59 +01:00
Alexander Neumann
8d1a5731f3 Remove integration test for 'optimize' 2016-02-27 13:38:05 +01:00
Alexander Neumann
04b3ce00e2 Remove command 'optimize'
It was discovered that the current code may delete still-referenced
blobs, so we'll remove the command for now.

This closes #434
2016-02-27 13:12:22 +01:00
Alexander Neumann
9386bfbafa checker: Do not use reference in checker errors 2016-02-27 13:10:35 +01:00
Alexander Neumann
a613e23e34 checker: Use backend.IDSet instead of custom struct 2016-02-27 13:10:35 +01:00
Alexander Neumann
5ce1375ddd Rename non-exported function 2016-02-27 13:10:35 +01:00
Alexander Neumann
4cefd456bb Refactor rebuild-index code
This code reads all pack headers from all packs and rebuilds the index
from scratch. Afterwards, all indexes are removed. This is needed
because in #434 the command `optimize` produced a broken index that
did not contain a blob any more. Running `rebuild-index` should fix
this.
2016-02-27 13:10:35 +01:00
Alexander Neumann
bc911f4609 cmd_dump: Only load pack header 2016-02-27 13:06:21 +01:00
Alexander Neumann
090920039f pack: Add test with backend.NewReadSeeker
This uses the new backend ReadSeeker with the unpacker.
2016-02-27 13:06:21 +01:00
Alexander Neumann
21a99397ff worker: fix tests
The test failed on Windows, probably because the machine used for CI was
too slow. The new test doesn't depend on timing any more.
2016-02-27 13:06:21 +01:00
Alexander Neumann
482fc9f51d Add backend.readSeeker
This struct implements an io.ReadSeeker on top of a backend. This is the
easiest way to make the packer compatible with the new backend without
loading a complete pack into a bytes.Buffer.
2016-02-27 13:06:21 +01:00
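
The pattern described in the commit above is the same one the standard
library ships as io.NewSectionReader: it adapts any io.ReaderAt (here a
strings.Reader standing in for ranged backend reads) into an io.ReadSeeker,
so a consumer like the unpacker can seek to a pack header without buffering
the whole file. A small illustration:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // 24 bytes of fake pack data whose last 6 bytes are the "header".
        var ra io.ReaderAt = strings.NewReader("....pack bytes....HEADER")
        rs := io.NewSectionReader(ra, 0, 24) // offset 0, total size 24

        // Seek to the final 6 bytes, as a pack header reader would.
        if _, err := rs.Seek(-6, io.SeekEnd); err != nil {
            panic(err)
        }
        header, err := io.ReadAll(rs)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", header) // HEADER
    }
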
Alexander Neumann
b114ab7108 cmd_dump: Allow dumping all blobs in all packs
I had the suspicion that one of my repositories contained redundant
data blobs that are not recorded in the index. In order to check this I
needed to dump information about the pack files without consulting the
index. The code here iterates over all packs, dumps their headers, and
prints the information as JSON (so other tools can parse it again).
2016-02-27 13:06:21 +01:00
Alexander Neumann
4cb4a3ac7f Add separate goroutine that closes the output chan
This allows iterating over the output channel without having to start
another Goroutine outside of the worker pool. This also removes the need
for calling Wait().
2016-02-27 13:06:21 +01:00
Alexander Neumann
ee422110c8 Make worker pools input/output chans symmetric
Input and output channels are now both of type `chan Job`; this makes it
possible to chain multiple worker pools together.
2016-02-27 13:06:21 +01:00
Alexander Neumann
e5ee4eba53 Add worker pool
A worker pool is needed whenever something should be done concurrently.
This small library makes it easy to create a worker pool by specifying
channels, concurrency and a function that should be executed for each
job and returns a result and an error.
2016-02-27 13:06:21 +01:00
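
The worker pool described above is small enough to sketch in full. The
following is a hypothetical illustration of the design the commits outline —
symmetric `chan Job` input/output, configurable concurrency, a per-job
function, and a helper goroutine that closes the output channel so callers
need no Wait():

    package main

    import (
        "fmt"
        "sync"
    )

    // Job carries the input and, after processing, the result or error.
    type Job struct {
        Data   int
        Result int
        Err    error
    }

    // Run starts n workers that apply f to every job from in and pass the
    // finished jobs to out. A separate goroutine closes out once all
    // workers are done, so callers can simply range over it.
    func Run(n int, f func(*Job), in <-chan Job, out chan<- Job) {
        var wg sync.WaitGroup
        for i := 0; i < n; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for job := range in {
                    f(&job)
                    out <- job
                }
            }()
        }
        go func() {
            wg.Wait()
            close(out)
        }()
    }

    func main() {
        in := make(chan Job)
        out := make(chan Job)
        Run(4, func(j *Job) { j.Result = j.Data * j.Data }, in, out)

        go func() {
            for i := 1; i <= 5; i++ {
                in <- Job{Data: i}
            }
            close(in)
        }()

        for job := range out { // no Wait() needed: out is closed for us
            fmt.Println(job.Data, "->", job.Result)
        }
    }

Because input and output share the Job type, the out channel of one pool can
serve directly as the in channel of the next, which is what makes chaining
pools possible.
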
Alexander Neumann
1e0b7dbdd2 Merge pull request #477 from restic/rest-backend
rest backend: Remove indirection on http.Client
2016-02-25 22:24:00 +01:00
Alexander Neumann
5e152b7753 Merge pull request #476 from restic/fix-475
Ignore invalid index files
2016-02-25 17:56:49 +01:00
Alexander Neumann
17f5b524a6 local: Replace matching code with proper Readdir() 2016-02-24 22:43:04 +01:00
Alexander Neumann
4ae16d7661 repository: Use backend.ID to load index
This commit uses ParallelWorkFuncParseID() to load all indexes and
ignores file names with an invalid format.

This fixes #475.
2016-02-24 22:41:32 +01:00
Alexander Neumann
77d85cee52 Merge pull request #472 from restic/update-chunker
Update chunker
2016-02-24 21:25:15 +01:00
Alexander Neumann
d84dec47bf Merge pull request #473 from restic/fix-coveralls
Fix coveralls.io service
2016-02-23 23:51:24 +01:00
Alexander Neumann
6c9170da51 Merge pull request #471 from fawick/move-cmd-folders
Refactor src/restic/cmd to src/cmds
2016-02-23 23:24:09 +01:00
Alexander Neumann
2ce49ea0ee Update code to use the new Chunker interface 2016-02-23 23:14:35 +01:00
Alexander Neumann
3db569c45a Update chunker 2016-02-23 23:14:35 +01:00
Alexander Neumann
98985019f9 Set GOPATH for goveralls 2016-02-23 23:03:38 +01:00
Alexander Neumann
699cb5ed8f Let goveralls fail if it needs to
Maybe this fixes coverage reporting...
2016-02-23 22:16:01 +01:00
Fabian Wickborn
6005bd9833 Fix relative paths in integrations tests 2016-02-23 09:21:50 +01:00
Fabian Wickborn
ee494ab939 Use relocated command folders in build.go and run_integration_tests.go
The resulting command structure is almost compatible with that of the
gb reference project (example-gsftp), except that the subfolder for
commands is 'cmds' instead of 'cmd'.
2016-02-23 09:18:09 +01:00
Fabian Wickborn
442780f214 Move commands to src/cmds 2016-02-23 07:21:28 +01:00
Alexander Neumann
9c47a8abfc Merge pull request #467 from fawick/master
Merging restic-server
2016-02-22 20:48:18 +01:00
Fabian Wickborn
dd5680dab6 restic-server: Create tmp folder 2016-02-22 20:14:11 +01:00
Fabian Wickborn
1cdbc8e1aa restic-server: Fix folder permissions 2016-02-22 20:14:11 +01:00
Fabian Wickborn
e4168fdde5 restic-server: Reduce memory footprint for saving blobs
Before, the restic-server read the whole blob (up to 8MB) into memory
prior to writing it to disk. Concurrent writes consumed a lot
of memory. This change writes the blob to a tmp file directly and
renames it afterwards if there were no errors.
2016-02-22 20:14:11 +01:00
Fabian Wickborn
4749e610af restic-server: Fix content length for HEAD requests 2016-02-22 20:14:11 +01:00
Fabian Wickborn
51d86370a5 Fixes for the PR
- Removed external dependencies for test
- Prevent building restic-server w/ Go 1.3

Go versions 1.0, 1.1, and 1.2 are going to fail as well, but they
are "excluded" by README.md already.
2016-02-22 20:13:55 +01:00
Fabian Wickborn
d86c093480 Merged the restic-server by @bchapuis
Commit ID in fd0/restic-server at time of merge is
07fae00e7ddd8751b150e2ebf0bff8b2871c77ce
2016-02-22 20:12:50 +01:00
Alexander Neumann
bb7b9ef3fc Merge pull request #466 from ckemper67/sftp-path-clean
Cleaned up the sftp parsing logic.
2016-02-22 19:04:56 +01:00
Alexander Neumann
5dd65a5c19 Merge pull request #464 from restic/rest-backend
Add REST backend
2016-02-22 18:52:37 +01:00
Christian Kemper
6eb97ca6cc Cleaned up the sftp parsing logic.
Simplified and cleaned up the sftp parsing logic. Added support for
cleaning the directory with path.Clean. Added additional tests.
2016-02-21 10:50:36 -08:00
Alexander Neumann
9a822285eb Merge pull request #468 from ckemper67/s3-logging
Added missing handle to the s3.Stat log message output
2016-02-21 19:13:10 +01:00
Christian Kemper
c2716755f1 Added missing handle to the s3.Stat log message output 2016-02-21 09:59:27 -08:00
Alexander Neumann
7087efaa79 rest backend: Remove indirection on http.Client 2016-02-21 17:06:35 +01:00
Alexander Neumann
921c2f6069 rest backend: Improve documentation 2016-02-21 17:03:27 +01:00
Alexander Neumann
8ad98e8040 rest backend: Fixes 2016-02-21 16:35:25 +01:00
Alexander Neumann
f7a10a9b9c backend tests: Test accessing config
This commit adds real testing for accessing the config file with
different names.
2016-02-21 16:02:13 +01:00
Alexander Neumann
bd621197f8 Add rest backend to ui parser 2016-02-21 15:33:13 +01:00
Alexander Neumann
ec34da2d66 Add rest backend to location 2016-02-21 15:33:13 +01:00
Alexander Neumann
c2348ba768 Add REST backend
This is a port of the original work by @bchapuis in
https://github.com/restic/restic/pull/253
2016-02-21 15:33:13 +01:00
Alexander Neumann
75d69639e6 .gitignore: Add /vendor/pkg 2016-02-21 15:33:13 +01:00
Alexander Neumann
9485fd0c4d Merge pull request #462 from restic/update-documentation
doc: Reduce text in README, add to Manual
2016-02-21 14:01:57 +01:00
Alexander Neumann
9b93b3a72c doc: Reduce text in README, add to Manual 2016-02-21 13:37:55 +01:00
Alexander Neumann
eaa2f899d5 Merge pull request #455 from restic/mkdocs
Add mkdocs for readthedocs.org
2016-02-21 13:14:14 +01:00
Alexander Neumann
1edf9c1ee4 doc: Add paragraph about mkdocs 2016-02-21 13:04:45 +01:00
Alexander Neumann
45e9561b48 doc: Add paragraph about documentation version 2016-02-21 12:58:42 +01:00
Alexander Neumann
7de8bf6c27 Manual: Correct headings, add section about debug 2016-02-21 12:56:25 +01:00
Alexander Neumann
1c2992e2e5 Update manual 2016-02-21 12:52:31 +01:00
Alexander Neumann
dc994699d9 Remove obsolete structure image 2016-02-21 12:29:13 +01:00
Alexander Neumann
a13f9f14d0 Add something to the front page 2016-02-21 12:28:46 +01:00
Alexander Neumann
e71e2c74f8 README: Add readthedocs badge 2016-02-21 12:28:46 +01:00
Alexander Neumann
7c4bd662cb Manual: Fix shell blocks and ToC 2016-02-21 12:28:46 +01:00
Alexander Neumann
6559fa7382 Add mkdocs for readthedocs.org 2016-02-21 12:28:46 +01:00
Alexander Neumann
b9eea24728 Merge pull request #460 from restic/add-github-issue-template
Add issue template
2016-02-21 12:23:24 +01:00
Alexander Neumann
8f33afead4 Add issue template 2016-02-21 00:35:58 +01:00
Alexander Neumann
625c987d23 Move sftp test 2016-02-20 20:53:40 +01:00
Alexander Neumann
933479047f Merge pull request #458 from restic/use-gb-vendor
Properly vendor dependencies with gb-vendor
2016-02-20 19:19:34 +01:00
Alexander Neumann
eef73d466d Properly vendor dependencies with gb-vendor 2016-02-20 18:33:06 +01:00
Alexander Neumann
134d129986 Merge pull request #445 from restic/switch-to-gb
Switch to gb
2016-02-20 18:23:40 +01:00
Alexander Neumann
4dffd3de66 Update .travis.yml 2016-02-20 17:56:11 +01:00
Alexander Neumann
cc8a929d43 Vagrantfile: Update to new structure (and Go version)
Also add /.vagrant to .gitignore
2016-02-20 17:44:48 +01:00
Alexander Neumann
1ab8220022 Update CONTRIBUTING.md 2016-02-20 17:31:39 +01:00
Alexander Neumann
6e31f4bb19 Dockerfile: Update for gb 2016-02-20 17:31:39 +01:00
Alexander Neumann
9bda164ed3 Update Appveyor configuration 2016-02-20 17:31:22 +01:00
Alexander Neumann
c51889c157 Ignore vendor/ directory for gofmt tests 2016-02-20 17:31:22 +01:00
Alexander Neumann
b3c2febf79 Add output of build.go to gitignore 2016-02-20 17:31:22 +01:00
Alexander Neumann
96e66bb3e9 Update build.go and run_integration_tests.go 2016-02-20 17:31:22 +01:00
Alexander Neumann
841326d713 Move build.go and run_integration_tests.go to root 2016-02-20 17:31:21 +01:00
Alexander Neumann
7b6629802b Move "doc" to root dir 2016-02-20 17:31:21 +01:00
Alexander Neumann
c0bd660a9e Rename package
* github.com/restic/restic -> restic
2016-02-20 17:31:21 +01:00
Alexander Neumann
0a8ef79dad Move top-level files 2016-02-20 17:31:21 +01:00
Alexander Neumann
b63399d606 Move things around for gb
This moves all restic source files to src/, and all vendored
dependencies to vendor/src.
2016-02-20 17:31:20 +01:00
Alexander Neumann
273d028a82 Merge pull request #451 from fawick/master
Support custom SSH ports in URL of SFTP-repo
2016-02-16 21:13:42 +01:00
Fabian Wickborn
2d94e71117 Support custom SSH ports in URL of SFTP-repo 2016-02-15 19:17:41 +01:00
Alexander Neumann
d918d0f0c3 Merge pull request #443 from ckemper67/fix-387
Introduced a configurable object path prefix for s3 repositories to address #387
2016-02-14 22:43:40 +01:00
Alexander Neumann
517eff7e48 Merge pull request #447 from ckemper67/fix-442
Fix #442: set blocks and blocksize in fuse
2016-02-14 22:25:37 +01:00
Christian Kemper
1f81865847 set Blocks and BlockSize in Attr using a default blocksize of 512 to address #442. 2016-02-14 13:14:08 -08:00
Christian Kemper
f3f1404849 always use "restic" as the default prefix for parsing.
improved test error message to make it easier to find the problematic pattern
2016-02-14 11:26:46 -08:00
Christian Kemper
24b7514fe0 cleaner sharing of s3: and s3:// configuration 2016-02-14 09:45:58 -08:00
Christian Kemper
91dc14c9fc fix Hound warning 2016-02-14 09:27:00 -08:00
Christian Kemper
32c2cafa89 Simplify creation of the Config by moving it to a separate function. Simplify the parsing logic
by sharing the handling of s3: and s3://
2016-02-14 09:18:22 -08:00
Christian Kemper
74608531c7 strip off the trailing slash for the object prefix 2016-02-14 07:16:50 -08:00
Christian Kemper
48f85fbb09 replaced if-else chain with switch 2016-02-14 07:01:14 -08:00
Christian Kemper
535dfaf097 address first round of review comments 2016-02-14 06:40:15 -08:00
Christian Kemper
8f5ff379b7 Introduced a configurable object path prefix for s3 repositories.
Prepends the object path prefix to all s3 paths and allows having multiple independent
restic backup repositories in a single s3 bucket.

Removed the hardcoded "restic" prefix from s3 paths.

Use "restic" as the default object path prefix for s3 if no other prefix gets specified.
This will retain backward compatibility with existing s3 repository configurations.

Simplified the parse flow to have a single point where we parse the bucket name and the prefix within the bucket.

Added tests for s3 object path prefix and the new default prefix to config_test and location_test.
2016-02-14 06:05:38 -08:00
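
A sketch of the prefix handling this commit describes (identifier names are
illustrative, not restic's): object names are placed under a configurable
prefix that defaults to "restic" for backward compatibility, with any
trailing slash stripped:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // s3Path joins a configurable repository prefix with an object name.
    func s3Path(prefix, name string) string {
        if prefix == "" {
            prefix = "restic" // default keeps existing repositories working
        }
        prefix = strings.TrimRight(prefix, "/")
        return path.Join(prefix, name)
    }

    func main() {
        fmt.Println(s3Path("", "keys/abcdef"))        // restic/keys/abcdef
        fmt.Println(s3Path("team-a/", "keys/abcdef")) // team-a/keys/abcdef
    }
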
Alexander Neumann
b7260cafac CONTRIBUTING: Add paragraph about PRs and tests 2016-02-14 11:49:10 +01:00
Alexander Neumann
3d27751b69 Merge pull request #441 from restic/add-note-gccgo
README: Add note about gccgo
2016-02-13 20:21:58 +01:00
Alexander Neumann
4053fe0a9b Merge pull request #440 from restic/fix-431
sftp: Use os.IsNotExist() for Test()
2016-02-13 19:45:14 +01:00
Alexander Neumann
e75856a7d2 README: Add note about gccgo
Closes #432
2016-02-13 19:20:25 +01:00
Alexander Neumann
50380f8c14 Merge pull request #439 from restic/fix-402
Add cleanup handler to restore terminal state
2016-02-13 19:14:59 +01:00
Alexander Neumann
4032af0b78 sftp: Use os.IsNotExist() for Test()
The sftp library introduced a change so that the error returned for
(among others) Lstat() can be used with os.IsNotExist() to test whether
the target file does not exist.
2016-02-13 19:11:41 +01:00
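
The change above boils down to classifying the Lstat error with
os.IsNotExist. A hedged sketch assuming an *sftp.Client from
github.com/pkg/sftp (the function name is illustrative):

    package sftpbackend

    import (
        "os"

        "github.com/pkg/sftp"
    )

    // Exists reports whether name is present on the remote side. After the
    // sftp library change, the error returned by Lstat can be tested with
    // os.IsNotExist instead of being matched as a string.
    func Exists(client *sftp.Client, name string) (bool, error) {
        if _, err := client.Lstat(name); err != nil {
            if os.IsNotExist(err) {
                return false, nil // definitely absent
            }
            return false, err // transport or permission error
        }
        return true, nil
    }
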
Alexander Neumann
2bb55f017d Update pkg/sftp library 2016-02-13 19:11:35 +01:00
Alexander Neumann
1287b307ac Add cleanup handler to restore terminal state
Closes #402
2016-02-13 18:29:26 +01:00
Alexander Neumann
24618305cc Merge pull request #429 from restic/fix-428
backup: Hide status output for narrow terminals
2016-02-10 18:02:18 +01:00
Alexander Neumann
4ec5050cbb backup: Hide status output for narrow terminals
closes #428
2016-02-10 17:28:48 +01:00
Alexander Neumann
eccbcb73a1 Merge pull request #425 from restic/fix-loadall
backend.LoadAll: return nil on expected error
2016-02-08 00:37:04 +01:00
Alexander Neumann
e781e1cf1d Merge pull request #424 from restic/fix-backup
Fix backup of root directory
2016-02-08 00:06:19 +01:00
Alexander Neumann
e9a21c1dc6 backend.LoadAll: return nil on expected error
The current code returns io.ErrUnexpectedEOF, but it is the normal,
expected behaviour of the function LoadAll() to load until the item is
completely loaded. Therefore, the io.ErrUnexpectedEOF is not returned to
the caller.
2016-02-07 23:48:54 +01:00
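
Why io.ErrUnexpectedEOF is "expected" here: LoadAll-style code hands
io.ReadFull a buffer that may be larger than the stored item, and ReadFull
reports exactly that condition as io.ErrUnexpectedEOF. A minimal sketch (not
restic's actual signature):

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // loadAll fills buf from r and treats io.ErrUnexpectedEOF as success,
    // because reading until the item ends is the whole point.
    func loadAll(r io.Reader, buf []byte) ([]byte, error) {
        n, err := io.ReadFull(r, buf)
        if err == io.ErrUnexpectedEOF {
            err = nil // the item was simply shorter than the buffer
        }
        return buf[:n], err
    }

    func main() {
        item := bytes.NewReader([]byte("short item"))
        buf := make([]byte, 64) // deliberately larger than the item
        data, err := loadAll(item, buf)
        fmt.Printf("%q %v\n", data, err) // "short item" <nil>
    }
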
Alexander Neumann
a37ed45534 Add test for LoadAll with too large buffer
LoadAll() should not pass on io.ErrUnexpectedEOF, since the occurrence
of this error is normal.
2016-02-07 23:48:03 +01:00
Alexander Neumann
26484d0c7b pipe: Ignore excluded directories
When top-level directories were excluded, they were still added to the
top-level Dir{} object. This commit ignores them.
2016-02-07 23:23:16 +01:00
Alexander Neumann
68db75b4e3 pipe/archiver: Add more debug messages 2016-02-07 23:22:06 +01:00
Alexander Neumann
9048eb676b pipe: join replaced paths with original path
When saving `/`, it was replaced with the contents, but without the
proper path. So `/` was replaced by [`boot`, `bin`, `home`, ...], but
without prefixing the entry name with the proper path.
2016-02-07 22:18:37 +01:00
Alexander Neumann
6a5b022939 archiver: Add error reporting for directories
When an error occurred while walking a directory, this error wasn't
reported to the user before.
2016-02-07 22:18:00 +01:00
Alexander Neumann
57a24b2cdf Merge pull request #421 from restic/fix-fuse-with-special-names
fuse: Replace special node names with their content
2016-02-07 20:43:29 +01:00
Alexander Neumann
da47389483 Merge pull request #420 from restic/archiver-unique-paths
archiver: deduplicate list of paths to save
2016-02-07 20:31:18 +01:00
Alexander Neumann
4c329110c5 Merge pull request #419 from restic/fix-backup-root
Fix backup of "/"
2016-02-07 20:31:08 +01:00
Alexander Neumann
46fbae0d71 Merge pull request #418 from benmur/show-pack-name-on-rebuild-index-error
Handle pack loading errors in rebuild-index
2016-02-07 19:59:01 +01:00
Alexander Neumann
1835e988cf fuse: Replace special node names with their content
This is the companion fix for #419 and allows mounting repositories with
special directory names directly below the snapshot.

Closes #403
2016-02-07 19:51:29 +01:00
Alexander Neumann
537347d9b5 archiver: deduplicate list of paths to save 2016-02-07 19:35:35 +01:00
Alexander Neumann
811dbfa52d Make TestWalkerPath absolute before walking 2016-02-07 19:34:02 +01:00
Rached Ben Mustapha
ba35ca522a Handle pack loading errors in rebuild-index
Errors returned from backend.LoadAll() were not handled, leading to
these fatal errors from the unpacker trying to read the size from the end of
an empty buffer:

`seeking to read header length failed: bytes.Reader.Seek: negative position`

This change takes care of returning on error, as well as showing which pack
failed to load and validating pack integrity.
2016-02-07 18:30:47 +00:00
Alexander Neumann
c6a1f2e2f3 pipe: Handle special paths gracefully
This fixes handling special paths "." and "/". When such a path name is
found, it is replaced by the contents of the path before walking.
2016-02-07 19:30:00 +01:00
Alexander Neumann
604c27f001 Test pipe walker for invalid paths
It was discovered that when restic is instructed to save `/`, the tree
structures in the repository contain an invalid node.

When saving the dir `/home/user`, the following structure is created:

    snapshot
         -> tree
             nodes: ["user"]
             [...]

When the root directory `/` is saved, the structure is as follows:

    snapshot
         -> tree
             nodes: ["/"]
             [...]

This behavior is caused by the walker in pipe.go sending a node with the
name "." to the archiver, so this commit adds a test for invalid node
names.
2016-02-07 19:29:44 +01:00
Alexander Neumann
0535490618 Merge pull request #413 from restic/reduce-travis-matrix
Reduce jobs run on Travis
2016-02-06 14:10:54 +01:00
Alexander Neumann
9c0fc4930b Merge pull request #412 from restic/activate-hound
Enable HoundCI checking for Go
2016-02-06 13:37:13 +01:00
Alexander Neumann
a45f2cb205 Reduce jobs run on Travis
Skip building and testing restic with Go 1.3, 1.4 and 1.6 on osx.
2016-02-06 13:04:49 +01:00
Alexander Neumann
3fd5b5975a Merge pull request #411 from restic/fix-appveyor-sourceforge
Always use NetCologne SourceForge mirror
2016-02-06 13:01:55 +01:00
Alexander Neumann
9175f0b6af Enable HoundCI checking for Go 2016-02-05 21:15:46 +01:00
Alexander Neumann
1160d03279 Always use NetCologne SourceForge mirror
The one automatically selected by the SourceForge CDN fails currently:

    appveyor DownloadFile http://downloads.sourceforge.net/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
    Error downloading file: Unable to connect to the remote server Command exited with code 2
2016-02-05 21:13:14 +01:00
Alexander Neumann
789e0df49e Merge pull request #408 from benmur/validate-pack-checksums
checker: Validate pack checksums before unpacking
2016-02-05 19:45:21 +01:00
Rached Ben Mustapha
83bbf21f1a checker: Validate pack checksums before unpacking
This avoids reading a possibly invalid size at the end of a corrupted pack
2016-02-04 22:55:39 +01:00
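
The validation above relies on restic's naming convention: a pack file is
stored under the SHA-256 of its contents, so the hash can be checked before
the header is parsed. A minimal sketch:

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
    )

    // checkPack verifies that data hashes to the pack's ID; a corrupted
    // pack is rejected here instead of yielding a bogus header size.
    func checkPack(id [sha256.Size]byte, data []byte) error {
        sum := sha256.Sum256(data)
        if !bytes.Equal(sum[:], id[:]) {
            return fmt.Errorf("pack checksum mismatch: got %x, want %x", sum, id)
        }
        return nil
    }

    func main() {
        data := []byte("pack contents")
        id := sha256.Sum256(data)
        fmt.Println(checkPack(id, data))               // <nil>
        fmt.Println(checkPack(id, data[:len(data)-1])) // mismatch error
    }
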
Alexander Neumann
7a8054d678 Add link to record of talk at C4 Cologne 2016-02-04 19:56:41 +01:00
Alexander Neumann
c0bbb7254d Merge pull request #406 from restic/fix-405
Test and fix for #405
2016-02-02 20:53:52 +01:00
Alexander Neumann
4f1f03cdb9 Move testing for known blobs to Archiver
This removes the list of in-flight blobs from the master index and
instead keeps a list of "known" blobs in the Archiver. "known" here
means: either already processed, or included in an index. This property
is tested atomically: when the blob is not in the list of "known" blobs,
it is added to the list and the caller is responsible for making this
happen (i.e. saving the blob).
2016-02-01 23:50:56 +01:00
Alexander Neumann
382c766983 Move test for #405: Test Archiver instead of Repo 2016-02-01 23:50:41 +01:00
Alexander Neumann
f5f6e9cf37 Add test to reproduce #405 2016-02-01 23:35:01 +01:00
Alexander Neumann
cf88b33383 Merge pull request #398 from restic/update-minio-go
Update minio-library
2016-01-29 13:14:37 +01:00
Alexander Neumann
57615edd3a Merge pull request #400 from restic/update-go-appveyor
Update Go version for CI tests
2016-01-29 13:13:42 +01:00
Alexander Neumann
49cb88d158 Add note about lightning talk at FOSDEM 2016-01-28 22:40:52 +01:00
Alexander Neumann
1464d84cf5 Update minio-go
This should work with Go 1.3/1.4 again
2016-01-28 22:38:29 +01:00
Alexander Neumann
74ce027924 Merge pull request #397 from restic/remove-readcloser
Remove backend.ReadCloser, this is not used any more
2016-01-28 22:31:30 +01:00
Alexander Neumann
39f698886a Update Travis Go version 2016-01-28 22:28:17 +01:00
Alexander Neumann
9f8f2bc874 Update Go version for appveyor 2016-01-28 00:00:28 +01:00
Alexander Neumann
f8daadc5ef Update minio-library
This addresses #388
2016-01-27 23:23:47 +01:00
Alexander Neumann
f2371db2a9 repository: remove decryptReadCloser 2016-01-27 22:47:09 +01:00
Alexander Neumann
7d1775e000 Remove backend.ReadCloser 2016-01-27 22:35:18 +01:00
Alexander Neumann
ce4a7f16ca Merge pull request #395 from restic/rework-backend-interface
WIP: Rework backend interface
2016-01-27 22:11:20 +01:00
Alexander Neumann
322eca86bc mem backend: remove unused code 2016-01-27 21:33:48 +01:00
Alexander Neumann
3d06e6083a CI: download minio for the correct os and arch 2016-01-26 23:52:39 +01:00
Alexander Neumann
b64006221c CI: Download minio server, do not compile latest master 2016-01-26 23:50:27 +01:00
Alexander Neumann
1fde872016 CI: only build minio on Go 1.5.1 and above 2016-01-26 22:36:06 +01:00
Alexander Neumann
2701eabe39 Remove ContinuousReader 2016-01-26 22:35:51 +01:00
Alexander Neumann
c388101217 s3: Unexport structure 2016-01-26 22:19:44 +01:00
Alexander Neumann
1528d1ca83 sftp: Reduce duplicate code, add error check 2016-01-26 22:16:24 +01:00
Alexander Neumann
0bbad683c5 local: split out tempfile write function 2016-01-26 22:12:53 +01:00
Alexander Neumann
9ec435d863 local: remove duplicate code 2016-01-26 22:09:29 +01:00
Alexander Neumann
9b1c4b2dd6 local: Remove mutex and hash of open files 2016-01-26 22:08:20 +01:00
Alexander Neumann
7196971159 Remove unneeded HashingReader implementation 2016-01-26 22:00:11 +01:00
Alexander Neumann
eb1669a061 Add a lot of comments 2016-01-26 21:56:13 +01:00
Alexander Neumann
c34aa72538 Remove duplicate function str2id 2016-01-26 21:52:02 +01:00
Alexander Neumann
da883d6196 Cleanups, move Hash() to id.go 2016-01-26 21:49:33 +01:00
Alexander Neumann
b482df04ec Add more documentation 2016-01-26 21:49:22 +01:00
Alexander Neumann
5fcb5ae549 Reduce number of tests for Load() 2016-01-24 21:40:54 +01:00
Alexander Neumann
a0d484113a backends: Do not sort strings
Closes #305
2016-01-24 21:32:45 +01:00
Alexander Neumann
d9c87559b5 s3/local backend: Fix error for overwriting files 2016-01-24 21:13:24 +01:00
Alexander Neumann
1547d3b656 Remove Create() everywhere 2016-01-24 20:23:50 +01:00
Alexander Neumann
ea29ad6f96 Remove last occurrence of Create() 2016-01-24 19:30:14 +01:00
Alexander Neumann
1a95e48389 Remove unneeded special readers 2016-01-24 18:58:15 +01:00
Alexander Neumann
ac2fe4e04f Remove BlobWriter 2016-01-24 18:53:39 +01:00
Alexander Neumann
cfdd3a853d Remove usage of CreateEncryptedBlob() 2016-01-24 18:52:11 +01:00
Alexander Neumann
01e40e62bf repo: Use Save() instead of Create() 2016-01-24 18:50:41 +01:00
Alexander Neumann
35f9eae6c3 local backend: do not call Sync() on directory
This fails at least on Windows.
2016-01-24 18:01:00 +01:00
Alexander Neumann
fe565e17c3 Key: Use Save() instead of Create() 2016-01-24 17:52:44 +01:00
Alexander Neumann
4735a7f9b5 Improve random reader for tests 2016-01-24 17:47:45 +01:00
Alexander Neumann
54f8860612 backends: Add Save() 2016-01-24 16:59:38 +01:00
Alexander Neumann
ed172c06e0 backends: Add Save() function 2016-01-24 01:15:35 +01:00
Alexander Neumann
adbe9e2e1c backend: Remove GetReader 2016-01-24 01:00:27 +01:00
Alexander Neumann
2c3a6a6fa9 cmd_rebuild_index: Remove calls to GetReader() 2016-01-24 00:42:04 +01:00
Alexander Neumann
61551b0591 cmd_cat: Remove calls to GetReader() 2016-01-24 00:42:04 +01:00
Alexander Neumann
280d580ae2 checker: Use Load() instead of GetReader() 2016-01-24 00:42:04 +01:00
Alexander Neumann
782a1bf7b0 repository: remove GetDecryptReader() 2016-01-24 00:12:17 +01:00
Alexander Neumann
3191778d33 repository: Use Load() instead of GetReader() 2016-01-24 00:12:09 +01:00
Alexander Neumann
9bfa633187 repository/key: Use Load() instead of GetReader() 2016-01-23 23:48:19 +01:00
Alexander Neumann
9209dcfa26 Add LoadAll() 2016-01-23 23:41:55 +01:00
Alexander Neumann
919b40c6cf Add Stat() method to backend interface 2016-01-23 23:27:58 +01:00
Alexander Neumann
10b03eee27 Add comment 2016-01-23 23:27:40 +01:00
Alexander Neumann
0b50f9e02c Move MemoryBackend to backend/mem 2016-01-23 19:50:11 +01:00
Alexander Neumann
f05a32509e Add "Test" prefix to backend test functions 2016-01-23 19:12:02 +01:00
Alexander Neumann
e4f2e4a203 Remove old s3 tests 2016-01-23 19:11:47 +01:00
Alexander Neumann
6ba56befad Abort fuse integration test on error
Before, the fuse integration test was run but never finished, because
the testing code did not detect any errors when the fusermount binary
returned an error. This commit fixes that.
2016-01-23 19:10:43 +01:00
Alexander Neumann
15c8b85a4b Add tests for s3 backend 2016-01-23 18:46:04 +01:00
Alexander Neumann
c6db567e3f Add sftp tests 2016-01-23 18:30:02 +01:00
Alexander Neumann
4952f86682 Add test to prevent double create 2016-01-23 18:07:15 +01:00
Alexander Neumann
16b7cc7655 Remove redundant local tests 2016-01-23 17:45:33 +01:00
Alexander Neumann
99fab793c0 Remove timestamp from generated tests 2016-01-23 17:43:49 +01:00
Alexander Neumann
9423767827 Update test generate script, add tests to membackend 2016-01-23 17:42:26 +01:00
Alexander Neumann
e966df3fed Add Load() to MemBackend 2016-01-23 17:19:55 +01:00
Alexander Neumann
3aafa21887 Fix MockBackend.Load() 2016-01-23 17:19:47 +01:00
Alexander Neumann
9a490f9e01 Implement package-local tests 2016-01-23 17:08:03 +01:00
Alexander Neumann
0a24261afb Add Load() for all existing backends 2016-01-23 14:12:12 +01:00
Alexander Neumann
8b7bf8691d backend: Remove Get()
This is the first commit that removes the (redundant) Get() method of
the backend interface. Get(x, y) is equivalent to GetReader(x, y, 0, 0).
2016-01-23 13:13:05 +01:00
Alexander Neumann
d3a6e2a991 Drop requirement from List()
Closes #305
2016-01-23 12:47:16 +01:00
Alexander Neumann
171cd0dfe1 Add backend.Handle, add comments 2016-01-23 12:46:20 +01:00
Alexander Neumann
4d7e802c44 Merge pull request #392 from restic/fix-build
Allow saving duplicate blobs in the repacker
2016-01-17 22:02:09 +01:00
Alexander Neumann
109a120b39 Fix RandomReader 2016-01-17 21:27:51 +01:00
Alexander Neumann
f53008d916 Allow saving duplicate blobs in the repacker
This adds code to the master index to allow saving duplicate blobs
within the repacker. In this mode, only the list of currently in flight
blobs is consulted, and not the index. This is correct because while
repacking, a unique list of blobs is saved again to the index.
2016-01-17 21:14:55 +01:00
Alexander Neumann
34c1056efc Merge pull request #358 from episource/iss358_pack_not_referened_add_test
Closes #365
Closes #358
2016-01-17 20:07:56 +01:00
Alexander Neumann
00e7a76ecc Merge branch 'iss358_pack_not_referenced_fix' of https://github.com/episource/restic into episource-358 2016-01-17 20:07:31 +01:00
Alexander Neumann
e689d499e7 Improve RandomReader 2016-01-17 19:46:48 +01:00
Alexander Neumann
5df9bdec9a Merge pull request #366 from restic/howeyc-s3-minio
rebase: Switch s3 library to allow for s3 compatible backends
2016-01-17 19:21:13 +01:00
Alexander Neumann
c722851f92 Update Dockerfile 2016-01-17 18:50:50 +01:00
Alexander Neumann
877f3f61a0 Add flag to disable cross-compilation 2016-01-17 18:49:43 +01:00
Alexander Neumann
1dd4c52a8b Add comments, configure flag library 2016-01-17 18:48:05 +01:00
Alexander Neumann
c6e1696f07 Fix debug message 2016-01-17 18:48:05 +01:00
Alexander Neumann
1483e15e4e Update s3 library (again) 2016-01-17 18:48:05 +01:00
Alexander Neumann
6a56d5b87b Repo: Add more debug 2016-01-17 18:48:05 +01:00
Alexander Neumann
289aee9448 Adapt s3 backend to new library 2016-01-17 18:48:05 +01:00
Alexander Neumann
0e9236475b Update s3 library (again) 2016-01-17 18:48:05 +01:00
Alexander Neumann
181480b68b Update s3 library 2016-01-17 18:48:05 +01:00
Alexander Neumann
61e66e936f Fix imports 2016-01-17 18:48:05 +01:00
Alexander Neumann
314182e7e0 Add debug, do not create bucket if it already exists 2016-01-17 18:48:05 +01:00
Alexander Neumann
69e6e9e5c7 Update s3 library (again) 2016-01-17 18:48:05 +01:00
Alexander Neumann
fc347ba60f Add new test with multiple writes for backends 2016-01-17 18:48:05 +01:00
Alexander Neumann
26eb859663 Dockerfile: Add sftp server binary 2016-01-17 18:48:05 +01:00
Alexander Neumann
338ad42273 location: fix tests 2016-01-17 18:48:05 +01:00
Alexander Neumann
5722ccfcda Fix s3 backend, add more tests 2016-01-17 18:48:05 +01:00
Alexander Neumann
0237b0d972 Update s3 library again 2016-01-17 18:48:05 +01:00
Alexander Neumann
a850041cf0 ContReader: Remove debug output 2016-01-17 18:48:05 +01:00
Alexander Neumann
5071f28d55 ReadCloser: Call close if reader implements it 2016-01-17 18:48:05 +01:00
Alexander Neumann
e0361b1f9f Add ContinuousReader 2016-01-17 18:48:05 +01:00
Alexander Neumann
f319354174 Update s3 library again 2016-01-17 18:48:05 +01:00
Alexander Neumann
a73c4bd5a7 update s3 library for bugfix 2016-01-17 18:48:05 +01:00
Alexander Neumann
d79c85af62 Fix s3 tests 2016-01-17 18:48:05 +01:00
Alexander Neumann
407819e5a9 s3: properly integrate minio-go lib 2016-01-17 18:48:05 +01:00
Alexander Neumann
2c15597e24 walker: print errors 2016-01-17 18:48:05 +01:00
Alexander Neumann
a17b6bbb64 Update minio-go library 2016-01-17 18:48:05 +01:00
Alexander Neumann
1922a4272c s3: fix usage
Ignore error response for existing bucket, add more debug code.
2016-01-17 18:48:05 +01:00
Alexander Neumann
2b10791df2 location: Fix test 2016-01-17 18:48:05 +01:00
Alexander Neumann
1ad5c3813c correct CI s3 test server url 2016-01-17 18:48:05 +01:00
Alexander Neumann
7d5f8214cf use new backend open with config 2016-01-17 18:48:05 +01:00
Alexander Neumann
2b0b44c5ce s3: implement open with config 2016-01-17 18:48:05 +01:00
Alexander Neumann
f7c9091970 sftp: implement open with config 2016-01-17 18:48:05 +01:00
Alexander Neumann
7b1e8fdd06 local: correct comment 2016-01-17 18:48:05 +01:00
Alexander Neumann
d257dedf42 rename LocationParse -> Parse 2016-01-17 18:48:05 +01:00
Alexander Neumann
3d2a714b5a Update minio-go library 2016-01-17 18:48:05 +01:00
Alexander Neumann
de933a1d48 Rename URI -> Config/Location 2016-01-17 18:48:05 +01:00
Alexander Neumann
566a15285a Add repository location parsing code 2016-01-17 18:48:05 +01:00
Alexander Neumann
43cf95e3c6 Correctly stop the minio server after the tests 2016-01-17 18:48:05 +01:00
Alexander Neumann
0b12ceabe9 Dockerfile: Install go in home dir
This allows cross-compilation with gox with Go < 1.5
2016-01-17 18:48:05 +01:00
Alexander Neumann
e96f28c536 Output stderr when minio server failed 2016-01-17 18:48:05 +01:00
Alexander Neumann
d5e36bd2f0 Only run minio server for Go >= 1.5.1 2016-01-17 18:48:05 +01:00
Alexander Neumann
34e8f63f77 Increase debug output for minio server 2016-01-17 18:47:24 +01:00
Alexander Neumann
3e422c8776 Add debug output, listen on localhost 2016-01-17 18:47:24 +01:00
Alexander Neumann
edfb31f4fe s3: Run integration test with minio server 2016-01-17 18:47:24 +01:00
Alexander Neumann
8562a1bb2f Dockerfile: Also install minio 2016-01-17 18:46:08 +01:00
Alexander Neumann
fa7192fdfb CI: save cross-compiled binaries in /tmp 2016-01-17 18:46:08 +01:00
Alexander Neumann
c22c0f2706 Add Dockerfile that resembles the Travis environment 2016-01-17 18:46:08 +01:00
Alexander Neumann
5736742c3e s3: Open() creates bucket if it does not exist 2016-01-17 18:46:08 +01:00
Alexander Neumann
248f991ad4 s3: don't remove the bucket on Delete() 2016-01-17 18:46:08 +01:00
Alexander Neumann
55f10eb1c1 Fix s3 test with local minio server instance 2016-01-17 18:46:08 +01:00
Alexander Neumann
d0ca118387 Fix usage of the done chan 2016-01-17 18:46:08 +01:00
Chris Howey
69a9adc4c3 Use local instance of minio server.
Need to figure out how to have tests automatically start and kill
the server.
2016-01-17 18:46:08 +01:00
Chris Howey
e2445f4c97 GetPartialObject does not work. 2016-01-17 18:46:08 +01:00
Chris Howey
ed2a4ba1d5 Fix s3 backend test 2016-01-17 18:46:08 +01:00
Chris Howey
6d1552af51 Switch s3 library to allow for s3 compatible backends. Fixes #315 2016-01-17 18:46:08 +01:00
Alexander Neumann
c969de7fad Merge pull request #390 from restic/fix-travis
Fix travis
2016-01-16 14:39:51 +01:00
Alexander Neumann
b8c300e61e Remove run_tests.go from Makefile 2016-01-16 14:37:23 +01:00
Alexander Neumann
2499bbb09d Also specify new -X syntax for go1.6 2016-01-16 14:08:13 +01:00
Alexander Neumann
7c70d5c1bd Build toolchain for gox only on older Versions of Go 2016-01-16 13:40:16 +01:00
Alexander Neumann
f90381910b Remove Go tip, add 1.6beta2 2016-01-16 13:39:12 +01:00
Alexander Neumann
172c31ff45 Use gotestcover instead of homebrew run_tests.go 2016-01-16 13:32:23 +01:00
Alexander Neumann
bbfd1dd0c0 Fix ignore tip build failure 2016-01-16 13:23:45 +01:00
Alexander Neumann
8d71e5d698 Travis CI: Update Go version, add tip 2016-01-16 13:00:28 +01:00
Alexander Neumann
0f69169262 OpenChaos lecture 2016-01-13 20:16:47 +01:00
Alexander Neumann
72bcebbfb1 Remove (broken) sourcegraph and waffle badges 2016-01-07 21:09:32 +01:00
Philipp Serr
0fde09a866 Lock MasterIndex and InFlight store together
fixes: #358
2015-12-28 18:40:43 +01:00
Philipp Serr
e7bf936d2b Increase number of chunks and test repetitions 2015-12-28 18:33:28 +01:00
Philipp Serr
3d7f72311a Provoke unreferenced packs using fewer goroutines
TestParallelSaveWithDuplication has been reworked to provoke
unreferenced packs using fewer goroutines than before and to create
only one bytes.Reader per blob. This reduces memory usage
significantly.

The following actions have been taken to keep the chance of provoking
unreferenced packs due to #358 high:
 * Interweaved processing of subsequent chunks
 * Delaying each goroutine by a few pseudo-randomly chosen nanoseconds
   (depending on the platform this will most probably only make the os
   yield execution to another thread): together with the interweaved
   processing of subsequent chunks, this ensures a minimalistic delay
   between processing of (some) duplicated chunks
 * Repeating the test 5 times with different seeds

On my test machine, the modified test provoked unreferenced packs 60
times in a row.
2015-12-28 18:33:26 +01:00
Philipp Serr
6a548336ec Add a test concurrently saving duplicated chunks
This commit adds an integration test that calls Archiver.Save from
many goroutines processing several duplicated chunks concurrently.
The test asserts that after all chunks have been saved, there are no
unreferenced packs in the repository.

The test has been checked to give the expected results:
 1) Running the test with maxParallel=1 (all chunks are processed
    sequentially) has been verified not to produce any unreferenced
    packs. Consequently the test passes.
 2) Running the test with unbounded parallelism (maxParallel=
    math.MaxInt32) has been verified to produce unreferenced packs
    all the time (at least 25 test runs). Consequently the test fails
    due to #358.

references: #358
2015-12-28 18:33:22 +01:00
Alexander Neumann
d3e7766f89 Merge pull request #380 from restic/PKGBUILD-update
Update PKGBUILD to reflect restic official version numbering
2015-12-27 22:07:28 +01:00
Florian Daniel
360193320f Update PKGBUILD to reflect restic official version numbering 2015-12-27 22:05:05 +01:00
Alexander Neumann
1f1b8e16a7 Add Code Quality Badge
Closes #379
2015-12-27 20:35:27 +01:00
Alexander Neumann
3abff7928c Merge pull request #375 from restic/fix-suid
Backup and restore setuid/setgid/sticky bits
2015-12-20 20:45:18 +01:00
Alexander Neumann
2976df2dc6 Call brew update before installing 2015-12-20 20:01:13 +01:00
Alexander Neumann
f49cb62812 Backup and restore setuid/setgid/sticky bits
A user discovered that restic does not restore setuid/setgid/sticky file
attributes. This commit fixes that. The mode is stored in the Go format
as a uint32: https://golang.org/pkg/os/#FileMode
2015-12-20 19:45:36 +01:00
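
For reference, the bits involved: os.FileMode keeps setuid/setgid/sticky as
dedicated flags alongside the permission bits, and os.Chmod maps them back
to the platform's 04000/02000/01000 on Unix. A small illustration:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // A typical setuid binary's mode, stored as a Go os.FileMode
        // (a uint32 under the hood).
        mode := os.FileMode(0755) | os.ModeSetuid

        fmt.Printf("perm=%o setuid=%v setgid=%v sticky=%v\n",
            mode.Perm(),
            mode&os.ModeSetuid != 0,
            mode&os.ModeSetgid != 0,
            mode&os.ModeSticky != 0)

        // Restoring must chmod with these flags included, not just the
        // permission bits:
        //   _ = os.Chmod("restored-file", mode)
    }
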
Alexander Neumann
55d9c5f80c Merge pull request #364 from restic/fix-356
Allow reading all data
2015-12-06 21:55:21 +01:00
Alexander Neumann
d1ca986f55 Fix tests 2015-12-06 17:38:51 +01:00
Alexander Neumann
3ac1d0e4d1 add progress 2015-12-06 17:29:31 +01:00
Alexander Neumann
0e66a66bce read packs concurrently 2015-12-06 17:09:06 +01:00
Alexander Neumann
43a23f91a6 checker: add function to read and verify all data 2015-12-02 22:40:36 +01:00
Alexander Neumann
8d229bfd21 Make ReadCloser public 2015-12-02 22:37:58 +01:00
Alexander Neumann
04cd318f6c Merge pull request #362 from restic/improvements
Cleanups
2015-11-30 22:09:24 +01:00
Alexander Neumann
141d400b4a test: improvements 2015-11-29 14:54:20 +01:00
Alexander Neumann
b841eb4c54 crypto: check key for validity 2015-11-29 14:54:20 +01:00
Alexander Neumann
4f6bc754b8 MemBackend: Add Delete() and more debug 2015-11-29 14:53:02 +01:00
Alexander Neumann
26697a0223 Fix MemoryBackend GetReader() method 2015-11-29 14:52:19 +01:00
Alexander Neumann
480054bc3a MemoryBackend: handle config correctly, add tests for that 2015-11-29 14:52:19 +01:00
Alexander Neumann
538e5878a1 add debug logging to MemoryBackend 2015-11-29 14:52:19 +01:00
Alexander Neumann
9cb4e14327 add MemBackend and MockBackend 2015-11-29 14:52:19 +01:00
Alexander Neumann
da71da23d9 Add MockBackend 2015-11-29 14:52:19 +01:00
Alexander Neumann
0d5731383f Remove HashAppendWriter 2015-11-29 14:29:59 +01:00
Alexander Neumann
4fd7676e92 HashingWriter: Add documentation 2015-11-29 14:29:59 +01:00
Alexander Neumann
8209bb309b split out decryptReader and packerManager 2015-11-29 14:29:59 +01:00
Alexander Neumann
d4b873ca76 Repo: add documentation 2015-11-29 14:29:57 +01:00
Alexander Neumann
5b601f00b1 Add error checking 2015-11-29 14:25:57 +01:00
Alexander Neumann
2c95772a6a Update README, instruct users to always open an issue 2015-11-22 19:23:48 +01:00
Alexander Neumann
867fc5bd4b Merge pull request #354 from restic/fix-index
Save new packs in index atomically
2015-11-20 23:20:27 +01:00
Alexander Neumann
567de35df4 Save new packs in index atomically
This commit fixes a situation reported by a user where two indexes
contained information about the same pack without overlap, e.g.:

Index 3e6a32 contained:

    {
      "id": "c02e3b",
      "blobs": [
        {
          "id": "8114b1",
          "type": "data",
          "offset": 0,
          "length": 530107
        }
      ]
    }

And index 62da5f contained:

    {
      "id": "c02e3b",
      "blobs": [
        {
          "id": "e344f8",
          "type": "data",
          "offset": 1975848,
          "length": 3426468
        },
        {
          "id": "939ed9",
          "type": "data",
          "offset": 530107,
          "length": 1445741
        }
      ]
    }

This commit adds all blobs in a pack in one atomic operation so that
intermediate states such as these do not happen.
2015-11-20 22:56:56 +01:00
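
A minimal sketch of the atomicity fix (types are illustrative, not
restic's): all blobs belonging to one pack enter the index inside a single
critical section, so no reader can observe a pack with only part of its
blobs recorded:

    package main

    import (
        "fmt"
        "sync"
    )

    type blob struct {
        ID     string
        Offset uint
        Length uint
    }

    type index struct {
        mu    sync.Mutex
        packs map[string][]blob
    }

    // StorePack records every blob of a pack in one atomic operation.
    func (idx *index) StorePack(packID string, blobs []blob) {
        idx.mu.Lock()
        defer idx.mu.Unlock()
        idx.packs[packID] = append(idx.packs[packID], blobs...)
    }

    func main() {
        idx := &index{packs: make(map[string][]blob)}
        idx.StorePack("c02e3b", []blob{
            {ID: "8114b1", Offset: 0, Length: 530107},
            {ID: "939ed9", Offset: 530107, Length: 1445741},
        })
        fmt.Println(len(idx.packs["c02e3b"]), "blobs indexed atomically")
    }
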
Alexander Neumann
34bf70faea Merge pull request #352 from restic/rework-readme
Rework README
2015-11-13 23:48:17 +01:00
Alexander Neumann
6b9c8ffd14 Merge pull request #350 from restic/allow-read-only-ops
Allow check/restore read-only, without locking
2015-11-13 23:48:05 +01:00
Alexander Neumann
96061d2a2f Fix debug log message 2015-11-13 23:47:53 +01:00
Alexander Neumann
88b167cc10 Add note about unreleased code in the master branch 2015-11-13 12:41:47 +01:00
Alexander Neumann
a79fba13e1 Shorten README, remove options 2015-11-13 12:41:36 +01:00
Alexander Neumann
a1440c819b Make NoLock a global option 2015-11-13 12:33:59 +01:00
Alexander Neumann
6edb7e02d0 Allow restoring and listing without locking 2015-11-10 22:13:53 +01:00
Alexander Neumann
c79dcbd7c4 Allow checking a read-only repo with --no-lock 2015-11-10 21:41:22 +01:00
Alexander Neumann
acba82c8f7 Merge pull request #252 from restic/repack-blobs
WIP: Repack blobs
2015-11-09 20:57:57 +01:00
Alexander Neumann
1f9aea9905 fix test on windows, reset read-only flag 2015-11-08 22:41:45 +01:00
Alexander Neumann
742d69bf4d Add another test for optimizing unused blobs 2015-11-08 22:38:17 +01:00
Alexander Neumann
5776b8f01c remove ConvertIndex 2015-11-08 22:27:13 +01:00
Alexander Neumann
6c54d3fa82 index: also mark old index as final on decode 2015-11-08 22:21:29 +01:00
Alexander Neumann
2e6eee991d Add test for optimize command with old indexes 2015-11-08 22:21:08 +01:00
Alexander Neumann
c59b12c939 Show a hint when the checker finds an old index 2015-11-08 21:50:48 +01:00
Alexander Neumann
0222b1701e Remove automatic index conversion 2015-11-08 21:35:48 +01:00
Alexander Neumann
43e2c9837e check: removing orphaned packs is handled in 'optimize' 2015-11-08 21:24:51 +01:00
Alexander Neumann
c4fc7b52ae Add 'optimize' command that repacks blobs 2015-11-08 21:10:03 +01:00
Alexander Neumann
cd948b56ac cmd_check: Don't display unused blobs by default 2015-11-08 20:46:52 +01:00
Alexander Neumann
19a713970f Merge pull request #344 from restic/fix-338
pipe: propagate errors properly
2015-11-08 17:58:55 +01:00
Alexander Neumann
4484a3ea0d archiver: ignore dir nodes with errors 2015-11-07 11:42:28 +01:00
Alexander Neumann
ea41a1045f Add integration test for error on readdirnames 2015-11-06 23:19:56 +01:00
Alexander Neumann
005c13ff05 pipe: make test platform-independent 2015-11-06 22:38:34 +01:00
Alexander Neumann
1569176e48 pipe: propagate errors properly 2015-11-06 19:41:57 +01:00
Alexander Neumann
7c3d227527 Merge pull request #342 from restic/add-bug-reporting-instructions
Add instructions on reporting bugs
2015-11-06 00:41:22 +01:00
Alexander Neumann
0f92f6319f Add note on determinism 2015-11-05 23:10:35 +01:00
Alexander Neumann
7b9f2fa9ef Add instructions on reporting bugs 2015-11-05 23:09:01 +01:00
Alexander Neumann
8de8ca05f1 Merge pull request #341 from restic/read-password-from-stdin
Read password from stdin if terminal is not a tty
2015-11-04 22:45:54 +01:00
Alexander Neumann
18d7f7f835 Read password from stdin if terminal is not a tty 2015-11-04 22:20:58 +01:00
Alexander Neumann
73e085ae23 Merge pull request #335 from JaCoB1123/windowspathnames
Always use forward slashes in SFTP (Fixes #334)
2015-11-03 21:29:00 +01:00
Jan Bader
af960b9b40 Simplify Implementation of Join 2015-11-03 18:48:51 +01:00
Jan Bader
d09e6d5b0f Fix missing Join calls 2015-11-03 18:47:01 +01:00
Alexander Neumann
db41102bfa Finalize repacker 2015-11-02 19:28:30 +01:00
Alexander Neumann
1fc0d78913 Refactor Index.Store() to take a PackedBlob 2015-11-02 19:07:03 +01:00
Alexander Neumann
f3f84b1544 Add ID handling for index 2015-11-02 18:52:13 +01:00
Alexander Neumann
60a34087c9 Move LoadIndexWithDecoder to index.go 2015-11-02 18:52:13 +01:00
Alexander Neumann
266bc05edc Add mostly ready repacker 2015-11-02 18:52:13 +01:00
Alexander Neumann
51aff3ca57 Add FindBlobsForPacks() 2015-11-02 18:52:13 +01:00
Alexander Neumann
30cf002574 Sort IDSet.List() 2015-11-02 18:52:13 +01:00
Alexander Neumann
89a77ab2f9 Add Index.ListPack() 2015-11-02 18:52:13 +01:00
Alexander Neumann
484331cd8d Add repacker 2015-11-02 18:52:13 +01:00
Alexander Neumann
181963ba08 Fix IDSet.String() 2015-11-02 17:36:05 +01:00
Alexander Neumann
50c2f2e87f cmd_cat: allow dumping raw tree blobs 2015-11-02 17:36:04 +01:00
Alexander Neumann
ba8e6035b0 Merge pull request #336 from restic/refactor-index
Refactor Index.Lookup() to return struct PackedBlob
2015-11-02 17:35:09 +01:00
Jan Bader
81ec7337e0 Always use forward slashes in SFTP (Fixes #334)
Add custom Join func that always uses forward slashes in SFTP
2015-11-02 14:53:42 +01:00
Alexander Neumann
ed9470b19d Remove tempfiles after test 2015-10-31 14:53:03 +01:00
Alexander Neumann
fccde030d5 Refactor Index.Lookup() to return struct PackedBlob 2015-10-31 14:47:42 +01:00
Alexander Neumann
fc0f5d8f72 Merge pull request #331 from tyll/gpgfingerprint
Show full GPG fingerprint in README
2015-10-29 21:55:28 +01:00
Till Maas
34af39667b Show full GPG fingerprint in README 2015-10-29 21:51:21 +01:00
Alexander Neumann
6ffd7da4d7 Merge pull request #328 from restic/improve-backup-speed
Load trees from repository in parallel
2015-10-29 20:03:29 +01:00
Alexander Neumann
5958dc920b Fix walk tree test for windows 2015-10-28 22:02:37 +01:00
Alexander Neumann
5a45d95b80 Fix compatibility with Go < 1.5 2015-10-27 23:06:56 +01:00
Alexander Neumann
23aeca85ff load trees in parallel 2015-10-27 22:44:10 +01:00
Alexander Neumann
b5976474dd backup: add debug output for excluded files/dirs 2015-10-27 22:34:30 +01:00
Alexander Neumann
18478e2d3d walk_test: test correct number of items 2015-10-27 21:41:25 +01:00
Alexander Neumann
ca5c0bf78e Test WalkTree() for correct order 2015-10-27 20:51:55 +01:00
Alexander Neumann
4cc9d946de Add benchmark for WalkTree with high-latency repo 2015-10-27 20:51:55 +01:00
Alexander Neumann
50fd8f6f44 make repo for tree walker mockable 2015-10-27 20:51:55 +01:00
Alexander Neumann
7711fcda69 use new index format for repository tests 2015-10-27 20:51:55 +01:00
Alexander Neumann
7717ea5cca Add benchmark for LoadJSONPack 2015-10-27 20:51:55 +01:00
Alexander Neumann
ae46674cd3 debug: log timing 2015-10-27 20:51:55 +01:00
Alexander Neumann
00e05ae3c9 bugfix: close pack files after reading the header 2015-10-27 20:39:52 +01:00
Alexander Neumann
4bc81c2bd2 Merge pull request #325 from restic/fix-rebuild-index
rebuild-index: Remember already stored blobs
2015-10-25 22:57:12 +01:00
Alexander Neumann
74cd134b54 rebuild index: remember already stored blobs 2015-10-25 22:34:22 +01:00
Alexander Neumann
734ae7fcb8 Add test for corner case
It was observed that a restic repository still contained overlapping
indexes after `rebuild-index` had been called. This is caused by the
rebuilder instantly forgetting that blobs have already been saved once
a full index has been written during index rebuilding.

This commit adds a (failing) test that shows the behaviour.
2015-10-25 21:51:57 +01:00
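
In sketch form (rebuild, index and seen are made-up names, not the real
code), the fix is that the set of already-stored blob IDs lives outside the
per-index loop, so flushing a full intermediate index no longer makes the
rebuilder forget what it has already recorded:

    type blob struct{ ID string }
    type pack struct{ Blobs []blob }
    type index struct{ blobs []blob }

    // full reports whether the index should be flushed; the threshold
    // here is arbitrary.
    func (idx *index) full() bool { return len(idx.blobs) >= 1000 }

    func rebuild(packs []pack, save func(*index)) {
        seen := make(map[string]bool) // persists across index flushes
        idx := &index{}
        for _, p := range packs {
            for _, b := range p.Blobs {
                if seen[b.ID] {
                    continue // already stored in an earlier index
                }
                idx.blobs = append(idx.blobs, b)
                seen[b.ID] = true
            }
            if idx.full() {
                save(idx)
                idx = &index{} // fresh index, but keep `seen`
            }
        }
        save(idx) // write the final, partially filled index
    }
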
Alexander Neumann
7b8e42a763 Silence rebuild-index tests 2015-10-25 21:51:46 +01:00
Alexander Neumann
566fb22bcf Merge pull request #324 from restic/fix-index-grow
Fix really large index and memory exhaustion
2015-10-25 18:58:31 +01:00
Alexander Neumann
b88ccb4f1b Fix Unpacker test 2015-10-25 18:21:48 +01:00
Alexander Neumann
efbce9f0fa rebuild-index: handle not yet indexed packs 2015-10-25 18:07:51 +01:00
Alexander Neumann
88849c06a6 rebuild-index: Refactor a bit 2015-10-25 17:53:02 +01:00
Alexander Neumann
5d617edbbf local/sftp backend: Do not seek if offset is 0 2015-10-25 17:51:26 +01:00
Alexander Neumann
6aed9f268b Add command rebuild-index 2015-10-25 17:24:52 +01:00
Alexander Neumann
9074c923ea index: add AddToSupersedes() 2015-10-25 17:06:56 +01:00
Alexander Neumann
1365495599 debug: remove extra space between filename and line 2015-10-25 17:06:20 +01:00
Alexander Neumann
461d54e43c Refactor repository.SaveIndex() 2015-10-25 17:05:54 +01:00
Alexander Neumann
96ecc26507 Let the checker return a list of hints along with errors 2015-10-25 16:26:50 +01:00
Alexander Neumann
91e1929b52 checker: test for packs in multiple indexes 2015-10-25 16:00:06 +01:00
Alexander Neumann
04614c7527 Add test for packs in duplicate indexes 2015-10-25 15:35:33 +01:00
Alexander Neumann
f7ff5b766c Mark written indexes as finalized 2015-10-25 15:35:18 +01:00
Alexander Neumann
d9f9b77d68 Add Index.Packs() and IDSet.Equals() 2015-10-25 15:28:01 +01:00
Alexander Neumann
4b1a2caea7 Allow overwriting the IndexFull function for tests 2015-10-25 15:05:22 +01:00
Alexander Neumann
af0d6f58b9 Remove unneeded pointer to pack id 2015-10-25 14:35:08 +01:00
Alexander Neumann
2710d6399a Cleanup index code
The selectFn wasn't used any more, so remove it from generatePackList().
2015-10-25 14:26:04 +01:00
Alexander Neumann
650eab6a0e Fix typo in dump usage 2015-10-25 13:19:35 +01:00
Alexander Neumann
5de36dfdf0 Merge pull request #310 from restic/resume-backups
resume interrupted backups
2015-10-14 21:53:25 +02:00
Alexander Neumann
1dd731fdb8 Handle concurrent access to the inFlight list 2015-10-14 20:50:54 +02:00
Alexander Neumann
6fa4be5af2 Regularly save intermediate indexes 2015-10-12 23:59:17 +02:00
Alexander Neumann
941b7025b6 Delete Index.Remove() 2015-10-12 22:51:11 +02:00
Alexander Neumann
4b2a4b03ec Remove Index.StoreInProgress() 2015-10-12 22:49:31 +02:00
Alexander Neumann
7ab9915859 Fix 'dump' command 2015-10-12 22:42:31 +02:00
Alexander Neumann
86fcd170f6 Add and use MasterIndex 2015-10-12 22:34:12 +02:00
Alexander Neumann
64fa89d406 Add error checks 2015-10-12 22:07:56 +02:00
Alexander Neumann
eb73182fcf Rework index decode and handling old format 2015-10-12 20:53:07 +02:00
Alexander Neumann
356bb62243 Add CreateEncryptedBlob and GetDecryptReader 2015-10-12 20:53:07 +02:00
Alexander Neumann
de2c20be84 Dump individual indexes 2015-10-12 20:53:07 +02:00
Alexander Neumann
96f2165067 Allow loading index with old format 2015-10-12 20:53:07 +02:00
Alexander Neumann
7944e8e323 Update index format 2015-10-12 20:53:07 +02:00
Alexander Neumann
5c39abfe53 Merge pull request #319 from restic/fix-311
Handle null subtree IDs
2015-10-11 21:48:52 +02:00
Alexander Neumann
1020e9c3af Check for data blobs with null ID, improve errors 2015-10-11 20:58:57 +02:00
Alexander Neumann
cc7acba02b Return the original backend ID on duplicate entries 2015-10-11 20:45:50 +02:00
Alexander Neumann
f188cf81dc Add more panic() calls for invalid conditions 2015-10-11 20:45:42 +02:00
Alexander Neumann
7db2369081 Shorten error message for tree errors 2015-10-11 20:22:52 +02:00
Alexander Neumann
db85ab8aa0 Use the correct channel for sending errors 2015-10-11 19:13:45 +02:00
Alexander Neumann
86c8328f62 Handle null subtree IDs 2015-10-11 19:13:35 +02:00
Alexander Neumann
72fcd00859 Check subtrees with null ID 2015-10-11 18:46:26 +02:00
Alexander Neumann
8a7873ee3a Handle invalid subtree IDs 2015-10-11 18:45:16 +02:00
Alexander Neumann
e738d35c4e Merge pull request #309 from restic/update-ci-go-version
Update Go 1.4 to version 1.4.3
2015-09-27 20:20:35 +02:00
Alexander Neumann
6ddda5fc5e appveyor: remove old Go installation 2015-09-27 18:34:11 +02:00
Alexander Neumann
7291342723 Install current version of Go
This is inspired by the appveyor.yml from the go-plus project:
https://github.com/joefitzgerald/go-plus/blob/master/appveyor.yml
2015-09-27 18:02:44 +02:00
Alexander Neumann
70a6233b94 Install the 'cover' tool 2015-09-27 17:58:13 +02:00
Alexander Neumann
749ca28534 Update Go 1.4 to version 1.4.3 2015-09-27 17:22:12 +02:00
Alexander Neumann
321c2e6a47 Merge pull request #308 from episource/fix/restic_cr292_unreferenced_pack
fix:restic#292 Prevent concurrent processing of the same blob
2015-09-27 17:18:28 +02:00
Philipp Serr
7b11660f4f Prevent concurrent processing of same blob
... by first adding a preliminary index entry and making this fail if
an index entry for the same blob already exists.

A preliminary index entry is characterized by not yet being associated
with a pack. Until now, these entries where added to the index just
like final index entries using index.Store, which silently overwrites
existing index entries.

This commit adds a new method index.StoreInProgress which refuses to
overwrite existing index entries and allows for creating preliminary
index entries only. The existing method index.Store has not been
changed and continues to silently overwrite existing index entries.
This distinction is important, as otherwise it would be impossible to
update a preliminary index entry after the blob has been written to a
pack.

Resolves: restic#292
2015-09-27 16:56:49 +02:00
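
A compact sketch of the two-phase scheme described here (types and names
are hypothetical, not the actual restic code): a preliminary entry carries
no pack ID yet, StoreInProgress refuses to overwrite an existing entry so
only one worker wins the race, and Store later finalizes the entry once
the blob has been written to a pack.

    import (
        "errors"
        "sync"
    )

    type entry struct {
        packID string // empty while the blob is still in flight
    }

    type Index struct {
        mu      sync.Mutex
        entries map[string]entry
    }

    func NewIndex() *Index {
        return &Index{entries: make(map[string]entry)}
    }

    var ErrAlreadyExists = errors.New("blob already present in index")

    // StoreInProgress adds a preliminary entry; it fails if any entry
    // for the blob already exists.
    func (idx *Index) StoreInProgress(blobID string) error {
        idx.mu.Lock()
        defer idx.mu.Unlock()
        if _, ok := idx.entries[blobID]; ok {
            return ErrAlreadyExists
        }
        idx.entries[blobID] = entry{}
        return nil
    }

    // Store finalizes (and silently overwrites) an entry once the blob
    // is in a pack, updating the preliminary entry.
    func (idx *Index) Store(blobID, packID string) {
        idx.mu.Lock()
        defer idx.mu.Unlock()
        idx.entries[blobID] = entry{packID: packID}
    }
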
Alexander Neumann
4fb46faae7 Use Go 1.5.1 for travis tests 2015-09-12 22:15:09 +02:00
Alexander Neumann
316f6ed313 Update chunker version 2015-09-12 22:14:58 +02:00
Alexander Neumann
108d28316a Merge pull request #294 from restic/rework-id
Refactor IDs and IDSet
2015-09-08 21:26:07 +02:00
Alexander Neumann
5c46dc41de Add methods to IDSet 2015-09-05 18:49:28 +02:00
Alexander Neumann
d42ff509ba Small refactorings
* use uint instead of uint32 in packs/indexes
 * use ID.Str() for debug messages
 * add ParallelIDWorkFunc
2015-09-05 18:41:58 +02:00
Alexander Neumann
2cb0fbf589 backend: Add String() to IDs 2015-09-05 18:41:58 +02:00
Alexander Neumann
a0bad1695c Remove comment 2015-09-05 18:41:58 +02:00
Alexander Neumann
681d7851aa index: use backend.ID instead of string for maps 2015-09-05 18:41:58 +02:00
Alexander Neumann
3063ad1d05 Split id.go into several files 2015-09-05 18:41:56 +02:00
Alexander Neumann
76b1f017c0 Merge pull request #290 from bchapuis/fix-289
Load the index and search subtree
2015-09-01 21:08:21 +02:00
Chapuis Bertil
c765688779 find command integration tests 2015-08-28 19:31:05 +02:00
Chapuis Bertil
d4686ebcc5 Load the index and search subtree 2015-08-27 23:21:44 +02:00
Alexander Neumann
f653aca0ed Merge pull request #287 from restic/fix-279
Remove tests for directories
2015-08-27 22:07:07 +02:00
Alexander Neumann
0a457eafed Correctly test for config file 2015-08-26 22:06:52 +02:00
Alexander Neumann
b211f834fa Remove tests for directories
For testing whether a repository already exists it is sufficient to
test if the config file (and therefore the master key) exists.

Closes #279
2015-08-26 21:51:40 +02:00
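
As a sketch (the backend interface shown is assumed for illustration), the
existence check then reduces to a single test for the config file:

    // repoExists reports whether a repository is initialized by testing
    // only for the config file; per the commit above, its presence
    // implies the master key exists as well.
    func repoExists(be interface{ Test(name string) (bool, error) }) (bool, error) {
        return be.Test("config")
    }
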
Alexander Neumann
9aefc2b7a6 Merge pull request #281 from restic/version-with-git
build.go: use new combined version string
2015-08-26 20:53:24 +02:00
Alexander Neumann
10f0d7ccac Merge pull request #280 from restic/ldflags-go1.5
build.go: Make `-ldflags` compatible with Go 1.5
2015-08-26 20:33:43 +02:00
Alexander Neumann
cb460b7dec Merge pull request #285 from howeyc/fix-aws-v4
Use new version of s3 library, Fixes #276
2015-08-26 20:20:32 +02:00
Alexander Neumann
39a82d951b Refactor getVersion(), address code review comments 2015-08-26 20:17:51 +02:00
Alexander Neumann
a54f9715b1 Add "build: " prefix to verbose messages 2015-08-26 20:03:26 +02:00
Alexander Neumann
4c47c2b2c9 Address code review comments 2015-08-26 20:03:16 +02:00
Chris Howey
ccb2f00b8a typo 2015-08-26 07:54:39 -05:00
Chris Howey
3bf447b422 Update tests for new s3 lib 2015-08-26 07:44:00 -05:00
Chris Howey
10cd672a92 Use new version of s3 library, Fixes #276 2015-08-26 06:25:05 -05:00
Alexander Neumann
f3c64d0740 build.go: use new combined version string
Previously, when a VERSION file existed, it took precedence over the
git version. This is unfortunate because all restic binaries compiled
from a git checkout will just identify as the latest release (e.g.
'0.1.0'), regardless of any commits on top of it.

This commit adds a combined version string by using the contents of
the VERSION file and appending the current git version returned by `git
describe`, if available, e.g.:

    0.1.0 (v0.1.0-6-gb188217-dirty).
2015-08-25 22:20:53 +02:00
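
Roughly, the combined string can be assembled like this (a sketch: the
function names and the exact `git describe` flags are assumptions, the
real logic lives in build.go):

    import (
        "os/exec"
        "strings"
    )

    // gitDescribe returns the output of `git describe --tags --dirty`,
    // or "" when git is unavailable, e.g. in an unpacked release tarball.
    func gitDescribe() string {
        out, err := exec.Command("git", "describe", "--tags", "--dirty").Output()
        if err != nil {
            return ""
        }
        return strings.TrimSpace(string(out))
    }

    // combinedVersion joins the VERSION file contents with the git
    // version, yielding e.g. "0.1.0 (v0.1.0-6-gb188217-dirty)".
    func combinedVersion(fileVersion string) string {
        if git := gitDescribe(); git != "" {
            return fileVersion + " (" + git + ")"
        }
        return fileVersion
    }
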
Alexander Neumann
dca200c2e9 build.go: Make -ldflags compatible with Go 1.5
This change uses the old syntax (-ldflags "-X foo bar") for Go <= 1.4
and the new syntax (-ldflags "-X foo=bar") for Go 1.5, which avoids the
deprecation warning.
2015-08-25 22:07:52 +02:00
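
The version switch can be pictured like this (a sketch with made-up names,
ldflagX and goMinor, not the actual build.go code):

    // ldflagX builds a linker -X flag; Go 1.5 introduced the
    // `name=value` form and warns about the older space-separated one.
    func ldflagX(goMinor int, name, value string) string {
        if goMinor >= 5 {
            return "-X " + name + "=" + value // Go >= 1.5
        }
        return "-X " + name + " " + value // Go <= 1.4
    }
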
Alexander Neumann
b188217e83 Merge pull request #274 from restic/fix-documentation
Documentation fixes
2015-08-22 23:07:31 +02:00
Alexander Neumann
3a50c2bbfb Fix docs 2015-08-22 23:03:25 +02:00
Alexander Neumann
e0e9cd8680 More documentation fixes 2015-08-22 15:09:53 +02:00
Alexander Neumann
b6872fb454 Clarify documentation about MAC key 2015-08-22 15:09:21 +02:00
Florian Daniel
3f3cca8f2a Merge pull request #273 from restic/fix-124
fix typo in Readme
2015-08-22 00:01:35 +02:00
Florian Daniel
647ee5b74a fix typo in Readme 2015-08-21 23:53:59 +02:00
785 changed files with 141572 additions and 11653 deletions

12
.github/ISSUE_TEMPLATE.md vendored Normal file

@@ -0,0 +1,12 @@
## Output of `restic version`
## Expected behavior
## Actual behavior
## Steps to reproduce the behavior

9
.gitignore vendored

@@ -1,8 +1,5 @@
/.gopath
/pkg
/bin
/restic
/restic.debug
/dirdiff
cmd/dirdiff/dirdiff
cmd/gentestdata/gentestdata
cmd/restic/restic
/.vagrant
/vendor/pkg

2
.hound.yml Normal file

@@ -0,0 +1,2 @@
go:
enabled: true


@@ -3,13 +3,23 @@ sudo: false
go:
- 1.3.3
- 1.4.2
- 1.5
- 1.4.3
- 1.5.4
- 1.6.2
os:
- linux
- osx
matrix:
exclude:
- os: osx
go: 1.3.3
- os: osx
go: 1.4.3
- os: osx
go: 1.5.4
notifications:
irc:
channels:
@@ -22,11 +32,11 @@ install:
- go version
- export GOBIN="$GOPATH/bin"
- export PATH="$PATH:$GOBIN"
- export GOPATH="$GOPATH:${TRAVIS_BUILD_DIR}/Godeps/_workspace"
- go env
- ulimit -n 2048
script:
- go run run_integration_tests.go
after_success:
- goveralls -coverprofile=all.cov -service=travis-ci -repotoken "$COVERALLS_TOKEN" || true
- GOPATH=$PWD:$PWD/vendor goveralls -coverprofile=all.cov -service=travis-ci -repotoken "$COVERALLS_TOKEN"


@@ -25,42 +25,64 @@ those tagged
[minor complexity](https://github.com/restic/restic/labels/minor%20complexity).
Reporting Bugs
==============
You've found a bug? Thanks for letting us know so we can fix it! It is a good
idea to describe in detail how to reproduce the bug (when you know how), what
environment was used and so on. Please tell us at least the following things:
* What's the version of restic you used? Please include the output of
`restic version` in your bug report.
* What commands did you execute to get to where the bug occurred?
* What did you expect?
* What happened instead?
* Are you aware of a way to reproduce the bug?
Remember, the easier it is for us to reproduce the bug, the earlier it will be
corrected!
In addition, you can compile restic with debug support by running
`go run build.go -tags debug` and instructing it to create a debug log by
setting the environment variable `DEBUG_LOG` to a file, e.g. like this:
$ export DEBUG_LOG=/tmp/restic-debug.log
$ restic backup ~/work
Please be aware that the debug log file will contain potentially sensitive
things like file and directory names, so please either redact it before
uploading it somewhere or post only the parts that are really relevant.
Development Environment
=======================
For development, it is recommended to check out the restic repository within a
`GOPATH`; an introductory text is
["How to Write Go Code"](https://golang.org/doc/code.html). It is recommended
to have a working directory, we're using `~/work/restic` in the following. This
directory mainly contains the directory `src`, where the source code is stored.
For development you need the build tool [`gb`](https://getgb.io), it can be
installed by running the following command:
First, create the necessary directory structure and clone the restic repository
to the correct location:
$ go get github.com/constabulary/gb/...
The repository contains two directories with code: `src/` contains the code
written for restic, whereas `vendor/` contains copies of libraries restic
depends on. The libraries are managed with the `gb vendor` command.
Just clone the repository, `cd` to it and run `gb build` to build the binary:
$ mkdir --parents ~/work/restic/src/github.com/restic
$ cd ~/work/restic/src/github.com/restic
$ git clone https://github.com/restic/restic
$ cd restic
Now we're in the main directory of the restic repository. The last step is to
set the environment variable `$GOPATH` to the correct value:
$ export GOPATH=~/work/restic:~/work/restic/src/github.com/restic/restic/Godeps/_workspace
$ gb build
[...]
$ bin/restic version
restic compiled manually
compiled at unknown time with go1.6
The following commands can be used to run all the tests:
$ go test ./...
$ gb test
ok github.com/restic/restic 8.174s
[...]
The restic binary can be built from the directory `cmd/restic` this way:
$ cd cmd/restic
$ go build
$ ./restic version
restic compiled manually on go1.4.2
if you want to run your tests on Linux, OpenBSD or FreeBSD, you can use
If you want to run your tests on Linux, OpenBSD or FreeBSD, you can use
[vagrant](https://www.vagrantup.com/) with the provided `Vagrantfile` to
quickly set up VMs and run the tests, e.g.:
@@ -78,23 +100,32 @@ get it into the project! The workflow we're using is also described on the
[GitHub Flow](https://guides.github.com/introduction/flow/) website, it boils
down to the following steps:
0. If you want to work on something, please add a comment to the issue on
GitHub. For a new feature, please add an issue before starting to work on
it, so that duplicate work is prevented.
1. First we would kindly ask you to fork our project on GitHub if you haven't
done so already.
2. Clone the repository locally and create a new branch. If you are working on
the code itself, please set up the development environment as described in
the previous section and instead of cloning add your fork on GitHub as a
remote to the clone of the restic repository.
the previous section.
3. Then commit your changes as fine grained as possible, as smaller patches,
that handle one and only one issue are easier to discuss and merge.
4. Push the new branch with your changes to your fork of the repository.
5. Create a pull request by visiting the GitHub website, it will guide you
through the process.
6. You will receive comments on your code and the feature or bug that they
address. Maybe you need to rework some minor things, in this case push new
commits to the branch you created for the pull request, they will be
automatically added to the pull request.
7. Once your code looks good, we'll merge it. Thanks a lot for your
contribution!
7. Once your code looks good and passes all the tests, we'll merge it. Thanks
a lot for your contribution!
Please provide the patches for each bug or feature in a separate branch and
open up a pull request for each.
@@ -109,6 +140,14 @@ in the project root directory before committing. Installing the script
pre-commit hook checks formatting before committing automatically, just copy
this script to `.git/hooks/pre-commit`.
For each pull request, several different systems run the integration tests on
Linux, OS X and Windows. We won't merge any code that does not pass all tests
for all systems, so when a test fails, try to find out what's wrong and fix
it. If you need help on this, please leave a comment in the pull request, and
we'll be glad to assist. Having a PR with failing integration tests is nothing
to be ashamed of. On the contrary, that happens regularly for all of us. That's
what the tests are there for.
Git Commits
-----------

58
Dockerfile Normal file

@@ -0,0 +1,58 @@
# This Dockerfile configures a container that is similar to the Travis CI
# environment and can be used to run tests locally.
#
# build the image:
# docker build -t restic/test .
#
# run all tests and cross-compile restic:
# docker run --rm -v $PWD:/home/travis/restic restic/test go run run_integration_tests.go -minio minio
#
# run interactively:
# docker run --interactive --tty --rm -v $PWD:/home/travis/restic restic/test /bin/bash
#
# run a subset of tests:
# docker run --rm -v $PWD:/home/travis/restic restic/test gb test -v ./backend
#
# build the image for an older version of Go:
# docker build --build-arg GOVERSION=1.3.3 -t restic/test:go1.3.3 .
FROM ubuntu:14.04
ARG GOVERSION=1.6
ARG GOARCH=amd64
# install dependencies
RUN apt-get update
RUN apt-get install -y --no-install-recommends ca-certificates wget git build-essential openssh-server
# add and configure user
ENV HOME /home/travis
RUN useradd -m -d $HOME -s /bin/bash travis
# run everything below as user travis
USER travis
WORKDIR $HOME
# download and install Go
RUN wget -q -O /tmp/go.tar.gz https://storage.googleapis.com/golang/go${GOVERSION}.linux-${GOARCH}.tar.gz
RUN tar xf /tmp/go.tar.gz && rm -f /tmp/go.tar.gz
ENV GOROOT $HOME/go
ENV GOPATH $HOME/gopath
ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin:$HOME/bin
RUN mkdir -p $HOME/restic
# pre-install tools, this speeds up running the tests itself
RUN go get github.com/constabulary/gb/...
RUN go get golang.org/x/tools/cmd/cover
RUN go get github.com/mattn/goveralls
RUN go get github.com/mitchellh/gox
RUN go get github.com/pierrre/gotestcover
RUN mkdir $HOME/bin \
&& wget -q -O $HOME/bin/minio https://dl.minio.io/server/minio/release/linux-${GOARCH}/minio \
&& chmod +x $HOME/bin/minio
# set TRAVIS_BUILD_DIR for integration script
ENV TRAVIS_BUILD_DIR $HOME/restic
WORKDIR $HOME/restic

62
Godeps/Godeps.json generated

@@ -1,62 +0,0 @@
{
"ImportPath": "github.com/restic/restic",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "bazil.org/fuse",
"Rev": "18419ee53958df28fcfc9490fe6123bd59e237bb"
},
{
"ImportPath": "github.com/jessevdk/go-flags",
"Comment": "v1-297-g1b89bf7",
"Rev": "1b89bf73cd2c3a911d7b2a279ab085c4a18cf539"
},
{
"ImportPath": "github.com/juju/errors",
"Rev": "4567a5e69fd3130ca0d89f69478e7ac025b67452"
},
{
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/mitchellh/goamz/aws",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/mitchellh/goamz/s3",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "518aed2757a65cfa64d4b1b2baf08410f8b7a6bc"
},
{
"ImportPath": "github.com/restic/chunker",
"Rev": "e795b80f4c927ebcf2687ce18bcf1a39fee740b1"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
},
{
"ImportPath": "golang.org/x/crypto/poly1305",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
},
{
"ImportPath": "golang.org/x/crypto/scrypt",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
}
]
}

5
Godeps/Readme generated

@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

2
Godeps/_workspace/.gitignore generated vendored

@@ -1,2 +0,0 @@
/pkg
/bin


@@ -1,2 +0,0 @@
*.go filter=gofmt
*.cgo filter=gofmt


@@ -1,11 +0,0 @@
*~
.#*
## the next line needs to start with a backslash to avoid looking like
## a comment
\#*#
.*.swp
*.test
/clockfs
/hellofs


@@ -1,4 +0,0 @@
/*.seq.svg
# not ignoring *.seq.png; we want those committed to the repo
# for embedding on Github


@@ -1 +0,0 @@
package fstestutil


@@ -1,35 +0,0 @@
language: go
install:
# go-flags
- go get -d -v ./...
- go build -v ./...
# linting
- go get golang.org/x/tools/cmd/vet
- go get github.com/golang/lint
- go install github.com/golang/lint/golint
# code coverage
- go get golang.org/x/tools/cmd/cover
- go get github.com/onsi/ginkgo/ginkgo
- go get github.com/modocache/gover
- if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
script:
# go-flags
- $(exit $(gofmt -l . | wc -l))
- go test -v ./...
# linting
- go tool vet -all=true -v=true . || true
- $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
# code coverage
- $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
- $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
- if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
env:
# coveralls.io
secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="


@@ -1,74 +0,0 @@
package aws
import (
"time"
)
// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
Total time.Duration // total duration of attempt.
Delay time.Duration // interval between each try in the burst.
Min int // minimum number of retries; overrides Total
}
type Attempt struct {
strategy AttemptStrategy
last time.Time
end time.Time
force bool
count int
}
// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
now := time.Now()
return &Attempt{
strategy: s,
last: now,
end: now.Add(s.Total),
force: true,
}
}
// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
now := time.Now()
sleep := a.nextSleep(now)
if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
return false
}
a.force = false
if sleep > 0 && a.count > 0 {
time.Sleep(sleep)
now = time.Now()
}
a.count++
a.last = now
return true
}
func (a *Attempt) nextSleep(now time.Time) time.Duration {
sleep := a.strategy.Delay - now.Sub(a.last)
if sleep < 0 {
return 0
}
return sleep
}
// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
if a.force || a.strategy.Min > a.count {
return true
}
now := time.Now()
if now.Add(a.nextSleep(now)).Before(a.end) {
a.force = true
return true
}
return false
}


@@ -1,57 +0,0 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"time"
)
func (S) TestAttemptTiming(c *C) {
testAttempt := aws.AttemptStrategy{
Total: 0.25e9,
Delay: 0.1e9,
}
want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
t0 := time.Now()
for a := testAttempt.Start(); a.Next(); {
got = append(got, time.Now().Sub(t0))
}
got = append(got, time.Now().Sub(t0))
c.Assert(got, HasLen, len(want))
const margin = 0.01e9
for i, got := range got {
lo := want[i] - margin
hi := want[i] + margin
if got < lo || got > hi {
c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
}
}
}
func (S) TestAttemptNextHasNext(c *C) {
a := aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 2e8}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
time.Sleep(2e8)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
time.Sleep(1e8)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
}


@@ -1,445 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package aws
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/vaughan0/go-ini"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
Name string // the canonical name of this region.
EC2Endpoint string
S3Endpoint string
S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name.
S3LocationConstraint bool // true if this region requires a LocationConstraint declaration.
S3LowercaseBucket bool // true if the region requires bucket names to be lower case.
SDBEndpoint string
SNSEndpoint string
SQSEndpoint string
IAMEndpoint string
ELBEndpoint string
AutoScalingEndpoint string
RdsEndpoint string
Route53Endpoint string
}
var USGovWest = Region{
"us-gov-west-1",
"https://ec2.us-gov-west-1.amazonaws.com",
"https://s3-fips-us-gov-west-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.us-gov-west-1.amazonaws.com",
"https://sqs.us-gov-west-1.amazonaws.com",
"https://iam.us-gov.amazonaws.com",
"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
"https://autoscaling.us-gov-west-1.amazonaws.com",
"https://rds.us-gov-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USEast = Region{
"us-east-1",
"https://ec2.us-east-1.amazonaws.com",
"https://s3.amazonaws.com",
"",
false,
false,
"https://sdb.amazonaws.com",
"https://sns.us-east-1.amazonaws.com",
"https://sqs.us-east-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-east-1.amazonaws.com",
"https://autoscaling.us-east-1.amazonaws.com",
"https://rds.us-east-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USWest = Region{
"us-west-1",
"https://ec2.us-west-1.amazonaws.com",
"https://s3-us-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-1.amazonaws.com",
"https://sns.us-west-1.amazonaws.com",
"https://sqs.us-west-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-west-1.amazonaws.com",
"https://autoscaling.us-west-1.amazonaws.com",
"https://rds.us-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var USWest2 = Region{
"us-west-2",
"https://ec2.us-west-2.amazonaws.com",
"https://s3-us-west-2.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-2.amazonaws.com",
"https://sns.us-west-2.amazonaws.com",
"https://sqs.us-west-2.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.us-west-2.amazonaws.com",
"https://autoscaling.us-west-2.amazonaws.com",
"https://rds.us-west-2.amazonaws.com",
"https://route53.amazonaws.com",
}
var EUWest = Region{
"eu-west-1",
"https://ec2.eu-west-1.amazonaws.com",
"https://s3-eu-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.eu-west-1.amazonaws.com",
"https://sns.eu-west-1.amazonaws.com",
"https://sqs.eu-west-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.eu-west-1.amazonaws.com",
"https://autoscaling.eu-west-1.amazonaws.com",
"https://rds.eu-west-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var EUCentral = Region{
"eu-central-1",
"https://ec2.eu-central-1.amazonaws.com",
"https://s3-eu-central-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.eu-central-1.amazonaws.com",
"https://sqs.eu-central-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.eu-central-1.amazonaws.com",
"https://autoscaling.eu-central-1.amazonaws.com",
"https://rds.eu-central-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var APSoutheast = Region{
"ap-southeast-1",
"https://ec2.ap-southeast-1.amazonaws.com",
"https://s3-ap-southeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-1.amazonaws.com",
"https://sns.ap-southeast-1.amazonaws.com",
"https://sqs.ap-southeast-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
"https://autoscaling.ap-southeast-1.amazonaws.com",
"https://rds.ap-southeast-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var APSoutheast2 = Region{
"ap-southeast-2",
"https://ec2.ap-southeast-2.amazonaws.com",
"https://s3-ap-southeast-2.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-2.amazonaws.com",
"https://sns.ap-southeast-2.amazonaws.com",
"https://sqs.ap-southeast-2.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
"https://autoscaling.ap-southeast-2.amazonaws.com",
"https://rds.ap-southeast-2.amazonaws.com",
"https://route53.amazonaws.com",
}
var APNortheast = Region{
"ap-northeast-1",
"https://ec2.ap-northeast-1.amazonaws.com",
"https://s3-ap-northeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-northeast-1.amazonaws.com",
"https://sns.ap-northeast-1.amazonaws.com",
"https://sqs.ap-northeast-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
"https://autoscaling.ap-northeast-1.amazonaws.com",
"https://rds.ap-northeast-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var SAEast = Region{
"sa-east-1",
"https://ec2.sa-east-1.amazonaws.com",
"https://s3-sa-east-1.amazonaws.com",
"",
true,
true,
"https://sdb.sa-east-1.amazonaws.com",
"https://sns.sa-east-1.amazonaws.com",
"https://sqs.sa-east-1.amazonaws.com",
"https://iam.amazonaws.com",
"https://elasticloadbalancing.sa-east-1.amazonaws.com",
"https://autoscaling.sa-east-1.amazonaws.com",
"https://rds.sa-east-1.amazonaws.com",
"https://route53.amazonaws.com",
}
var CNNorth = Region{
"cn-north-1",
"https://ec2.cn-north-1.amazonaws.com.cn",
"https://s3.cn-north-1.amazonaws.com.cn",
"",
true,
true,
"",
"https://sns.cn-north-1.amazonaws.com.cn",
"https://sqs.cn-north-1.amazonaws.com.cn",
"https://iam.cn-north-1.amazonaws.com.cn",
"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
"https://autoscaling.cn-north-1.amazonaws.com.cn",
"https://rds.cn-north-1.amazonaws.com.cn",
"https://route53.amazonaws.com",
}
var Regions = map[string]Region{
APNortheast.Name: APNortheast,
APSoutheast.Name: APSoutheast,
APSoutheast2.Name: APSoutheast2,
EUWest.Name: EUWest,
EUCentral.Name: EUCentral,
USEast.Name: USEast,
USWest.Name: USWest,
USWest2.Name: USWest2,
SAEast.Name: SAEast,
USGovWest.Name: USGovWest,
CNNorth.Name: CNNorth,
}
type Auth struct {
AccessKey, SecretKey, Token string
}
var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"
func init() {
// RFC3986
u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
for _, c := range u {
unreserved[c] = true
}
}
type credentials struct {
Code string
LastUpdated string
Type string
AccessKeyId string
SecretAccessKey string
Token string
Expiration string
}
// GetMetaData retrieves instance metadata about the current machine.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
url := "http://169.254.169.254/latest/meta-data/" + path
resp, err := RetryingClient.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
return
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return []byte(body), err
}
func getInstanceCredentials() (cred credentials, err error) {
credentialPath := "iam/security-credentials/"
// Get the instance role
role, err := GetMetaData(credentialPath)
if err != nil {
return
}
// Get the instance role credentials
credentialJSON, err := GetMetaData(credentialPath + string(role))
if err != nil {
return
}
err = json.Unmarshal([]byte(credentialJSON), &cred)
return
}
// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
func GetAuth(accessKey string, secretKey string) (auth Auth, err error) {
// First try passed in credentials
if accessKey != "" && secretKey != "" {
return Auth{accessKey, secretKey, ""}, nil
}
// Next try to get auth from the shared credentials file
auth, err = SharedAuth()
if err == nil {
// Found auth, return
return
}
// Next try to get auth from the environment
auth, err = EnvAuth()
if err == nil {
// Found auth, return
return
}
// Next try getting auth from the instance role
cred, err := getInstanceCredentials()
if err == nil {
// Found auth, return
auth.AccessKey = cred.AccessKeyId
auth.SecretKey = cred.SecretAccessKey
auth.Token = cred.Token
return
}
err = errors.New("No valid AWS authentication found")
return
}
// SharedAuth creates an Auth based on shared credentials stored in
// $HOME/.aws/credentials. The AWS_PROFILE environment variable is used to
// select the profile.
func SharedAuth() (auth Auth, err error) {
var profileName = os.Getenv("AWS_PROFILE")
if profileName == "" {
profileName = "default"
}
var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
if credentialsFile == "" {
var homeDir = os.Getenv("HOME")
if homeDir == "" {
err = errors.New("Could not get HOME")
return
}
credentialsFile = homeDir + "/.aws/credentials"
}
file, err := ini.LoadFile(credentialsFile)
if err != nil {
err = errors.New("Couldn't parse AWS credentials file")
return
}
var profile = file[profileName]
if profile == nil {
err = errors.New("Couldn't find profile in AWS credentials file")
return
}
auth.AccessKey = profile["aws_access_key_id"]
auth.SecretKey = profile["aws_secret_access_key"]
if auth.AccessKey == "" {
err = errors.New("AWS_ACCESS_KEY_ID not found in environment in credentials file")
}
if auth.SecretKey == "" {
err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
}
return
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used.
// For accounts that require a security token, it is read from AWS_SECURITY_TOKEN
func EnvAuth() (auth Auth, err error) {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
if auth.AccessKey == "" {
auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
}
auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
if auth.SecretKey == "" {
auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
}
auth.Token = os.Getenv("AWS_SECURITY_TOKEN")
if auth.AccessKey == "" {
err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
if auth.SecretKey == "" {
err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
return
}
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
encode := false
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
encode = true
break
}
}
if !encode {
return s
}
e := make([]byte, len(s)*3)
ei := 0
for i := 0; i != len(s); i++ {
c := s[i]
if c > 127 || !unreserved[c] {
e[ei] = '%'
e[ei+1] = hex[c>>4]
e[ei+2] = hex[c&0xF]
ei += 3
} else {
e[ei] = c
ei += 1
}
}
return string(e[:ei])
}


@@ -1,203 +0,0 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"io/ioutil"
"os"
"strings"
"testing"
)
func Test(t *testing.T) {
TestingT(t)
}
var _ = Suite(&S{})
type S struct {
environ []string
}
func (s *S) SetUpSuite(c *C) {
s.environ = os.Environ()
}
func (s *S) TearDownTest(c *C) {
os.Clearenv()
for _, kv := range s.environ {
l := strings.SplitN(kv, "=", 2)
os.Setenv(l[0], l[1])
}
}
func (s *S) TestSharedAuthNoHome(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Could not get HOME")
}
func (s *S) TestSharedAuthNoCredentialsFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
os.Setenv("HOME", "/tmp")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file")
}
func (s *S) TestSharedAuthNoProfileInFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file")
}
func (s *S) TestSharedAuthNoKeysInProfile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file")
}
func (s *S) TestSharedAuthDefaultCredentials(c *C) {
os.Clearenv()
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestSharedAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthNoSecret(c *C) {
os.Clearenv()
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
func (s *S) TestEnvAuthNoAccess(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
func (s *S) TestEnvAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthWithToken(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
os.Setenv("AWS_SECURITY_TOKEN", "token")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access", Token: "token"})
}
func (s *S) TestEnvAuthAlt(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthStatic(c *C) {
auth, err := aws.GetAuth("access", "secret")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthEnv(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.GetAuth("", "")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEncode(c *C) {
c.Assert(aws.Encode("foo"), Equals, "foo")
c.Assert(aws.Encode("/"), Equals, "%2F")
}
func (s *S) TestRegionsAreNamed(c *C) {
for n, r := range aws.Regions {
c.Assert(n, Equals, r.Name)
}
}


@@ -1,125 +0,0 @@
package aws
import (
"math"
"net"
"net/http"
"time"
)
type RetryableFunc func(*http.Request, *http.Response, error) bool
type WaitFunc func(try int)
type DeadlineFunc func() time.Time
type ResilientTransport struct {
// Timeout is the maximum amount of time a dial will wait for
// a connect to complete.
//
// The default is no timeout.
//
// With or without a timeout, the operating system may impose
// its own earlier timeout. For instance, TCP timeouts are
// often around 3 minutes.
DialTimeout time.Duration
// MaxTries, if non-zero, specifies the number of times we will retry on
// failure. Retries are only attempted for temporary network errors or known
// safe failures.
MaxTries int
Deadline DeadlineFunc
ShouldRetry RetryableFunc
Wait WaitFunc
transport *http.Transport
}
// Convenience method for creating an http client
func NewClient(rt *ResilientTransport) *http.Client {
rt.transport = &http.Transport{
Dial: func(netw, addr string) (net.Conn, error) {
c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
if err != nil {
return nil, err
}
c.SetDeadline(rt.Deadline())
return c, nil
},
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
}
// TODO: Would be nice if ResilientTransport allowed clients to initialize
// with http.Transport attributes.
return &http.Client{
Transport: rt,
}
}
var retryingTransport = &ResilientTransport{
Deadline: func() time.Time {
return time.Now().Add(5 * time.Second)
},
DialTimeout: 10 * time.Second,
MaxTries: 3,
ShouldRetry: awsRetry,
Wait: ExpBackoff,
}
// Exported default client
var RetryingClient = NewClient(retryingTransport)
func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return t.tries(req)
}
// Retry a request a maximum of t.MaxTries times.
// We'll only retry if the proper criteria are met.
// If a wait function is specified, wait that amount of time
// in between requests.
func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
for try := 0; try < t.MaxTries; try += 1 {
res, err = t.transport.RoundTrip(req)
if !t.ShouldRetry(req, res, err) {
break
}
if res != nil {
res.Body.Close()
}
if t.Wait != nil {
t.Wait(try)
}
}
return
}
func ExpBackoff(try int) {
time.Sleep(100 * time.Millisecond *
time.Duration(math.Exp2(float64(try))))
}
func LinearBackoff(try int) {
time.Sleep(time.Duration(try*100) * time.Millisecond)
}
// Decide if we should retry a request.
// In general, the criteria for retrying a request are described here
// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
func awsRetry(req *http.Request, res *http.Response, err error) bool {
retry := false
// Retry if there's a temporary network error.
if neterr, ok := err.(net.Error); ok {
if neterr.Temporary() {
retry = true
}
}
// Retry if we get a 5xx series error.
if res != nil {
if res.StatusCode >= 500 && res.StatusCode < 600 {
retry = true
}
}
return retry
}


@@ -1,121 +0,0 @@
package aws_test
import (
"fmt"
"github.com/mitchellh/goamz/aws"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
// Retrieve the response from handler using aws.RetryingClient
func serveAndGet(handler http.HandlerFunc) (body string, err error) {
ts := httptest.NewServer(handler)
defer ts.Close()
resp, err := aws.RetryingClient.Get(ts.URL)
if err != nil {
return
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("Bad status code: %d", resp.StatusCode)
}
greeting, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return
}
return strings.TrimSpace(string(greeting)), nil
}
func TestClient_expected(t *testing.T) {
body := "foo bar"
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_delay(t *testing.T) {
body := "baz"
wait := 4
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if wait < 0 {
// If we dipped to zero delay and still failed.
t.Fatal("Never succeeded.")
}
wait -= 1
time.Sleep(time.Second * time.Duration(wait))
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.", resp)
}
}
func TestClient_no4xxRetry(t *testing.T) {
tries := 0
// Always return 404; 4xx responses must not be retried.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 404)
})
if err == nil {
t.Fatal("should have error")
}
if tries != 1 {
t.Fatalf("should only try once: %d", tries)
}
}
func TestClient_retries(t *testing.T) {
body := "biz"
failed := false
// Fail once before succeeding.
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if !failed {
http.Error(w, "error", 500)
failed = true
} else {
fmt.Fprintln(w, body)
}
})
if failed != true {
t.Error("We didn't retry!")
}
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_fails(t *testing.T) {
tries := 0
// Fail 3 times and return the last error.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 500)
})
if err == nil {
t.Fatal("should have error")
}
if tries != 3 {
t.Fatal("Didn't retry enough")
}
}


@@ -1,27 +0,0 @@
package s3
import (
"github.com/mitchellh/goamz/aws"
)
var originalStrategy = attempts
func SetAttemptStrategy(s *aws.AttemptStrategy) {
if s == nil {
attempts = originalStrategy
} else {
attempts = *s
}
}
func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
sign(auth, method, path, params, headers)
}
func SetListPartsMax(n int) {
listPartsMax = n
}
func SetListMultiMax(n int) {
listMultiMax = n
}


@@ -1,409 +0,0 @@
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"errors"
"io"
"sort"
"strconv"
)
// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
//
// See http://goo.gl/vJfTG for an overview of multipart uploads.
type Multi struct {
Bucket *Bucket
Key string
UploadId string
}
// That's the default. Here just for testing.
var listMultiMax = 1000
type listMultiResp struct {
NextKeyMarker string
NextUploadIdMarker string
IsTruncated bool
Upload []Multi
CommonPrefixes []string `xml:"CommonPrefixes>Prefix"`
}
// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
params := map[string][]string{
"uploads": {""},
"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
"prefix": {prefix},
"delimiter": {delim},
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: b.Name,
params: params,
}
var resp listMultiResp
err := b.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, nil, err
}
for i := range resp.Upload {
multi := &resp.Upload[i]
multi.Bucket = b
multis = append(multis, multi)
}
prefixes = append(prefixes, resp.CommonPrefixes...)
if !resp.IsTruncated {
return multis, prefixes, nil
}
params["key-marker"] = []string{resp.NextKeyMarker}
params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
multis, _, err := b.ListMulti(key, "")
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
for _, m := range multis {
if m.Key == key {
return m, nil
}
}
return b.InitMulti(key, contType, perm)
}
// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
headers := map[string][]string{
"Content-Type": {contType},
"Content-Length": {"0"},
"x-amz-acl": {string(perm)},
}
params := map[string][]string{
"uploads": {""},
}
req := &request{
method: "POST",
bucket: b.Name,
path: key,
headers: headers,
params: params,
}
var err error
var resp struct {
UploadId string `xml:"UploadId"`
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, &resp)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
}
// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
// See http://goo.gl/pqZer for details.
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64)
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(partSize, 10)},
"Content-MD5": {md5b64},
}
params := map[string][]string{
"uploadId": {m.UploadId},
"partNumber": {strconv.FormatInt(int64(n), 10)},
}
for attempt := attempts.Start(); attempt.Next(); {
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
req := &request{
method: "PUT",
bucket: m.Bucket.Name,
path: m.Key,
headers: headers,
params: params,
payload: r,
}
err = m.Bucket.S3.prepare(req)
if err != nil {
return Part{}, err
}
resp, err := m.Bucket.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return Part{}, err
}
etag := resp.Header.Get("ETag")
if etag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return Part{n, etag, partSize}, nil
}
panic("unreachable")
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
_, err = r.Seek(0, 0)
if err != nil {
return 0, "", "", err
}
digest := md5.New()
size, err = io.Copy(digest, r)
if err != nil {
return 0, "", "", err
}
sum := digest.Sum(nil)
md5hex = hex.EncodeToString(sum)
md5b64 = base64.StdEncoding.EncodeToString(sum)
return size, md5hex, md5b64, nil
}
type Part struct {
N int `xml:"PartNumber"`
ETag string
Size int64
}
type partSlice []Part
func (s partSlice) Len() int { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type listPartsResp struct {
NextPartNumberMarker string
IsTruncated bool
Part []Part
}
// That's the default. Here just for testing.
var listPartsMax = 1000
// ListParts returns the list of previously uploaded parts in m,
// ordered by part number.
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListParts() ([]Part, error) {
params := map[string][]string{
"uploadId": {m.UploadId},
"max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
}
var parts partSlice
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
var resp listPartsResp
err := m.Bucket.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
parts = append(parts, resp.Part...)
if !resp.IsTruncated {
sort.Sort(parts)
return parts, nil
}
params["part-number-marker"] = []string{resp.NextPartNumberMarker}
attempt = attempts.Start() // Last request worked.
}
panic("unreachable")
}
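// exampleListParts is a hedged usage sketch, not part of the original
// package: it sums the sizes of the parts uploaded so far, e.g. to report
// progress before resuming an interrupted upload.
func exampleListParts(m *Multi) (int64, error) {
	parts, err := m.ListParts()
	if err != nil {
		return 0, err
	}
	var total int64
	for _, p := range parts {
		total += p.Size
	}
	return total, nil
}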
type ReaderAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
old, err := m.ListParts()
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
reuse := 0 // Index of next old part to consider reusing.
current := 1 // Part number of latest good part handled.
totalSize, err := r.Seek(0, 2)
if err != nil {
return nil, err
}
first := true // Must send at least one empty part if the file is empty.
var result []Part
NextSection:
for offset := int64(0); offset < totalSize || first; offset += partSize {
first = false
if offset+partSize > totalSize {
partSize = totalSize - offset
}
section := io.NewSectionReader(r, offset, partSize)
_, md5hex, md5b64, err := seekerInfo(section)
if err != nil {
return nil, err
}
for reuse < len(old) && old[reuse].N <= current {
// Looks like this part was already sent.
part := &old[reuse]
etag := `"` + md5hex + `"`
if part.N == current && part.Size == partSize && part.ETag == etag {
// Checksum matches. Reuse the old part.
result = append(result, *part)
current++
continue NextSection
}
reuse++
}
// Part wasn't found or doesn't match. Send it.
part, err := m.putPart(current, section, partSize, md5b64)
if err != nil {
return nil, err
}
result = append(result, part)
current++
}
return result, nil
}
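// examplePutAll is a hedged usage sketch, not part of the original
// package: it uploads an in-memory blob in 5MB parts and completes the
// upload, relying on PutAll to reuse any parts that already match.
func examplePutAll(m *Multi, data []byte) error {
	// *bytes.Reader implements both io.ReaderAt and io.ReadSeeker,
	// so it satisfies the ReaderAtSeeker interface above.
	parts, err := m.PutAll(bytes.NewReader(data), 5*1024*1024)
	if err != nil {
		return err
	}
	return m.Complete(parts)
}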
type completeUpload struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts completeParts `xml:"Part"`
}
type completePart struct {
PartNumber int
ETag string
}
type completeParts []completePart
func (p completeParts) Len() int { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
params := map[string][]string{
"uploadId": {m.UploadId},
}
c := completeUpload{}
for _, p := range parts {
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
}
sort.Sort(c.Parts)
data, err := xml.Marshal(&c)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "POST",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
payload: bytes.NewReader(data),
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
params := map[string][]string{
"uploadId": {m.UploadId},
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "DELETE",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
return err
}
panic("unreachable")
}
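// exampleAbort is a hedged sketch, not part of the original package: per
// the note above, aborting while part uploads are in flight may leave
// parts behind, so a cautious caller can retry until ListParts reports
// that the upload is gone. The retry bound is an arbitrary placeholder.
func exampleAbort(m *Multi) error {
	for i := 0; i < 3; i++ {
		if err := m.Abort(); err != nil {
			return err
		}
		if _, err := m.ListParts(); hasCode(err, "NoSuchUpload") {
			return nil // all storage for the upload has been freed
		}
	}
	return errors.New("multipart upload still present after repeated aborts")
}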


@@ -1,370 +0,0 @@
package s3_test
import (
"encoding/xml"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
"io"
"io/ioutil"
"strings"
)
func (s *S) TestInitMulti(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"text/plain"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.Multi("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi"})
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(multi.UploadId, Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestMultiReturnOld(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
multi, err := b.Multi("multi1", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.Key, Equals, "multi1")
c.Assert(multi.UploadId, Equals, "iUVug89pPvSswrikD")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{"multi1"})
}
func (s *S) TestListParts(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
testServer.Response(200, nil, ListPartsResultDump2)
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"d067a0fa9dc61a6e7195ca99696b5a89"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
testServer.WaitRequest() // The internal error.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["max-parts"], DeepEquals, []string{"1000"})
c.Assert(req.Form["part-number-marker"], DeepEquals, []string{"2"})
}
func (s *S) TestPutPart(c *C) {
headers := map[string]string{
"ETag": `"26f90efd10d614f100252ff56d88dad8"`,
}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, headers, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
c.Assert(err, IsNil)
c.Assert(part.N, Equals, 1)
c.Assert(part.Size, Equals, int64(8))
c.Assert(part.ETag, Equals, headers["ETag"])
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"8"})
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"JvkO/RDWFPEAJS/1bYja2A=="})
}
func readAll(r io.Reader) string {
data, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return string(data)
}
func (s *S) TestPutAllNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
etag1 := map[string]string{"ETag": `"etag1"`}
etag2 := map[string]string{"ETag": `"etag2"`}
etag3 := map[string]string{"ETag": `"etag3"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
testServer.Response(200, etag2, "")
testServer.Response(200, etag3, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].ETag, Equals, `"etag3"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send part 1.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part1")
// Send part 2.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "part2")
// Send part 3 with shorter body.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"3"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"4"})
c.Assert(readAll(req.Body), Equals, "last")
}
func (s *S) TestPutAllZeroSizeFile(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
etag1 := map[string]string{"ETag": `"etag1"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// Must send at least one part, so that completing it will work.
parts, err := multi.PutAll(strings.NewReader(""), 5)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts. Won't find anything.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
// Send empty part.
req = testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"1"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"0"})
c.Assert(readAll(req.Body), Equals, "")
}
func (s *S) TestPutAllResume(c *C) {
etag2 := map[string]string{"ETag": `"etag2"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, ListPartsResultDump1)
testServer.Response(200, nil, ListPartsResultDump2)
testServer.Response(200, etag2, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
// "part1" and "part3" match the checksums in ResultDump1.
// The middle one is a mismatch (it refers to "part2").
parts, err := multi.PutAll(strings.NewReader("part1partXpart3"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].N, Equals, 1)
c.Assert(parts[0].Size, Equals, int64(5))
c.Assert(parts[0].ETag, Equals, `"ffc88b4ca90a355f8ddba6b2c3b2af5c"`)
c.Assert(parts[1].N, Equals, 2)
c.Assert(parts[1].Size, Equals, int64(5))
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].N, Equals, 3)
c.Assert(parts[2].Size, Equals, int64(5))
c.Assert(parts[2].ETag, Equals, `"49dcd91231f801159e893fb5c6674985"`)
c.Assert(err, IsNil)
// Init
testServer.WaitRequest()
// List old parts, split across two requests.
for i := 0; i < 2; i++ {
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/multi")
}
// Send part 2, as it didn't match the checksum.
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form["partNumber"], DeepEquals, []string{"2"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"5"})
c.Assert(readAll(req.Body), Equals, "partX")
}
func (s *S) TestMultiComplete(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
// Note the 200 response. Completing will hold the connection on some
// kind of long poll, and may return a late error even after a 200.
testServer.Response(200, nil, InternalErrorDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
var payload struct {
XMLName xml.Name
Part []struct {
PartNumber int
ETag string
}
}
dec := xml.NewDecoder(req.Body)
err = dec.Decode(&payload)
c.Assert(err, IsNil)
c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
c.Assert(len(payload.Part), Equals, 2)
c.Assert(payload.Part[0].PartNumber, Equals, 1)
c.Assert(payload.Part[0].ETag, Equals, `"ETag1"`)
c.Assert(payload.Part[1].PartNumber, Equals, 2)
c.Assert(payload.Part[1].ETag, Equals, `"ETag2"`)
}
func (s *S) TestMultiAbort(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
err = multi.Abort()
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
}
func (s *S) TestListMulti(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
multis, prefixes, err := b.ListMulti("", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Equals, "iUVug89pPvSswrikD")
c.Assert(multis[1].Key, Equals, "multi2")
c.Assert(multis[1].UploadId, Equals, "DkirwsSvPp98guVUi")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/sample/")
c.Assert(req.Form["uploads"], DeepEquals, []string{""})
c.Assert(req.Form["prefix"], DeepEquals, []string{""})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["max-uploads"], DeepEquals, []string{"1000"})
}


@@ -1,241 +0,0 @@
package s3_test
var GetObjectErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>NoSuchBucket</Code><Message>The specified bucket does not exist</Message>
<BucketName>non-existent-bucket</BucketName><RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D</HostId></Error>
`
var GetListResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>quotes</Name>
<Prefix>N</Prefix>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>Nelson</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>5</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>Neo</Key>
<LastModified>2006-01-01T12:00:00.000Z</LastModified>
<ETag>&quot;828ef3fdfa96f00ad9f27c383fc9ac7f&quot;</ETag>
<Size>4</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>bcaf1ffd86a5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
</ListBucketResult>
`
var GetListResultDump2 = `
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix>photos/2006/</Prefix>
<Marker>some-marker</Marker>
<MaxKeys>1000</MaxKeys>
<Delimiter>/</Delimiter>
<IsTruncated>false</IsTruncated>
<CommonPrefixes>
<Prefix>photos/2006/feb/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>photos/2006/jan/</Prefix>
</CommonPrefixes>
</ListBucketResult>
`
var InitMultiResultDump = `
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
</InitiateMultipartUploadResult>
`
var ListPartsResultDump1 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>0</PartNumberMarker>
<NextPartNumberMarker>2</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>true</IsTruncated>
<Part>
<PartNumber>1</PartNumber>
<LastModified>2013-01-30T13:45:51.000Z</LastModified>
<ETag>&quot;ffc88b4ca90a355f8ddba6b2c3b2af5c&quot;</ETag>
<Size>5</Size>
</Part>
<Part>
<PartNumber>2</PartNumber>
<LastModified>2013-01-30T13:45:52.000Z</LastModified>
<ETag>&quot;d067a0fa9dc61a6e7195ca99696b5a89&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListPartsResultDump2 = `
<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>sample</Bucket>
<Key>multi</Key>
<UploadId>JNbR_cMdwnGiD12jKAd6WK2PUkfj2VxA7i4nCwjE6t71nI9Tl3eVDPFlU0nOixhftH7I17ZPGkV3QA.l7ZD.QQ--</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d099c</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>2</PartNumberMarker>
<NextPartNumberMarker>3</NextPartNumberMarker>
<MaxParts>2</MaxParts>
<IsTruncated>false</IsTruncated>
<Part>
<PartNumber>3</PartNumber>
<LastModified>2013-01-30T13:46:50.000Z</LastModified>
<ETag>&quot;49dcd91231f801159e893fb5c6674985&quot;</ETag>
<Size>5</Size>
</Part>
</ListPartsResult>
`
var ListMultiResultDump = `
<?xml version="1.0"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>goamz-test-bucket-us-east-1-akiajk3wyewhctyqbf7a</Bucket>
<KeyMarker/>
<UploadIdMarker/>
<NextKeyMarker>multi1</NextKeyMarker>
<NextUploadIdMarker>iUVug89pPvSswrikD72p8uO62EzhNtpDxRmwC5WSiWDdK9SfzmDqe3xpP1kMWimyimSnz4uzFc3waVM5ufrKYQ--</NextUploadIdMarker>
<Delimiter>/</Delimiter>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
<Upload>
<Key>multi1</Key>
<UploadId>iUVug89pPvSswrikD</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>gustavoniemeyer</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<Upload>
<Key>multi2</Key>
<UploadId>DkirwsSvPp98guVUi</UploadId>
<Initiator>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Initiator>
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2013-01-30T18:15:47.000Z</Initiated>
</Upload>
<CommonPrefixes>
<Prefix>a/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>b/</Prefix>
</CommonPrefixes>
</ListMultipartUploadsResult>
`
var NoSuchUploadErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchUpload</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
var InternalErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>InternalError</Code>
<Message>Not relevant</Message>
<BucketName>sample</BucketName>
<RequestId>3F1B667FAD71C3D8</RequestId>
<HostId>kjhwqk</HostId>
</Error>
`
var GetKeyHeaderDump = map[string]string{
"x-amz-id-2": "ef8yU9AS1ed4OpIszj7UDNEHGran",
"x-amz-request-id": "318BC8BC143432E5",
"x-amz-version-id": "3HL4kqtJlcpXroDTDmjVBH40Nrjfkd",
"Date": "Wed, 28 Oct 2009 22:32:00 GMT",
"Last-Modified": "Sun, 1 Jan 2006 12:00:00 GMT",
"ETag": "fba9dede5f27731c9771645a39863328",
"Content-Length": "434234",
"Content-Type": "text/plain",
}
var GetListBucketsDump = `
<?xml version="1.0" encoding="UTF-8"?>
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<Buckets>
<Bucket>
<Name>bucket1</Name>
<CreationDate>2012-01-01T02:03:04.000Z</CreationDate>
</Bucket>
<Bucket>
<Name>bucket2</Name>
<CreationDate>2014-01-11T02:03:04.000Z</CreationDate>
</Bucket>
</Buckets>
</ListAllMyBucketsResult>
`
var MultiDelDump = `
<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Deleted>
<Key>a.go</Key>
</Deleted>
<Deleted>
<Key>b.go</Key>
</Deleted>
</DeleteResult>
`


@@ -1,893 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/aws"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strconv"
"strings"
"time"
)
const debug = false
// The S3 type encapsulates operations with an S3 region.
type S3 struct {
aws.Auth
aws.Region
HTTPClient func() *http.Client
private byte // Reserve the right to use private data.
}
// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
*S3
Name string
}
// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
ID string
DisplayName string
}
var attempts = aws.AttemptStrategy{
Min: 5,
Total: 5 * time.Second,
Delay: 200 * time.Millisecond,
}
// New creates a new S3.
func New(auth aws.Auth, region aws.Region) *S3 {
return &S3{
Auth: auth,
Region: region,
HTTPClient: func() *http.Client {
return http.DefaultClient
},
private: 0}
}
// Bucket returns a Bucket with the given name.
func (s3 *S3) Bucket(name string) *Bucket {
if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
name = strings.ToLower(name)
}
return &Bucket{s3, name}
}
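// exampleClient is a hedged usage sketch, not part of the original
// package: it wires credentials and a region into an S3 client and returns
// a bucket handle. The bucket name is a placeholder.
func exampleClient(auth aws.Auth, region aws.Region) *Bucket {
	s3c := New(auth, region)
	return s3c.Bucket("sample-bucket")
}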
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`
// locationConstraint returns an io.Reader specifying a LocationConstraint if
// required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() io.Reader {
constraint := ""
if s3.Region.S3LocationConstraint {
constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
}
return strings.NewReader(constraint)
}
type ACL string
const (
Private = ACL("private")
PublicRead = ACL("public-read")
PublicReadWrite = ACL("public-read-write")
AuthenticatedRead = ACL("authenticated-read")
BucketOwnerRead = ACL("bucket-owner-read")
BucketOwnerFull = ACL("bucket-owner-full-control")
)
// The ListBucketsResp type holds the results of a List buckets operation.
type ListBucketsResp struct {
Buckets []Bucket `xml:">Bucket"`
}
// ListBuckets lists all buckets
//
// See: http://goo.gl/NqlyMN
func (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) {
req := &request{
path: "/",
}
result = &ListBucketsResp{}
for attempt := attempts.Start(); attempt.Next(); {
err = s3.query(req, result)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
// set S3 instance on buckets
for i := range result.Buckets {
result.Buckets[i].S3 = s3
}
return result, nil
}
// PutBucket creates a new bucket.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
headers := map[string][]string{
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: "/",
headers: headers,
payload: b.locationConstraint(),
}
return b.S3.query(req, nil)
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
req := &request{
method: "DELETE",
bucket: b.Name,
path: "/",
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, nil)
if !shouldRetry(err) {
break
}
}
return err
}
// Get retrieves an object from an S3 bucket.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
body, err := b.GetReader(path)
if err != nil {
return nil, err
}
data, err = ioutil.ReadAll(body)
body.Close()
return data, err
}
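// exampleGet is a hedged usage sketch, not part of the original package:
// it fetches an object in one call and treats the body as text. The key is
// a placeholder.
func exampleGet(b *Bucket) (string, error) {
	data, err := b.Get("greeting.txt")
	if err != nil {
		return "", err
	}
	return string(data), nil
}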
// GetReader retrieves an object from an S3 bucket.
// It is the caller's responsibility to call Close on rc when
// finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
resp, err := b.GetResponse(path)
if resp != nil {
return resp.Body, err
}
return nil, err
}
// GetResponse retrieves an object from an S3 bucket, returning the HTTP response.
// It is the caller's responsibility to call Close on the response body when
// finished reading.
func (b *Bucket) GetResponse(path string) (*http.Response, error) {
return b.getResponseParams(path, nil)
}
// GetTorrentReader retrieves a torrent object from an S3 bucket as an io.ReadCloser.
// It is the caller's responsibility to call Close on the reader when finished reading.
func (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) {
resp, err := b.getResponseParams(path, url.Values{"torrent": {""}})
if err != nil {
return nil, err
}
return resp.Body, nil
}
// GetTorrent retrieves a torrent object from S3, returning
// the torrent as a []byte.
func (b *Bucket) GetTorrent(path string) ([]byte, error) {
body, err := b.GetTorrentReader(path)
if err != nil {
return nil, err
}
defer body.Close()
return ioutil.ReadAll(body)
}
func (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) {
req := &request{
bucket: b.Name,
path: path,
params: params,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
func (b *Bucket) Head(path string) (*http.Response, error) {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
body := bytes.NewBuffer(data)
return b.PutReader(path, body, int64(len(data)), contType, perm)
}
/*
PutHeader - like Put, inserts an object into the S3 bucket.
Instead of Content-Type string, pass in custom headers to override defaults.
*/
func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
body := bytes.NewBuffer(data)
return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {contType},
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
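// examplePutReader is a hedged usage sketch, not part of the original
// package: it streams a string body into the bucket. The length must be
// known up front because PutReader sets Content-Length explicitly.
func examplePutReader(b *Bucket) error {
	body := strings.NewReader("hello, world")
	return b.PutReader("greeting.txt", body, int64(body.Len()), "text/plain", Private)
}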
/*
PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
Instead of Content-Type string, pass in custom headers to override defaults.
*/
func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
// Default headers
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {"application/text"},
"x-amz-acl": {string(perm)},
}
// Override with custom headers
for key, value := range customHeaders {
headers[key] = value
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
/*
Copy - copy objects inside bucket
*/
func (b *Bucket) Copy(oldPath, newPath string, perm ACL) error {
if !strings.HasPrefix(oldPath, "/") {
oldPath = "/" + oldPath
}
req := &request{
method: "PUT",
bucket: b.Name,
path: newPath,
headers: map[string][]string{
"x-amz-copy-source": {amazonEscape("/" + b.Name + oldPath)},
"x-amz-acl": {string(perm)},
},
}
err := b.S3.prepare(req)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
_, err = b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return err
}
return nil
}
panic("unreachable")
}
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
req := &request{
method: "DELETE",
bucket: b.Name,
path: path,
}
return b.S3.query(req, nil)
}
type Object struct {
Key string
}
type MultiObjectDeleteBody struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool
Object []Object
}
func base64md5(data []byte) string {
h := md5.New()
h.Write(data)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
// MultiDel removes multiple objects from the S3 bucket efficiently.
// A maximum of 1000 keys at once may be specified.
//
// See http://goo.gl/WvA5sj for details.
func (b *Bucket) MultiDel(paths []string) error {
// create XML payload
v := MultiObjectDeleteBody{}
v.Object = make([]Object, len(paths))
for i, path := range paths {
v.Object[i] = Object{path}
}
data, _ := xml.Marshal(v)
// Content-MD5 is required
md5hash := base64md5(data)
req := &request{
method: "POST",
bucket: b.Name,
path: "/",
params: url.Values{"delete": {""}},
headers: http.Header{"Content-MD5": {md5hash}},
payload: bytes.NewReader(data),
}
return b.S3.query(req, nil)
}
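// exampleMultiDel is a hedged usage sketch, not part of the original
// package: it removes several keys in a single request; batches must stay
// at or below 1000 keys.
func exampleMultiDel(b *Bucket) error {
	return b.MultiDel([]string{"a.go", "b.go"})
}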
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
Name string
Prefix string
Delimiter string
Marker string
NextMarker string
MaxKeys int
// IsTruncated is true if the results have been truncated because
// there are more keys and prefixes than can fit in MaxKeys.
// N.B. this is the opposite sense to that documented (incorrectly) in
// http://goo.gl/YjQTc
IsTruncated bool
Contents []Key
CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
Key string
LastModified string
Size int64
// ETag gives the hex-encoded MD5 sum of the contents,
// surrounded with double-quotes.
ETag string
StorageClass string
Owner Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
// index.html
// index2.html
// photos/2006/January/sample.jpg
// photos/2006/February/sample2.jpg
// photos/2006/February/sample3.jpg
// photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Contents: []Key{
// {Key: "index.html"}, {Key: "index2.html"},
// },
// CommonPrefixes: []string{
// "photos/",
// },
// }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Prefix: "photos/2006/",
// CommonPrefixes: []string{
// "photos/2006/February/",
// "photos/2006/January/",
// },
// }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
params := map[string][]string{
"prefix": {prefix},
"delimiter": {delim},
"marker": {marker},
}
if max != 0 {
params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
}
req := &request{
bucket: b.Name,
params: params,
}
result = &ListResp{}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, result)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
return result, nil
}
// GetBucketContents returns a mapping of all key names in this bucket to Key objects.
func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
bucket_contents := map[string]Key{}
prefix := ""
path_separator := ""
marker := ""
for {
contents, err := b.List(prefix, path_separator, marker, 1000)
if err != nil {
return &bucket_contents, err
}
last_key := ""
for _, key := range contents.Contents {
bucket_contents[key.Key] = key
last_key = key.Key
}
if contents.IsTruncated {
marker = contents.NextMarker
if marker == "" {
// From the s3 docs: If response does not include the
// NextMarker and it is truncated, you can use the value of the
// last Key in the response as the marker in the subsequent
// request to get the next set of object keys.
marker = last_key
}
} else {
break
}
}
return &bucket_contents, nil
}
// GetKey returns metadata for the object at path without fetching its content.
func (b *Bucket) GetKey(path string) (*Key, error) {
req := &request{
bucket: b.Name,
path: path,
method: "HEAD",
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
key := &Key{}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
key.Key = path
key.LastModified = resp.Header.Get("Last-Modified")
key.ETag = resp.Header.Get("ETag")
contentLength := resp.Header.Get("Content-Length")
size, err := strconv.ParseInt(contentLength, 10, 64)
if err != nil {
return key, fmt.Errorf("bad s3 content-length %v: %v",
contentLength, err)
}
key.Size = size
return key, nil
}
panic("unreachable")
}
// URL returns a non-signed URL that allows retrieving the
// object at path. It only works if the object is publicly
// readable (see SignedURL).
func (b *Bucket) URL(path string) string {
req := &request{
bucket: b.Name,
path: path,
}
err := b.S3.prepare(req)
if err != nil {
panic(err)
}
u, err := req.url(true)
if err != nil {
panic(err)
}
u.RawQuery = ""
return u.String()
}
// SignedURL returns a signed URL that allows anyone holding the URL
// to retrieve the object at path. The signature is valid until expires.
func (b *Bucket) SignedURL(path string, expires time.Time) string {
req := &request{
bucket: b.Name,
path: path,
params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
}
err := b.S3.prepare(req)
if err != nil {
panic(err)
}
u, err := req.url(true)
if err != nil {
panic(err)
}
return u.String()
}
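// exampleSignedURL is a hedged usage sketch, not part of the original
// package: it produces a URL for a private object that stays valid for
// one hour.
func exampleSignedURL(b *Bucket) string {
	return b.SignedURL("greeting.txt", time.Now().Add(time.Hour))
}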
type request struct {
method string
bucket string
path string
signpath string
params url.Values
headers http.Header
baseurl string
payload io.Reader
prepared bool
}
// amazonShouldEscape returns true if the byte should be escaped.
func amazonShouldEscape(c byte) bool {
return !((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') || c == '_' || c == '-' || c == '~' || c == '.' || c == '/' || c == ':')
}
// amazonEscape does URI escaping exactly as Amazon does.
func amazonEscape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
if amazonShouldEscape(s[i]) {
hexCount++
}
}
if hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
if c := s[i]; amazonShouldEscape(c) {
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
} else {
t[j] = s[i]
j++
}
}
return string(t)
}
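// exampleAmazonEscape is a hedged illustration, not from the original
// source: it shows the escaping rules above in action.
func exampleAmazonEscape() string {
	// '+' and '?' are percent-encoded while '/' and ':' pass through:
	// "dir/new+f?le" -> "dir/new%2Bf%3Fle"
	return amazonEscape("dir/new+f?le")
}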
// url returns the URL of the resource, either full (with host/scheme) or
// partial, for use in an HTTP request.
func (req *request) url(full bool) (*url.URL, error) {
u, err := url.Parse(req.baseurl)
if err != nil {
return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
}
u.Opaque = amazonEscape(req.path)
if full {
u.Opaque = "//" + u.Host + u.Opaque
}
u.RawQuery = req.params.Encode()
return u, nil
}
// query prepares and runs the req request.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
func (s3 *S3) query(req *request, resp interface{}) error {
err := s3.prepare(req)
if err == nil {
var httpResponse *http.Response
httpResponse, err = s3.run(req, resp)
if resp == nil && httpResponse != nil {
httpResponse.Body.Close()
}
}
return err
}
// prepare sets up req to be delivered to S3.
func (s3 *S3) prepare(req *request) error {
if !req.prepared {
req.prepared = true
if req.method == "" {
req.method = "GET"
}
// Copy so they can be mutated without affecting retries.
params := make(url.Values)
headers := make(http.Header)
for k, v := range req.params {
params[k] = v
}
for k, v := range req.headers {
headers[k] = v
}
req.params = params
req.headers = headers
if !strings.HasPrefix(req.path, "/") {
req.path = "/" + req.path
}
req.signpath = req.path
if req.bucket != "" {
req.baseurl = s3.Region.S3BucketEndpoint
if req.baseurl == "" {
// Use the path method to address the bucket.
req.baseurl = s3.Region.S3Endpoint
req.path = "/" + req.bucket + req.path
} else {
// Just in case, prevent injection.
if strings.IndexAny(req.bucket, "/:@") >= 0 {
return fmt.Errorf("bad S3 bucket: %q", req.bucket)
}
req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
}
req.signpath = "/" + req.bucket + req.signpath
} else {
req.baseurl = s3.Region.S3Endpoint
}
}
// Always sign again as it's not clear how far the
// server has handled a previous attempt.
u, err := url.Parse(req.baseurl)
if err != nil {
return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
}
req.headers["Host"] = []string{u.Host}
req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
sign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers)
return nil
}
// run sends req and returns the http response from the server.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
if debug {
log.Printf("Running S3 request: %#v", req)
}
u, err := req.url(false)
if err != nil {
return nil, err
}
hreq := http.Request{
URL: u,
Method: req.method,
ProtoMajor: 1,
ProtoMinor: 1,
Close: true,
Header: req.headers,
}
if v, ok := req.headers["Content-Length"]; ok {
hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
delete(req.headers, "Content-Length")
}
if req.payload != nil {
hreq.Body = ioutil.NopCloser(req.payload)
}
hresp, err := s3.HTTPClient().Do(&hreq)
if err != nil {
return nil, err
}
if debug {
dump, _ := httputil.DumpResponse(hresp, true)
log.Printf("} -> %s\n", dump)
}
if hresp.StatusCode != 200 && hresp.StatusCode != 204 {
defer hresp.Body.Close()
return nil, buildError(hresp)
}
if resp != nil {
err = xml.NewDecoder(hresp.Body).Decode(resp)
hresp.Body.Close()
}
return hresp, err
}
// Error represents an error in an operation with S3.
type Error struct {
StatusCode int // HTTP status code (200, 403, ...)
Code string // S3 error code ("NoSuchBucket", ...)
Message string // The human-oriented error message
BucketName string
RequestId string
HostId string
}
func (e *Error) Error() string {
return e.Message
}
func buildError(r *http.Response) error {
if debug {
log.Printf("got error (status code %v)", r.StatusCode)
data, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Printf("\tread error: %v", err)
} else {
log.Printf("\tdata:\n%s\n\n", data)
}
r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
}
err := Error{}
// TODO return error if Unmarshal fails?
xml.NewDecoder(r.Body).Decode(&err)
r.Body.Close()
err.StatusCode = r.StatusCode
if err.Message == "" {
err.Message = r.Status
}
if debug {
log.Printf("err: %#v\n", err)
}
return &err
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
switch err {
case io.ErrUnexpectedEOF, io.EOF:
return true
}
switch e := err.(type) {
case *net.DNSError:
return true
case *net.OpError:
switch e.Op {
case "read", "write":
return true
}
case *Error:
switch e.Code {
case "InternalError", "NoSuchUpload", "NoSuchBucket":
return true
}
}
return false
}
func hasCode(err error, code string) bool {
s3err, ok := err.(*Error)
return ok && s3err.Code == code
}


@@ -1,435 +0,0 @@
package s3_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
)
func Test(t *testing.T) {
TestingT(t)
}
type S struct {
s3 *s3.S3
}
var _ = Suite(&S{})
var testServer = testutil.NewHTTPServer()
func (s *S) SetUpSuite(c *C) {
testServer.Start()
auth := aws.Auth{"abc", "123", ""}
s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
}
func (s *S) TearDownSuite(c *C) {
s3.SetAttemptStrategy(nil)
}
func (s *S) SetUpTest(c *C) {
attempts := aws.AttemptStrategy{
Total: 300 * time.Millisecond,
Delay: 100 * time.Millisecond,
}
s3.SetAttemptStrategy(&attempts)
}
func (s *S) TearDownTest(c *C) {
testServer.Flush()
}
func (s *S) DisableRetries() {
s3.SetAttemptStrategy(&aws.AttemptStrategy{})
}
// PutBucket docs: http://goo.gl/kBTCu
func (s *S) TestPutBucket(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// DeleteBucket docs: http://goo.gl/GoBrY
func (s *S) TestDelBucket(c *C) {
testServer.Response(204, nil, "")
b := s.s3.Bucket("bucket")
err := b.DelBucket()
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// ListBuckets: http://goo.gl/NqlyMN
func (s *S) TestListBuckets(c *C) {
testServer.Response(200, nil, GetListBucketsDump)
buckets, err := s.s3.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets.Buckets), Equals, 2)
c.Assert(buckets.Buckets[0].Name, Equals, "bucket1")
c.Assert(buckets.Buckets[1].Name, Equals, "bucket2")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/")
}
// GetObject docs: http://goo.gl/isCO7
func (s *S) TestGet(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
data, err := b.Get("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
}
func (s *S) TestHead(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
resp, err := b.Head("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(len(body), Equals, 0)
}
func (s *S) TestURL(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
url := b.URL("name")
r, err := http.Get(url)
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(r.Body)
r.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
}
func (s *S) TestGetReader(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
rc, err := b.GetReader("name")
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(rc)
rc.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
func (s *S) TestGetNotFound(c *C) {
for i := 0; i < 10; i++ {
testServer.Response(404, nil, GetObjectErrorDump)
}
b := s.s3.Bucket("non-existent-bucket")
data, err := b.Get("non-existent")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/non-existent-bucket/non-existent")
c.Assert(req.Header["Date"], Not(Equals), "")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.BucketName, Equals, "non-existent-bucket")
c.Assert(s3err.RequestId, Equals, "3F1B667FAD71C3D8")
c.Assert(s3err.HostId, Equals, "L4ee/zrm1irFXY5F45fKXIRdOf9ktsKY/8TDVawuMK2jWRb1RF84i1uBzkdNqS5D")
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(s3err.Error(), Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// PutObject docs: http://goo.gl/FEBPD
func (s *S) TestPutObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Put("name", []byte("content"), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutObjectHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutHeader(
"name",
[]byte("content"),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReaderHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReaderHeader(
"name",
buf,
int64(buf.Len()),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestCopy(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"old/file",
"new/file",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/new/file")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/old/file"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPlusInURL(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"dir/old+f?le",
"dir/new+f?le",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.RequestURI, Equals, "/bucket/dir/new%2Bf%3Fle")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/dir/old%2Bf%3Fle"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
// DelObject docs: http://goo.gl/APeTt
func (s *S) TestDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Del("name")
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// Delete Multiple Objects docs: http://goo.gl/WvA5sj
func (s *S) TestMultiDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.MultiDel([]string{"a", "b"})
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.RequestURI, Equals, "/bucket/?delete=")
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"nos/vZNvjGs17xIyjEFlwQ=="})
data, err := ioutil.ReadAll(req.Body)
req.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "<Delete><Quiet>false</Quiet><Object><Key>a</Key></Object><Object><Key>b</Key></Object></Delete>")
}
// Bucket List Objects docs: http://goo.gl/YjQTc
func (s *S) TestList(c *C) {
testServer.Response(200, nil, GetListResultDump1)
b := s.s3.Bucket("quotes")
data, err := b.List("N", "", "", 0)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"N"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{""})
c.Assert(req.Form["marker"], DeepEquals, []string{""})
c.Assert(req.Form["max-keys"], DeepEquals, []string(nil))
c.Assert(data.Name, Equals, "quotes")
c.Assert(data.Prefix, Equals, "N")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 2)
c.Assert(data.Contents[0].Key, Equals, "Nelson")
c.Assert(data.Contents[0].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[0].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[0].Size, Equals, int64(5))
c.Assert(data.Contents[0].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[0].Owner.ID, Equals, "bcaf161ca5fb16fd081034f")
c.Assert(data.Contents[0].Owner.DisplayName, Equals, "webfile")
c.Assert(data.Contents[1].Key, Equals, "Neo")
c.Assert(data.Contents[1].LastModified, Equals, "2006-01-01T12:00:00.000Z")
c.Assert(data.Contents[1].ETag, Equals, `"828ef3fdfa96f00ad9f27c383fc9ac7f"`)
c.Assert(data.Contents[1].Size, Equals, int64(4))
c.Assert(data.Contents[1].StorageClass, Equals, "STANDARD")
c.Assert(data.Contents[1].Owner.ID, Equals, "bcaf1ffd86a5fb16fd081034f")
c.Assert(data.Contents[1].Owner.DisplayName, Equals, "webfile")
}
func (s *S) TestListWithDelimiter(c *C) {
testServer.Response(200, nil, GetListResultDump2)
b := s.s3.Bucket("quotes")
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/quotes/")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(req.Form["prefix"], DeepEquals, []string{"photos/2006/"})
c.Assert(req.Form["delimiter"], DeepEquals, []string{"/"})
c.Assert(req.Form["marker"], DeepEquals, []string{"some-marker"})
c.Assert(req.Form["max-keys"], DeepEquals, []string{"1000"})
c.Assert(data.Name, Equals, "example-bucket")
c.Assert(data.Prefix, Equals, "photos/2006/")
c.Assert(data.Delimiter, Equals, "/")
c.Assert(data.Marker, Equals, "some-marker")
c.Assert(data.IsTruncated, Equals, false)
c.Assert(len(data.Contents), Equals, 0)
c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}
func (s *S) TestGetKey(c *C) {
testServer.Response(200, GetKeyHeaderDump, "")
b := s.s3.Bucket("bucket")
key, err := b.GetKey("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(key.Key, Equals, "name")
c.Assert(key.LastModified, Equals, GetKeyHeaderDump["Last-Modified"])
c.Assert(key.Size, Equals, int64(434234))
c.Assert(key.ETag, Equals, GetKeyHeaderDump["ETag"])
}
func (s *S) TestUnescapedColon(c *C) {
b := s.s3.Bucket("bucket")
u := b.URL("foo:bar")
c.Assert(u, Equals, "http://localhost:4444/bucket/foo:bar")
}


@@ -1,616 +0,0 @@
package s3_test
import (
"bytes"
"crypto/md5"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
"net"
"sort"
"time"
)
// AmazonServer represents an Amazon S3 server.
type AmazonServer struct {
auth aws.Auth
}
func (s *AmazonServer) SetUp(c *C) {
auth, err := aws.EnvAuth()
if err != nil {
c.Fatal(err.Error())
}
s.auth = auth
}
var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
var _ = Suite(&AmazonClientSuite{Region: aws.EUCentral})
var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
// AmazonClientSuite tests the client against a live S3 server.
type AmazonClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
s.s3 = s3.New(s.srv.auth, s.Region)
// Clean up in case a previous test run was interrupted partway through.
s.ClientTests.Cleanup()
}
func (s *AmazonClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// AmazonDomainClientSuite tests the client against a live S3
// server using bucket names in the endpoint domain name rather
// than the request path.
type AmazonDomainClientSuite struct {
aws.Region
srv AmazonServer
ClientTests
}
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
if !testutil.Amazon {
c.Skip("live tests against AWS disabled (no -amazon)")
}
s.srv.SetUp(c)
region := s.Region
region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
s.s3 = s3.New(s.srv.auth, region)
s.ClientTests.Cleanup()
}
func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
s.ClientTests.Cleanup()
}
// ClientTests defines integration tests designed to test the client.
// It is not used as a test suite in itself, but embedded within
// another type.
type ClientTests struct {
s3 *s3.S3
authIsBroken bool
}
func (s *ClientTests) Cleanup() {
killBucket(testBucket(s.s3))
}
func testBucket(s *s3.S3) *s3.Bucket {
// Watch out! If this function is corrupted and made to match something
// people own, killBucket will happily remove *everything* inside the bucket.
key := s.Auth.AccessKey
if len(key) >= 8 {
key = s.Auth.AccessKey[:8]
}
return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
}
var attempts = aws.AttemptStrategy{
Min: 5,
Total: 20 * time.Second,
Delay: 100 * time.Millisecond,
}
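// Illustrative sketch, not part of the original file: attempts is consumed
// through Start/Next, retrying an operation until it succeeds or the
// strategy is exhausted. retryGet is a hypothetical helper showing the
// idiom used by killBucket and get below.
func retryGet(b *s3.Bucket, key string) (data []byte, err error) {
	for attempt := attempts.Start(); attempt.Next(); {
		data, err = b.Get(key)
		if err == nil {
			return data, nil
		}
	}
	return nil, err
}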
func killBucket(b *s3.Bucket) {
var err error
for attempt := attempts.Start(); attempt.Next(); {
err = b.DelBucket()
if err == nil {
return
}
if _, ok := err.(*net.DNSError); ok {
return
}
e, ok := err.(*s3.Error)
if ok && e.Code == "NoSuchBucket" {
return
}
if ok && e.Code == "BucketNotEmpty" {
// Errors are ignored here. Just retry.
resp, err := b.List("", "", "", 1000)
if err == nil {
for _, key := range resp.Contents {
_ = b.Del(key.Key)
}
}
multis, _, _ := b.ListMulti("", "")
for _, m := range multis {
_ = m.Abort()
}
}
}
message := "cannot delete test bucket"
if err != nil {
message += ": " + err.Error()
}
panic(message)
}
func get(url string) ([]byte, error) {
for attempt := attempts.Start(); attempt.Next(); {
resp, err := http.Get(url)
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
data, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
return data, err
}
panic("unreachable")
}
func (s *ClientTests) TestBasicFunctionality(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.Put("name", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name")
data, err := b.Get("name")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
data, err = get(b.URL("name"))
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
buf := bytes.NewBufferString("hey!")
err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del("name2")
rc, err := b.GetReader("name2")
c.Assert(err, IsNil)
data, err = ioutil.ReadAll(rc)
c.Check(err, IsNil)
c.Check(string(data), Equals, "hey!")
rc.Close()
data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hey!")
if !s.authIsBroken {
data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
c.Assert(err, IsNil)
c.Assert(string(data), Matches, "(?s).*AccessDenied.*")
}
err = b.DelBucket()
c.Assert(err, NotNil)
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "BucketNotEmpty")
c.Assert(s3err.BucketName, Equals, b.Name)
c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty")
err = b.Del("name")
c.Assert(err, IsNil)
err = b.Del("name2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestCopy(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.Put("name+1", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+1")
err = b.Copy("name+1", "name+2", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+2")
data, err := b.Get("name+2")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
err = b.Del("name+1")
c.Assert(err, IsNil)
err = b.Del("name+2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestGetNotFound(c *C) {
b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
data, err := b.Get("non-existent")
s3err, _ := err.(*s3.Error)
c.Assert(s3err, NotNil)
c.Assert(s3err.StatusCode, Equals, 404)
c.Assert(s3err.Code, Equals, "NoSuchBucket")
c.Assert(s3err.Message, Equals, "The specified bucket does not exist")
c.Assert(data, IsNil)
}
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
errs := make(chan error, len(aws.Regions))
for _, region := range aws.Regions {
go func(r aws.Region) {
s := s3.New(s.s3.Auth, r)
b := s.Bucket("goamz-" + s.Auth.AccessKey)
_, err := b.Get("non-existent")
errs <- err
}(region)
}
for range aws.Regions {
err := <-errs
if err != nil {
s3_err, ok := err.(*s3.Error)
if ok {
c.Check(s3_err.Code, Matches, "NoSuchBucket")
} else if _, ok = err.(*net.DNSError); ok {
// Okay as well.
} else {
c.Errorf("Non-S3 error: %s", err)
}
} else {
c.Errorf("Test should have errored but it seems to have succeeded")
}
}
}
var objectNames = []string{
"index.html",
"index2.html",
"photos/2006/February/sample2.jpg",
"photos/2006/February/sample3.jpg",
"photos/2006/February/sample4.jpg",
"photos/2006/January/sample.jpg",
"test/bar",
"test/foo",
}
func keys(names ...string) []s3.Key {
ks := make([]s3.Key, len(names))
for i, name := range names {
ks[i].Key = name
}
return ks
}
// As the ListResp specifies all the parameters to the
// request too, we use it to specify request parameters
// and expected results. The Contents field is
// used only for the key names inside it.
var listTests = []s3.ListResp{
// normal list.
{
Contents: keys(objectNames...),
}, {
Marker: objectNames[0],
Contents: keys(objectNames[1:]...),
}, {
Marker: objectNames[0] + "a",
Contents: keys(objectNames[1:]...),
}, {
Marker: "z",
},
// limited results.
{
MaxKeys: 2,
Contents: keys(objectNames[0:2]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[0],
Contents: keys(objectNames[1:3]...),
IsTruncated: true,
}, {
MaxKeys: 2,
Marker: objectNames[len(objectNames)-2],
Contents: keys(objectNames[len(objectNames)-1:]...),
},
// with delimiter
{
Delimiter: "/",
CommonPrefixes: []string{"photos/", "test/"},
Contents: keys("index.html", "index2.html"),
}, {
Delimiter: "/",
Prefix: "photos/2006/",
CommonPrefixes: []string{"photos/2006/February/", "photos/2006/January/"},
}, {
Delimiter: "/",
Prefix: "t",
CommonPrefixes: []string{"test/"},
}, {
Delimiter: "/",
MaxKeys: 1,
Contents: keys("index.html"),
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "index2.html",
CommonPrefixes: []string{"photos/"},
IsTruncated: true,
}, {
Delimiter: "/",
MaxKeys: 1,
Marker: "photos/",
CommonPrefixes: []string{"test/"},
IsTruncated: false,
}, {
Delimiter: "Feb",
CommonPrefixes: []string{"photos/2006/Feb"},
Contents: keys("index.html", "index2.html", "photos/2006/January/sample.jpg", "test/bar", "test/foo"),
},
}
func (s *ClientTests) TestDoublePutBucket(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
err = b.PutBucket(s3.PublicRead)
if err != nil {
c.Assert(err, FitsTypeOf, new(s3.Error))
c.Assert(err.(*s3.Error).Code, Equals, "BucketAlreadyOwnedByYou")
}
}
func (s *ClientTests) TestBucketList(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
objData := make(map[string][]byte)
for i, path := range objectNames {
data := []byte(strings.Repeat("a", i))
err := b.Put(path, data, "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del(path)
objData[path] = data
}
for i, t := range listTests {
c.Logf("test %d", i)
resp, err := b.List(t.Prefix, t.Delimiter, t.Marker, t.MaxKeys)
c.Assert(err, IsNil)
c.Check(resp.Name, Equals, b.Name)
c.Check(resp.Delimiter, Equals, t.Delimiter)
c.Check(resp.IsTruncated, Equals, t.IsTruncated)
c.Check(resp.CommonPrefixes, DeepEquals, t.CommonPrefixes)
checkContents(c, resp.Contents, objData, t.Contents)
}
}
func etag(data []byte) string {
sum := md5.New()
sum.Write(data)
return fmt.Sprintf(`"%x"`, sum.Sum(nil))
}
func checkContents(c *C, contents []s3.Key, data map[string][]byte, expected []s3.Key) {
c.Assert(contents, HasLen, len(expected))
for i, k := range contents {
c.Check(k.Key, Equals, expected[i].Key)
// TODO mtime
c.Check(k.Size, Equals, int64(len(data[k.Key])))
c.Check(k.ETag, Equals, etag(data[k.Key]))
}
}
func (s *ClientTests) TestMultiInitPutList(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
var sent []s3.Part
for i := 0; i < 5; i++ {
p, err := multi.PutPart(i+1, strings.NewReader(fmt.Sprintf("<part %d>", i+1)))
c.Assert(err, IsNil)
c.Assert(p.N, Equals, i+1)
c.Assert(p.Size, Equals, int64(8))
c.Assert(p.ETag, Matches, ".+")
sent = append(sent, p)
}
s3.SetListPartsMax(2)
parts, err := multi.ListParts()
c.Assert(err, IsNil)
c.Assert(parts, HasLen, len(sent))
for i := range parts {
c.Assert(parts[i].N, Equals, sent[i].N)
c.Assert(parts[i].Size, Equals, sent[i].Size)
c.Assert(parts[i].ETag, Equals, sent[i].ETag)
}
err = multi.Complete(parts)
s3err, failed := err.(*s3.Error)
c.Assert(failed, Equals, true)
c.Assert(s3err.Code, Equals, "EntityTooSmall")
err = multi.Abort()
c.Assert(err, IsNil)
_, err = multi.ListParts()
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "NoSuchUpload")
}
// This may take a minute or more due to the minimum part size S3
// accepts for multipart uploads.
func (s *ClientTests) TestMultiComplete(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
c.Assert(multi.UploadId, Matches, ".+")
defer multi.Abort()
// Minimum size S3 accepts for all but the last part is 5MB.
data1 := make([]byte, 5*1024*1024)
data2 := []byte("<part 2>")
part1, err := multi.PutPart(1, bytes.NewReader(data1))
c.Assert(err, IsNil)
part2, err := multi.PutPart(2, bytes.NewReader(data2))
c.Assert(err, IsNil)
// Purposefully reversed. The order requirement must be handled.
err = multi.Complete([]s3.Part{part2, part1})
c.Assert(err, IsNil)
data, err := b.Get("multi")
c.Assert(err, IsNil)
c.Assert(len(data), Equals, len(data1)+len(data2))
for i := range data1 {
if data[i] != data1[i] {
c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i])
}
}
c.Assert(string(data[len(data1):]), Equals, string(data2))
}
type multiList []*s3.Multi
func (l multiList) Len() int { return len(l) }
func (l multiList) Less(i, j int) bool { return l[i].Key < l[j].Key }
func (l multiList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (s *ClientTests) TestListMulti(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
// Ensure an empty state before testing its behavior.
multis, _, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for _, m := range multis {
err := m.Abort()
c.Assert(err, IsNil)
}
keys := []string{
"a/multi2",
"a/multi3",
"b/multi4",
"multi1",
}
for _, key := range keys {
m, err := b.InitMulti(key, "", s3.Private)
c.Assert(err, IsNil)
defer m.Abort()
}
// Amazon's implementation of the multiple-request listing for
// multipart uploads in progress seems broken in multiple ways.
// (next tokens are not provided, etc).
//s3.SetListMultiMax(2)
multis, prefixes, err := b.ListMulti("", "")
c.Assert(err, IsNil)
for attempt := attempts.Start(); attempt.Next() && len(multis) < len(keys); {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
sort.Sort(multiList(multis))
c.Assert(prefixes, IsNil)
var gotKeys []string
for _, m := range multis {
gotKeys = append(gotKeys, m.Key)
}
c.Assert(gotKeys, DeepEquals, keys)
for _, m := range multis {
c.Assert(m.Bucket, Equals, b)
c.Assert(m.UploadId, Matches, ".+")
}
multis, prefixes, err = b.ListMulti("", "/")
for attempt := attempts.Start(); attempt.Next() && len(prefixes) < 2; {
multis, prefixes, err = b.ListMulti("", "/")
c.Assert(err, IsNil)
}
c.Assert(err, IsNil)
c.Assert(prefixes, DeepEquals, []string{"a/", "b/"})
c.Assert(multis, HasLen, 1)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "multi1")
c.Assert(multis[0].UploadId, Matches, ".+")
for attempt := attempts.Start(); attempt.Next() && len(multis) < 2; {
multis, prefixes, err = b.ListMulti("", "")
c.Assert(err, IsNil)
}
multis, prefixes, err = b.ListMulti("a/", "/")
c.Assert(err, IsNil)
c.Assert(prefixes, IsNil)
c.Assert(multis, HasLen, 2)
c.Assert(multis[0].Bucket, Equals, b)
c.Assert(multis[0].Key, Equals, "a/multi2")
c.Assert(multis[0].UploadId, Matches, ".+")
c.Assert(multis[1].Bucket, Equals, b)
c.Assert(multis[1].Key, Equals, "a/multi3")
c.Assert(multis[1].UploadId, Matches, ".+")
}
func (s *ClientTests) TestMultiPutAllZeroLength(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
defer multi.Abort()
// This tests an edge case. Amazon requires at least one
// part for multipart uploads to work, even if the part is empty.
parts, err := multi.PutAll(strings.NewReader(""), 5*1024*1024)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 1)
c.Assert(parts[0].Size, Equals, int64(0))
c.Assert(parts[0].ETag, Equals, `"d41d8cd98f00b204e9800998ecf8427e"`)
err = multi.Complete(parts)
c.Assert(err, IsNil)
}

View File

@@ -1,79 +0,0 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/s3/s3test"
. "github.com/motain/gocheck"
)
type LocalServer struct {
auth aws.Auth
region aws.Region
srv *s3test.Server
config *s3test.Config
}
func (s *LocalServer) SetUp(c *C) {
srv, err := s3test.NewServer(s.config)
c.Assert(err, IsNil)
c.Assert(srv, NotNil)
s.srv = srv
s.region = aws.Region{
Name: "faux-region-1",
S3Endpoint: srv.URL(),
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
}
// LocalServerSuite defines tests that will run
// against the local s3test server. It includes
// selected tests from ClientTests;
// when the s3test functionality is sufficient, it should
// include all of them, and ClientTests can be simply embedded.
type LocalServerSuite struct {
srv LocalServer
clientTests ClientTests
}
var (
// run tests twice, once in us-east-1 mode, once not.
_ = Suite(&LocalServerSuite{})
_ = Suite(&LocalServerSuite{
srv: LocalServer{
config: &s3test.Config{
Send409Conflict: true,
},
},
})
)
func (s *LocalServerSuite) SetUpSuite(c *C) {
s.srv.SetUp(c)
s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
// TODO Sadly the fake server ignores auth completely right now. :-(
s.clientTests.authIsBroken = true
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TearDownTest(c *C) {
s.clientTests.Cleanup()
}
func (s *LocalServerSuite) TestBasicFunctionality(c *C) {
s.clientTests.TestBasicFunctionality(c)
}
func (s *LocalServerSuite) TestGetNotFound(c *C) {
s.clientTests.TestGetNotFound(c)
}
func (s *LocalServerSuite) TestBucketList(c *C) {
s.clientTests.TestBucketList(c)
}
func (s *LocalServerSuite) TestDoublePutBucket(c *C) {
s.clientTests.TestDoublePutBucket(c)
}

View File

@@ -1,666 +0,0 @@
package s3test
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/s3"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
)
const debug = false
type s3Error struct {
statusCode int
XMLName struct{} `xml:"Error"`
Code string
Message string
BucketName string
RequestId string
HostId string
}
type action struct {
srv *Server
w http.ResponseWriter
req *http.Request
reqId string
}
// Config controls the internal behaviour of the Server. A nil config is
// valid and leaves every setting at its default. Once passed to NewServer,
// the configuration must not be modified.
type Config struct {
// Send409Conflict controls how the Server will respond to calls to PUT on a
// previously existing bucket. The default is false, and corresponds to the
// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
// all other regions.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
Send409Conflict bool
}
func (c *Config) send409Conflict() bool {
if c != nil {
return c.Send409Conflict
}
return false
}
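// Sketch for illustration (not in the original): a Config is supplied once
// when the server is created and must not be changed afterwards; passing
// nil selects the default (us-east-1 style) behaviour.
//
//	srv, err := NewServer(&Config{Send409Conflict: true})
//	if err != nil {
//		panic(err)
//	}
//	defer srv.Quit()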
// Server is a fake S3 server for testing purposes.
// All of the data for the server is kept in memory.
type Server struct {
url string
reqId int
listener net.Listener
mu sync.Mutex
buckets map[string]*bucket
config *Config
}
type bucket struct {
name string
acl s3.ACL
ctime time.Time
objects map[string]*object
}
type object struct {
name string
mtime time.Time
meta http.Header // metadata to return with requests.
checksum []byte // also held as Content-MD5 in meta.
data []byte
}
// A resource encapsulates the subject of an HTTP request.
// The resource referred to may or may not exist
// when the request is made.
type resource interface {
put(a *action) interface{}
get(a *action) interface{}
post(a *action) interface{}
delete(a *action) interface{}
}
func NewServer(config *Config) (*Server, error) {
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return nil, fmt.Errorf("cannot listen on localhost: %v", err)
}
srv := &Server{
listener: l,
url: "http://" + l.Addr().String(),
buckets: make(map[string]*bucket),
config: config,
}
go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
srv.serveHTTP(w, req)
}))
return srv, nil
}
// Quit closes down the server.
func (srv *Server) Quit() {
srv.listener.Close()
}
// URL returns a URL for the server.
func (srv *Server) URL() string {
return srv.url
}
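// fatalf aborts handling of the current request by panicking with an
// *s3Error; serveHTTP recovers the panic and writes it back to the client
// as an XML error response (see the deferred handler below).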
func fatalf(code int, codeStr string, errf string, a ...interface{}) {
panic(&s3Error{
statusCode: code,
Code: codeStr,
Message: fmt.Sprintf(errf, a...),
})
}
// serveHTTP serves the S3 protocol.
func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
// ignore error from ParseForm as it's usually spurious.
req.ParseForm()
srv.mu.Lock()
defer srv.mu.Unlock()
if debug {
log.Printf("s3test %q %q", req.Method, req.URL)
}
a := &action{
srv: srv,
w: w,
req: req,
reqId: fmt.Sprintf("%09X", srv.reqId),
}
srv.reqId++
var r resource
defer func() {
switch err := recover().(type) {
case *s3Error:
switch r := r.(type) {
case objectResource:
err.BucketName = r.bucket.name
case bucketResource:
err.BucketName = r.name
}
err.RequestId = a.reqId
// TODO HostId
w.Header().Set("Content-Type", "application/xml")
w.WriteHeader(err.statusCode)
xmlMarshal(w, err)
case nil:
default:
panic(err)
}
}()
r = srv.resourceForURL(req.URL)
var resp interface{}
switch req.Method {
case "PUT":
resp = r.put(a)
case "GET", "HEAD":
resp = r.get(a)
case "DELETE":
resp = r.delete(a)
case "POST":
resp = r.post(a)
default:
fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
}
if resp != nil && req.Method != "HEAD" {
xmlMarshal(w, resp)
}
}
// xmlMarshal is the same as xml.Marshal except that
// it panics on error. The marshalling should not fail,
// but we want to know if it does.
func xmlMarshal(w io.Writer, x interface{}) {
if err := xml.NewEncoder(w).Encode(x); err != nil {
panic(fmt.Errorf("error marshalling %#v: %v", x, err))
}
}
// In a fully implemented test server, each of these would have
// its own resource type.
var unimplementedBucketResourceNames = map[string]bool{
"acl": true,
"lifecycle": true,
"policy": true,
"location": true,
"logging": true,
"notification": true,
"versions": true,
"requestPayment": true,
"versioning": true,
"website": true,
"uploads": true,
}
var unimplementedObjectResourceNames = map[string]bool{
"uploadId": true,
"acl": true,
"torrent": true,
"uploads": true,
}
var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
// resourceForURL returns a resource object for the given URL.
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
if u.Path == "/" {
return serviceResource{
buckets: srv.buckets,
}
}
m := pathRegexp.FindStringSubmatch(u.Path)
if m == nil {
fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
}
bucketName := m[2]
objectName := m[4]
if bucketName == "" {
return nullResource{} // root
}
b := bucketResource{
name: bucketName,
bucket: srv.buckets[bucketName],
}
q := u.Query()
if objectName == "" {
for name := range q {
if unimplementedBucketResourceNames[name] {
return nullResource{}
}
}
return b
}
if b.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
objr := objectResource{
name: objectName,
version: q.Get("versionId"),
bucket: b.bucket,
}
for name := range q {
if unimplementedObjectResourceNames[name] {
return nullResource{}
}
}
if obj := objr.bucket.objects[objr.name]; obj != nil {
objr.object = obj
}
return objr
}
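// For illustration (not in the original): with pathRegexp above, request
// paths map to resources roughly as follows:
//
//	/                        -> serviceResource (bucket listing)
//	/mybucket                -> bucketResource "mybucket"
//	/mybucket/photos/a.jpg   -> objectResource "photos/a.jpg"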
// nullResource has error stubs for all resource methods.
type nullResource struct{}
func notAllowed() interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
func (nullResource) put(a *action) interface{} { return notAllowed() }
func (nullResource) get(a *action) interface{} { return notAllowed() }
func (nullResource) post(a *action) interface{} { return notAllowed() }
func (nullResource) delete(a *action) interface{} { return notAllowed() }
const timeFormat = "2006-01-02T15:04:05.000Z07:00"
type serviceResource struct {
buckets map[string]*bucket
}
func (serviceResource) put(a *action) interface{} { return notAllowed() }
func (serviceResource) post(a *action) interface{} { return notAllowed() }
func (serviceResource) delete(a *action) interface{} { return notAllowed() }
// GET on an s3 service lists the buckets.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
func (r serviceResource) get(a *action) interface{} {
type respBucket struct {
Name string
}
type response struct {
Buckets []respBucket `xml:">Bucket"`
}
resp := response{}
for _, bucketPtr := range r.buckets {
bkt := respBucket{
Name: bucketPtr.name,
}
resp.Buckets = append(resp.Buckets, bkt)
}
return &resp
}
type bucketResource struct {
name string
bucket *bucket // non-nil if the bucket already exists.
}
// GET on a bucket lists the objects in the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
func (r bucketResource) get(a *action) interface{} {
if r.bucket == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
delimiter := a.req.Form.Get("delimiter")
marker := a.req.Form.Get("marker")
maxKeys := -1
if s := a.req.Form.Get("max-keys"); s != "" {
i, err := strconv.Atoi(s)
if err != nil || i < 0 {
fatalf(400, "invalid value for max-keys: %q", s)
}
maxKeys = i
}
prefix := a.req.Form.Get("prefix")
a.w.Header().Set("Content-Type", "application/xml")
if a.req.Method == "HEAD" {
return nil
}
var objs orderedObjects
// first get all matching objects and arrange them in alphabetical order.
for name, obj := range r.bucket.objects {
if strings.HasPrefix(name, prefix) {
objs = append(objs, obj)
}
}
sort.Sort(objs)
if maxKeys <= 0 {
maxKeys = 1000
}
resp := &s3.ListResp{
Name: r.bucket.name,
Prefix: prefix,
Delimiter: delimiter,
Marker: marker,
MaxKeys: maxKeys,
}
var prefixes []string
for _, obj := range objs {
if !strings.HasPrefix(obj.name, prefix) {
continue
}
name := obj.name
isPrefix := false
if delimiter != "" {
if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
name = obj.name[:len(prefix)+i+len(delimiter)]
if prefixes != nil && prefixes[len(prefixes)-1] == name {
continue
}
isPrefix = true
}
}
if name <= marker {
continue
}
if len(resp.Contents)+len(prefixes) >= maxKeys {
resp.IsTruncated = true
break
}
if isPrefix {
prefixes = append(prefixes, name)
} else {
// Contents contains only keys not found in CommonPrefixes
resp.Contents = append(resp.Contents, obj.s3Key())
}
}
resp.CommonPrefixes = prefixes
return resp
}
// orderedObjects holds a slice of objects that can be sorted
// by name.
type orderedObjects []*object
func (s orderedObjects) Len() int {
return len(s)
}
func (s orderedObjects) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s orderedObjects) Less(i, j int) bool {
return s[i].name < s[j].name
}
func (obj *object) s3Key() s3.Key {
return s3.Key{
Key: obj.name,
LastModified: obj.mtime.Format(timeFormat),
Size: int64(len(obj.data)),
ETag: fmt.Sprintf(`"%x"`, obj.checksum),
// TODO StorageClass
// TODO Owner
}
}
// DELETE on a bucket deletes the bucket if it's not empty.
func (r bucketResource) delete(a *action) interface{} {
b := r.bucket
if b == nil {
fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
}
if len(b.objects) > 0 {
fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
}
delete(a.srv.buckets, b.name)
return nil
}
// PUT on a bucket creates the bucket.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
func (r bucketResource) put(a *action) interface{} {
var created bool
if r.bucket == nil {
if !validBucketName(r.name) {
fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
}
if loc := locationConstraint(a); loc == "" {
fatalf(400, "InvalidRequets", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
}
// TODO validate acl
r.bucket = &bucket{
name: r.name,
// TODO default acl
objects: make(map[string]*object),
}
a.srv.buckets[r.name] = r.bucket
created = true
}
if !created && a.srv.config.send409Conflict() {
fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
}
r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
return nil
}
func (bucketResource) post(a *action) interface{} {
fatalf(400, "Method", "bucket POST method not available")
return nil
}
// validBucketName returns whether name is a valid bucket name.
// Here are the rules, from:
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
//
// Can contain lowercase letters, numbers, periods (.), underscores (_),
// and dashes (-). You can use uppercase letters for buckets only in the
// US Standard region.
//
// Must start with a number or letter
//
// Must be between 3 and 255 characters long
//
// There's one extra rule (the name must not be formatted as an IP address,
// e.g. 192.168.5.4), but the real S3 server does not seem to check that
// rule, so we will not check it either.
//
func validBucketName(name string) bool {
if len(name) < 3 || len(name) > 255 {
return false
}
r := name[0]
if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
return false
}
for _, r := range name {
switch {
case r >= '0' && r <= '9':
case r >= 'a' && r <= 'z':
case r == '_' || r == '-':
case r == '.':
default:
return false
}
}
return true
}
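// For illustration (not in the original): "foo-bar.baz" and "0data" satisfy
// the rules above, while "ab" (too short) and "Foo" (uppercase) do not.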
var responseParams = map[string]bool{
"content-type": true,
"content-language": true,
"expires": true,
"cache-control": true,
"content-disposition": true,
"content-encoding": true,
}
type objectResource struct {
name string
version string
bucket *bucket // always non-nil.
object *object // may be nil.
}
// GET on an object gets the contents of the object.
// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
func (objr objectResource) get(a *action) interface{} {
obj := objr.object
if obj == nil {
fatalf(404, "NoSuchKey", "The specified key does not exist.")
}
h := a.w.Header()
// add metadata
for name, d := range obj.meta {
h[name] = d
}
// override header values in response to request parameters.
for name, vals := range a.req.Form {
if strings.HasPrefix(name, "response-") {
name = name[len("response-"):]
if !responseParams[name] {
continue
}
h.Set(name, vals[0])
}
}
if r := a.req.Header.Get("Range"); r != "" {
fatalf(400, "NotImplemented", "range unimplemented")
}
// TODO Last-Modified-Since
// TODO If-Modified-Since
// TODO If-Unmodified-Since
// TODO If-Match
// TODO If-None-Match
// TODO Connection: close ??
// TODO x-amz-request-id
h.Set("Content-Length", fmt.Sprint(len(obj.data)))
h.Set("ETag", hex.EncodeToString(obj.checksum))
h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
if a.req.Method == "HEAD" {
return nil
}
// TODO avoid holding the lock when writing data.
_, err := a.w.Write(obj.data)
if err != nil {
// we can't do much except just log the fact.
log.Printf("error writing data: %v", err)
}
return nil
}
var metaHeaders = map[string]bool{
"Content-MD5": true,
"x-amz-acl": true,
"Content-Type": true,
"Content-Encoding": true,
"Content-Disposition": true,
}
// PUT on an object creates the object.
func (objr objectResource) put(a *action) interface{} {
// TODO Cache-Control header
// TODO Expires header
// TODO x-amz-server-side-encryption
// TODO x-amz-storage-class
// TODO is this correct, or should we erase all previous metadata?
obj := objr.object
if obj == nil {
obj = &object{
name: objr.name,
meta: make(http.Header),
}
}
var expectHash []byte
if c := a.req.Header.Get("Content-MD5"); c != "" {
var err error
expectHash, err = hex.DecodeString(c)
if err != nil || len(expectHash) != md5.Size {
fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
}
}
sum := md5.New()
// TODO avoid holding lock while reading data.
data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum))
if err != nil {
fatalf(400, "TODO", "read error")
}
gotHash := sum.Sum(nil)
if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 {
fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received")
}
if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength {
fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header")
}
// PUT request has been successful - save data and metadata
for key, values := range a.req.Header {
key = http.CanonicalHeaderKey(key)
if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") {
obj.meta[key] = values
}
}
obj.data = data
obj.checksum = gotHash
obj.mtime = time.Now()
objr.bucket.objects[objr.name] = obj
return nil
}
func (objr objectResource) delete(a *action) interface{} {
delete(objr.bucket.objects, objr.name)
return nil
}
func (objr objectResource) post(a *action) interface{} {
fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
return nil
}
type CreateBucketConfiguration struct {
LocationConstraint string
}
// locationConstraint parses the <CreateBucketConfiguration /> request body (if present).
// If there is no body, an empty string will be returned.
func locationConstraint(a *action) string {
var body bytes.Buffer
if _, err := io.Copy(&body, a.req.Body); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
if body.Len() == 0 {
return ""
}
var loc CreateBucketConfiguration
if err := xml.NewDecoder(&body).Decode(&loc); err != nil {
fatalf(400, "InvalidRequest", err.Error())
}
return loc.LocationConstraint
}

View File

@@ -1,126 +0,0 @@
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"log"
"sort"
"strings"
"github.com/mitchellh/goamz/aws"
)
var b64 = base64.StdEncoding
// ----------------------------------------------------------------------------
// S3 signing (http://goo.gl/G1LrK)
var s3ParamsToSign = map[string]bool{
"acl": true,
"delete": true,
"location": true,
"logging": true,
"notification": true,
"partNumber": true,
"policy": true,
"requestPayment": true,
"torrent": true,
"uploadId": true,
"uploads": true,
"versionId": true,
"versioning": true,
"versions": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
}
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
var md5, ctype, date, xamz string
var xamzDate bool
var sarray []string
// add security token
if auth.Token != "" {
headers["x-amz-security-token"] = []string{auth.Token}
}
if auth.SecretKey == "" {
// no auth secret; skip signing, e.g. for public read-only buckets.
return
}
for k, v := range headers {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
ctype = v[0]
case "date":
if !xamzDate {
date = v[0]
}
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
sarray = append(sarray, k+":"+vall)
if k == "x-amz-date" {
xamzDate = true
date = ""
}
}
}
}
if len(sarray) > 0 {
sort.StringSlice(sarray).Sort()
xamz = strings.Join(sarray, "\n") + "\n"
}
expires := false
if v, ok := params["Expires"]; ok {
// Query string request authentication alternative.
expires = true
date = v[0]
params["AWSAccessKeyId"] = []string{auth.AccessKey}
}
sarray = sarray[0:0]
for k, v := range params {
if s3ParamsToSign[k] {
for _, vi := range v {
if vi == "" {
sarray = append(sarray, k)
} else {
// "When signing you do not encode these values."
sarray = append(sarray, k+"="+vi)
}
}
}
}
if len(sarray) > 0 {
sort.StringSlice(sarray).Sort()
canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
}
payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
hash := hmac.New(sha1.New, []byte(auth.SecretKey))
hash.Write([]byte(payload))
signature := make([]byte, b64.EncodedLen(hash.Size()))
b64.Encode(signature, hash.Sum(nil))
if expires {
params["Signature"] = []string{string(signature)}
} else {
headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
}
if debug {
log.Printf("Signature payload: %q", payload)
log.Printf("Signature: %q", signature)
}
}
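// Sketch added for illustration: for a simple GET the payload assembled
// above has the layout described in the S3 REST authentication docs,
//
//	GET\n
//	<content-md5>\n
//	<content-type>\n
//	<date>\n
//	<canonicalized x-amz-* headers><canonical path>
//
// and the resulting signature is the base64-encoded HMAC-SHA1 of that
// string under the account's secret key.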

View File

@@ -1,194 +0,0 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
)
// S3 ReST authentication docs: http://goo.gl/G1LrK
var testAuth = aws.Auth{"0PN5J17HBGZHT7JJ3X82", "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o", ""}
var emptyAuth = aws.Auth{"", "", ""}
func (s *S) TestSignExampleObjectGet(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleObjectGetNoAuth(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(emptyAuth, method, path, nil, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleObjectPut(c *C) {
method := "PUT"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
"Content-Type": {"image/jpeg"},
"Content-Length": {"94328"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleList(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleFetch(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleFetchNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleDelete(c *C) {
method := "DELETE"
path := "/johnsmith/photos/puppy.jpg"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
"User-Agent": {"dotnet"},
"x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUpload(c *C) {
method := "PUT"
path := "/static.johnsmith.net/db-backup.dat.gz"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"static.johnsmith.net:8080"},
"Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
"User-Agent": {"curl/7.15.5"},
"x-amz-acl": {"public-read"},
"content-type": {"application/x-download"},
"Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
"X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
"X-Amz-Meta-FileChecksum": {"0x02661779"},
"X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
"Content-Disposition": {"attachment; filename=database.dat"},
"Content-Encoding": {"gzip"},
"Content-Length": {"5913339"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListAllMyBuckets(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUnicodeKeys(c *C) {
method := "GET"
path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
// Not included in AWS documentation
func (s *S) TestSignWithIAMToken(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
authWithToken := testAuth
authWithToken.Token = "totallysecret"
s3.Sign(authWithToken, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:SJ0yQO7NpHyXJ7zkxY+/fGQ6aUw="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
c.Assert(headers["x-amz-security-token"], DeepEquals, []string{authWithToken.Token})
}

View File

@@ -1,75 +0,0 @@
package sftp
import (
"io"
"os"
"testing"
"github.com/kr/fs"
)
// assert that *Client implements fs.FileSystem
var _ fs.FileSystem = new(Client)
// assert that *File implements io.ReadWriteCloser
var _ io.ReadWriteCloser = new(File)
var ok = &StatusError{Code: ssh_FX_OK}
var eof = &StatusError{Code: ssh_FX_EOF}
var fail = &StatusError{Code: ssh_FX_FAILURE}
var eofOrErrTests = []struct {
err, want error
}{
{nil, nil},
{eof, io.EOF},
{ok, ok},
{io.EOF, io.EOF},
}
func TestEofOrErr(t *testing.T) {
for _, tt := range eofOrErrTests {
got := eofOrErr(tt.err)
if got != tt.want {
t.Errorf("eofOrErr(%#v): want: %#v, got: %#v", tt.err, tt.want, got)
}
}
}
var okOrErrTests = []struct {
err, want error
}{
{nil, nil},
{eof, eof},
{ok, nil},
{io.EOF, io.EOF},
}
func TestOkOrErr(t *testing.T) {
for _, tt := range okOrErrTests {
got := okOrErr(tt.err)
if got != tt.want {
t.Errorf("okOrErr(%#v): want: %#v, got: %#v", tt.err, tt.want, got)
}
}
}
var flagsTests = []struct {
flags int
want uint32
}{
{os.O_RDONLY, ssh_FXF_READ},
{os.O_WRONLY, ssh_FXF_WRITE},
{os.O_RDWR, ssh_FXF_READ | ssh_FXF_WRITE},
{os.O_RDWR | os.O_CREATE | os.O_TRUNC, ssh_FXF_READ | ssh_FXF_WRITE | ssh_FXF_CREAT | ssh_FXF_TRUNC},
{os.O_WRONLY | os.O_APPEND, ssh_FXF_WRITE | ssh_FXF_APPEND},
}
func TestFlags(t *testing.T) {
for i, tt := range flagsTests {
got := flags(tt.flags)
if got != tt.want {
t.Errorf("test %v: flags(%x): want: %x, got: %x", i, tt.flags, tt.want, got)
}
}
}

View File

@@ -1,401 +0,0 @@
package sftp
import (
"encoding"
"fmt"
"io"
"reflect"
)
func marshalUint32(b []byte, v uint32) []byte {
return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
func marshalUint64(b []byte, v uint64) []byte {
return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
}
func marshalString(b []byte, v string) []byte {
return append(marshalUint32(b, uint32(len(v))), v...)
}
func marshal(b []byte, v interface{}) []byte {
switch v := v.(type) {
case uint8:
return append(b, v)
case uint32:
return marshalUint32(b, v)
case uint64:
return marshalUint64(b, v)
case string:
return marshalString(b, v)
default:
switch d := reflect.ValueOf(v); d.Kind() {
case reflect.Struct:
for i, n := 0, d.NumField(); i < n; i++ {
b = marshal(b, d.Field(i).Interface())
}
return b
case reflect.Slice:
for i, n := 0, d.Len(); i < n; i++ {
b = marshal(b, d.Index(i).Interface())
}
return b
default:
panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v))
}
}
}
func unmarshalUint32(b []byte) (uint32, []byte) {
v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
return v, b[4:]
}
func unmarshalUint64(b []byte) (uint64, []byte) {
h, b := unmarshalUint32(b)
l, b := unmarshalUint32(b)
return uint64(h)<<32 | uint64(l), b
}
func unmarshalString(b []byte) (string, []byte) {
n, b := unmarshalUint32(b)
return string(b[:n]), b[n:]
}
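// Illustrative round trip (not in the original file): strings travel as a
// big-endian uint32 length followed by the raw bytes.
//
//	b := marshalString(nil, "handle") // 00 00 00 06 68 61 6e 64 6c 65
//	s, rest := unmarshalString(b)     // s == "handle", len(rest) == 0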
// sendPacket marshals m and writes it to w as a length-prefixed packet,
// following the SFTP wire format (draft-ietf-secsh-filexfer).
func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
bb, err := m.MarshalBinary()
if err != nil {
return fmt.Errorf("marshal2(%#v): binary marshaller failed", err)
}
l := uint32(len(bb))
hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}
debug("send packet %T, len: %v", m, l)
_, err = w.Write(hdr)
if err != nil {
return err
}
_, err = w.Write(bb)
return err
}
func recvPacket(r io.Reader) (uint8, []byte, error) {
var b = []byte{0, 0, 0, 0}
if _, err := io.ReadFull(r, b); err != nil {
return 0, nil, err
}
l, _ := unmarshalUint32(b)
b = make([]byte, l)
if _, err := io.ReadFull(r, b); err != nil {
return 0, nil, err
}
return b[0], b[1:], nil
}
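// Sketch (added for illustration): every SFTP packet on the wire is framed
// as
//
//	uint32 length | byte type | payload (length-1 bytes)
//
// sendPacket writes the 4-byte length header followed by the marshalled
// body; recvPacket reads the header, then the body, and returns the type
// byte separately from the rest of the payload.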
// Here starts the definition of packets along with their MarshalBinary
// implementations.
// Manually writing the marshalling logic wins us a lot of time and
// allocation.
type sshFxInitPacket struct {
Version uint32
Extensions []struct {
Name, Data string
}
}
func (p sshFxInitPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 // byte + uint32
for _, e := range p.Extensions {
l += 4 + len(e.Name) + 4 + len(e.Data)
}
b := make([]byte, 0, l)
b = append(b, ssh_FXP_INIT)
b = marshalUint32(b, p.Version)
for _, e := range p.Extensions {
b = marshalString(b, e.Name)
b = marshalString(b, e.Data)
}
return b, nil
}
func marshalIdString(packetType byte, id uint32, str string) ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(str)
b := make([]byte, 0, l)
b = append(b, packetType)
b = marshalUint32(b, id)
b = marshalString(b, str)
return b, nil
}
type sshFxpReaddirPacket struct {
Id uint32
Handle string
}
func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_READDIR, p.Id, p.Handle)
}
func (p sshFxpReaddirPacket) id() uint32 { return p.Id }
type sshFxpOpendirPacket struct {
Id uint32
Path string
}
func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_OPENDIR, p.Id, p.Path)
}
func (p sshFxpOpendirPacket) id() uint32 { return p.Id }
type sshFxpLstatPacket struct {
Id uint32
Path string
}
func (p sshFxpLstatPacket) id() uint32 { return p.Id }
func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_LSTAT, p.Id, p.Path)
}
type sshFxpFstatPacket struct {
Id uint32
Handle string
}
func (p sshFxpFstatPacket) id() uint32 { return p.Id }
func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_FSTAT, p.Id, p.Handle)
}
type sshFxpClosePacket struct {
Id uint32
Handle string
}
func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_CLOSE, p.Id, p.Handle)
}
func (p sshFxpClosePacket) id() uint32 { return p.Id }
type sshFxpRemovePacket struct {
Id uint32
Filename string
}
func (p sshFxpRemovePacket) id() uint32 { return p.Id }
func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_REMOVE, p.Id, p.Filename)
}
type sshFxpRmdirPacket struct {
Id uint32
Path string
}
func (p sshFxpRmdirPacket) id() uint32 { return p.Id }
func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_RMDIR, p.Id, p.Path)
}
type sshFxpReadlinkPacket struct {
Id uint32
Path string
}
func (p sshFxpReadlinkPacket) id() uint32 { return p.Id }
func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) {
return marshalIdString(ssh_FXP_READLINK, p.Id, p.Path)
}
type sshFxpOpenPacket struct {
Id uint32
Path string
Pflags uint32
Flags uint32 // ignored
}
func (p sshFxpOpenPacket) id() uint32 { return p.Id }
func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 +
4 + len(p.Path) +
4 + 4
b := make([]byte, 0, l)
b = append(b, ssh_FXP_OPEN)
b = marshalUint32(b, p.Id)
b = marshalString(b, p.Path)
b = marshalUint32(b, p.Pflags)
b = marshalUint32(b, p.Flags)
return b, nil
}
type sshFxpReadPacket struct {
Id uint32
Handle string
Offset uint64
Len uint32
}
func (p sshFxpReadPacket) id() uint32 { return p.Id }
func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(p.Handle) +
8 + 4 // uint64 + uint32
b := make([]byte, 0, l)
b = append(b, ssh_FXP_READ)
b = marshalUint32(b, p.Id)
b = marshalString(b, p.Handle)
b = marshalUint64(b, p.Offset)
b = marshalUint32(b, p.Len)
return b, nil
}
type sshFxpRenamePacket struct {
Id uint32
Oldpath string
Newpath string
}
func (p sshFxpRenamePacket) id() uint32 { return p.Id }
func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(p.Oldpath) +
4 + len(p.Newpath)
b := make([]byte, 0, l)
b = append(b, ssh_FXP_RENAME)
b = marshalUint32(b, p.Id)
b = marshalString(b, p.Oldpath)
b = marshalString(b, p.Newpath)
return b, nil
}
type sshFxpWritePacket struct {
Id uint32
Handle string
Offset uint64
Length uint32
Data []byte
}
func (s sshFxpWritePacket) id() uint32 { return s.Id }
func (s sshFxpWritePacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(s.Handle) +
8 + 4 + // uint64 + uint32
len(s.Data)
b := make([]byte, 0, l)
b = append(b, ssh_FXP_WRITE)
b = marshalUint32(b, s.Id)
b = marshalString(b, s.Handle)
b = marshalUint64(b, s.Offset)
b = marshalUint32(b, s.Length)
b = append(b, s.Data...)
return b, nil
}
type sshFxpMkdirPacket struct {
Id uint32
Path string
Flags uint32 // ignored
}
func (p sshFxpMkdirPacket) id() uint32 { return p.Id }
func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(p.Path) +
4 // uint32
b := make([]byte, 0, l)
b = append(b, ssh_FXP_MKDIR)
b = marshalUint32(b, p.Id)
b = marshalString(b, p.Path)
b = marshalUint32(b, p.Flags)
return b, nil
}
type sshFxpSetstatPacket struct {
Id uint32
Path string
Flags uint32
Attrs interface{}
}
func (p sshFxpSetstatPacket) id() uint32 { return p.Id }
func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len(p.Path) +
4 // uint32 flags; attrs are appended via marshal below
b := make([]byte, 0, l)
b = append(b, ssh_FXP_SETSTAT)
b = marshalUint32(b, p.Id)
b = marshalString(b, p.Path)
b = marshalUint32(b, p.Flags)
b = marshal(b, p.Attrs)
return b, nil
}
type sshFxpStatvfsPacket struct {
Id uint32
Path string
}
func (p sshFxpStatvfsPacket) id() uint32 { return p.Id }
func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) {
l := 1 + 4 + // type(byte) + uint32
4 + len("statvfs@openssh.com") +
4 + len(p.Path)
b := make([]byte, 0, l)
b = append(b, ssh_FXP_EXTENDED)
b = marshalUint32(b, p.Id)
b = marshalString(b, "statvfs@openssh.com")
b = marshalString(b, p.Path)
return b, nil
}
type StatVFS struct {
Id uint32
Bsize uint64 /* file system block size */
Frsize uint64 /* fundamental fs block size */
Blocks uint64 /* number of blocks (unit f_frsize) */
Bfree uint64 /* free blocks in file system */
Bavail uint64 /* free blocks for non-root */
Files uint64 /* total file inodes */
Ffree uint64 /* free file inodes */
Favail uint64 /* free file inodes for non-root */
Fsid uint64 /* file system id */
Flag uint64 /* bit mask of f_flag values */
Namemax uint64 /* maximum filename length */
}
func (p *StatVFS) TotalSpace() uint64 {
return p.Frsize * p.Blocks
}
func (p *StatVFS) FreeSpace() uint64 {
return p.Frsize * p.Bfree
}
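// For illustration (not in the original): TotalSpace and FreeSpace convert
// block counts into bytes using the fundamental block size, so a response
// with Frsize=4096 and Blocks=1<<20 describes a 4 GiB file system.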

View File

@@ -1 +0,0 @@
box: wercker/golang

View File

@@ -1,14 +0,0 @@
Copyright (c) 2013 Vaughan Newton
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,70 +0,0 @@
go-ini
======
INI parsing library for Go (golang).
View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
Usage
-----
Parse an INI file:
```go
import "github.com/vaughan0/go-ini"
file, err := ini.LoadFile("myfile.ini")
```
Get data from the parsed file:
```go
name, ok := file.Get("person", "name")
if !ok {
panic("'name' variable missing from 'person' section")
}
```
Iterate through values in a section:
```go
for key, value := range file["mysection"] {
fmt.Printf("%s => %s\n", key, value)
}
```
Iterate through sections in a file:
```go
for name := range file {
fmt.Printf("Section name: %s\n", name)
}
```
File Format
-----------
INI files are parsed by go-ini line-by-line. Each line may be one of the following:
* A section definition: [section-name]
* A property: key = value
* A comment: #blahblah _or_ ;blahblah
* Blank. The line will be ignored.
Properties defined before any section headers are placed in the default section, which has
the empty string as its key.
Example:
```ini
# I am a comment
; So am I!
[apples]
colour = red or green
shape = applish
[oranges]
shape = square
colour = blue
```
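As a minimal end-to-end sketch (assuming the snippet above is saved as
`example.ini`), the parsed values can be read back like this:
```go
package main

import (
	"fmt"

	"github.com/vaughan0/go-ini"
)

func main() {
	file, err := ini.LoadFile("example.ini")
	if err != nil {
		panic(err)
	}
	colour, ok := file.Get("apples", "colour")
	fmt.Println(colour, ok) // "red or green" true
}
```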

View File

@@ -1,123 +0,0 @@
// Package ini provides functions for parsing INI configuration files.
package ini
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strings"
)
var (
sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
)
// ErrSyntax is returned when there is a syntax error in an INI file.
type ErrSyntax struct {
Line int
Source string // The contents of the erroneous line, without leading or trailing whitespace
}
func (e ErrSyntax) Error() string {
return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
}
// A File represents a parsed INI file.
type File map[string]Section
// A Section represents a single section of an INI file.
type Section map[string]string
// Returns a named Section. A Section will be created if one does not already exist for the given name.
func (f File) Section(name string) Section {
section := f[name]
if section == nil {
section = make(Section)
f[name] = section
}
return section
}
// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
func (f File) Get(section, key string) (value string, ok bool) {
if s := f[section]; s != nil {
value, ok = s[key]
}
return
}
// Loads INI data from a reader and stores the data in the File.
func (f File) Load(in io.Reader) (err error) {
bufin, ok := in.(*bufio.Reader)
if !ok {
bufin = bufio.NewReader(in)
}
return parseFile(bufin, f)
}
// Loads INI data from a named file and stores the data in the File.
func (f File) LoadFile(file string) (err error) {
in, err := os.Open(file)
if err != nil {
return
}
defer in.Close()
return f.Load(in)
}
func parseFile(in *bufio.Reader, file File) (err error) {
section := ""
lineNum := 0
for done := false; !done; {
var line string
if line, err = in.ReadString('\n'); err != nil {
if err == io.EOF {
done = true
} else {
return
}
}
lineNum++
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val := groups[1], groups[2]
key, val = strings.TrimSpace(key), strings.TrimSpace(val)
file.Section(section)[key] = val
} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
name := strings.TrimSpace(groups[1])
section = name
// Create the section if it does not exist
file.Section(section)
} else {
return ErrSyntax{lineNum, line}
}
}
return nil
}
// Loads and returns a File from a reader.
func Load(in io.Reader) (File, error) {
file := make(File)
err := file.Load(in)
return file, err
}
// Loads and returns an INI File from a file on disk.
func LoadFile(filename string) (File, error) {
file := make(File)
err := file.LoadFile(filename)
return file, err
}

View File

@@ -1,43 +0,0 @@
package ini
import (
"reflect"
"syscall"
"testing"
)
func TestLoadFile(t *testing.T) {
originalOpenFiles := numFilesOpen(t)
file, err := LoadFile("test.ini")
if err != nil {
t.Fatal(err)
}
if originalOpenFiles != numFilesOpen(t) {
t.Error("test.ini not closed")
}
if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
t.Error("file not read correctly")
}
}
func numFilesOpen(t *testing.T) (num uint64) {
var rlimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
t.Fatal(err)
}
maxFds := int(rlimit.Cur)
var stat syscall.Stat_t
for i := 0; i < maxFds; i++ {
if syscall.Fstat(i, &stat) == nil {
num++
} else {
return
}
}
return
}

View File

@@ -1,89 +0,0 @@
package ini
import (
"reflect"
"strings"
"testing"
)
func TestLoad(t *testing.T) {
src := `
# Comments are ignored
herp = derp
[foo]
hello=world
whitespace should = not matter
; sneaky semicolon-style comment
multiple = equals = signs
[bar]
this = that`
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
check := func(section, key, expect string) {
if value, _ := file.Get(section, key); value != expect {
t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
}
}
check("", "herp", "derp")
check("foo", "hello", "world")
check("foo", "whitespace should", "not matter")
check("foo", "multiple", "equals = signs")
check("bar", "this", "that")
}
func TestSyntaxError(t *testing.T) {
src := `
# Line 2
[foo]
bar = baz
# Here's an error on line 6:
wut?
herp = derp`
_, err := Load(strings.NewReader(src))
t.Logf("%T: %v", err, err)
if err == nil {
t.Fatal("expected an error, got nil")
}
syntaxErr, ok := err.(ErrSyntax)
if !ok {
t.Fatal("expected an error of type ErrSyntax")
}
if syntaxErr.Line != 6 {
t.Fatal("incorrect line number")
}
if syntaxErr.Source != "wut?" {
t.Fatal("incorrect source")
}
}
func TestDefinedSectionBehaviour(t *testing.T) {
check := func(src string, expect File) {
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(file, expect) {
t.Errorf("expected %v, got %v", expect, file)
}
}
// No sections for an empty file
check("", File{})
// Default section only if there are actually values for it
check("foo=bar", File{"": {"foo": "bar"}})
// User-defined sections should always be present, even if empty
check("[a]\n[b]\nfoo=bar", File{
"a": {},
"b": {"foo": "bar"},
})
check("foo=bar\n[a]\nthis=that", File{
"": {"foo": "bar"},
"a": {"this": "that"},
})
}

View File

@@ -1,2 +0,0 @@
[default]
stuff = things

View File

@@ -1,12 +1,12 @@
.PHONY: all clean test
.PHONY: all clean test restic
all: restic
restic: $(SOURCE)
restic:
go run build.go
clean:
rm -rf restic
test: $(SOURCE)
go run run_tests.go /dev/null
test:
go test ./...

117
README.md
View File

@@ -1,40 +1,23 @@
[![Stories in Ready](https://badge.waffle.io/restic/restic.png?label=ready&title=Ready)](https://waffle.io/restic/restic)
[![Documentation](https://readthedocs.org/projects/restic/badge/?version=latest)](https://restic.readthedocs.io/en/latest/?badge=latest)
[![Build Status](https://travis-ci.org/restic/restic.svg?branch=master)](https://travis-ci.org/restic/restic)
[![Build status](https://ci.appveyor.com/api/projects/status/nuy4lfbgfbytw92q/branch/master?svg=true)](https://ci.appveyor.com/project/fd0/restic/branch/master)
[![sourcegraph status](https://sourcegraph.com/api/repos/github.com/restic/restic/.badges/status.png)](https://sourcegraph.com/github.com/restic/restic)
[![Report Card](http://goreportcard.com/badge/github.com/restic/restic)](http://goreportcard.com/report/github.com/restic/restic)
[![Coverage Status](https://coveralls.io/repos/restic/restic/badge.svg)](https://coveralls.io/r/restic/restic)
Restic Design Principles
========================
Restic is a program that does backups right and was designed with the following
principles in mind:
Introduction
============
* Easy: Doing backups should be a frictionless process, otherwise you might be
tempted to skip it. Restic should be easy to configure and use, so that, in
the event of a data loss, you can just restore it. Likewise,
restoring data should not be complicated.
* Fast: Backing up your data with restic should only be limited by your
network or hard disk bandwidth so that you can back up your files every day.
Nobody does backups if it takes too much time. Restoring backups should only
transfer data that is needed for the files that are to be restored, so that
this process is also fast.
* Verifiable: Much more important than backup is restore, so restic enables
you to easily verify that all data can be restored.
* Secure: Restic uses cryptography to guarantee confidentiality and integrity
of your data. The location where the backup data is stored is assumed not to be a
trusted environment (e.g. a shared space where others like system
administrators are able to access your backups). Restic is built to secure
your data against such attackers.
* Efficient: With the growth of data, additional snapshots should only take
the storage of the actual increment. Even more, duplicate data should be
de-duplicated before it is actually written to the storage back end to save
precious backup space.
restic is a backup program that is fast, efficient and secure. Detailed
information can be found in [the documentation](doc/index.md) and [the user
manual](doc/Manual.md). The [design document](doc/Design.md) lists the
technical background and gives detailed information about the structure of the
repository and the data saved therein.
The latest documentation can be viewed online at
<https://restic.readthedocs.io/en/latest>. On the bottom left corner there is
a menu that allows switching to the documentation and user manual for the
latest released version.
Build restic
============
@@ -47,74 +30,48 @@ afterwards you'll find the binary in the current directory:
$ ./restic --help
Usage:
restic [OPTIONS] <command>
[...]
Application Options:
-r, --repo= Repository directory to backup to/restore from
More documentation can be found in the [user manual](doc/Manual.md).
Help Options:
-h, --help Show this help message
Available commands:
backup save file/directory
cache manage cache
cat dump something
check check the repository
find find a file/directory
init create repository
key manage keys
list lists data
ls list files
restore restore a snapshot
snapshots show snapshots
unlock remove locks
version display version
A short demo recording can be found here:
[![asciicast](https://asciinema.org/a/23554.png)](https://asciinema.org/a/23554)
Compatibility
=============
Backward compatibility for backups is important so that our users are always
able to restore saved data. Therefore restic follows [Semantic
Versioning](http://semver.org) to clearly define which versions are compatible.
The repository and data structures contained therein are considered the "Public
API" in the sense of Semantic Versioning.
We guarantee backward compatibility of all repositories within one major version;
as long as we do not increment the major version, data can be read and restored.
We strive to be fully backward compatible to all prior versions.
At the moment, the only tested compiler for restic is the official Go compiler.
Building restic with gccgo may work, but is not supported.
Contribute and Documentation
============================
Contributions are welcome! More information can be found in
[`CONTRIBUTING.md`](CONTRIBUTING.md). A document describing the design of
restic and the data structures stored on the backend is contained in
restic and the data structures stored on the back end is contained in
[`doc/Design.md`](doc/Design.md).
The development environment is described in [`CONTRIBUTING.md`](CONTRIBUTING.md).
If you'd like to start contributing to restic, but don't know exactly what to
do, have a look at this great article by Dave Cheney:
[Suggestions for contributing to an Open Source project](http://dave.cheney.net/2016/03/12/suggestions-for-contributing-to-an-open-source-project)
A few issues have been tagged with the label `help wanted`; you can start
by looking at those: https://github.com/restic/restic/labels/help%20wanted
Contact
=======
If you discover a bug or find something surprising, please feel free to [open a
github issue](https://github.com/restic/restic/issues/new). If you would like
to chat about restic, there is also the IRC channel #restic on
irc.freenode.net. Or just write me an email :)
If you discover a bug, find something surprising or if you would like to
discuss or ask something, please [open a github issue](https://github.com/restic/restic/issues/new).
If you would like to chat about restic, there is also the IRC channel #restic
on irc.freenode.net.
**Important**: If you discover something that you believe to be a possible critical
security problem, please do *not* open a GitHub issue but send an email directly to
alexander@bumpern.de. If possible, please encrypt your email using PGP
([0xD3F7A907](https://pgp.mit.edu/pks/lookup?op=get&search=0x91A6868BD3F7A907)).
alexander@bumpern.de. If possible, please encrypt your email using the following PGP key
([0x91A6868BD3F7A907](https://pgp.mit.edu/pks/lookup?op=get&search=0xCF8F18F2844575973F79D4E191A6868BD3F7A907)):
Talks
=====
The following talks will be or have been given about restic:
* 2015-08-23: [A Solution to the Backup Inconvenience](https://programm.froscon.de/2015/events/1515.html): Lecture at [FROSCON 2015](https://www.froscon.de) in Bonn, Germany
* 2015-02-01: [Lightning Talk at FOSDEM 2015](https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s): A short introduction (with slightly outdated command line)
* 2015-01-27: [Talk about restic at CCC Aachen](https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content) (in German)
```
pub 4096R/91A6868BD3F7A907 2014-11-01
Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
uid Alexander Neumann <alexander@bumpern.de>
uid Alexander Neumann <alexander@debian.org>
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
```
License
=======


@@ -1 +1 @@
0.1.0
0.2.0

Vagrantfile

@@ -1,7 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
GO_VERSION = '1.4.2'
GO_VERSION = '1.6'
def packages_freebsd
return <<-EOF
@@ -35,7 +35,6 @@ end
def packages_darwin
return <<-EOF
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install caskroom/cask/brew-cask
brew cask install osxfuse
EOF
end
@@ -57,24 +56,23 @@ def prepare_user(boxname)
gimme #{GO_VERSION} >> ~/.profile
echo export 'GOPATH=/vagrant/go' >> ~/.profile
echo export 'CDPATH=.:$GOPATH/src/github.com' >> ~/.profile
echo export 'PATH=$GOPATH/bin:/usr/local/bin:$PATH' >> ~/.profile
. ~/.profile
go get golang.org/x/tools/cmd/cover
go get github.com/tools/godep
go get github.com/constabulary/gb/...
echo
echo "Run:"
echo " vagrant rsync #{boxname}"
echo " vagrant ssh #{boxname} -c 'cd project/path; godep go test ./...'"
echo " vagrant ssh #{boxname} -c 'cd /vagrant; gb build && gb test'"
EOF
end
def fix_perms
return <<-EOF
chown -R vagrant /vagrant/go
chown -R vagrant /vagrant
EOF
end
@@ -84,8 +82,7 @@ end
# you're doing.
Vagrant.configure(2) do |config|
# use rsync to copy content to the folder
config.vm.synced_folder ".", "/vagrant/go/src/github.com/restic/restic", :type => "rsync"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder ".", "/vagrant", :type => "rsync"
# fix permissions on synced folder
config.vm.provision "fix perms", :type => :shell, :inline => fix_perms
@@ -95,6 +92,11 @@ Vagrant.configure(2) do |config|
b.vm.provision "packages linux", :type => :shell, :inline => packages_linux
b.vm.provision "install gimme", :type => :shell, :inline => install_gimme
b.vm.provision "prepare user", :type => :shell, :privileged => false, :inline => prepare_user("linux")
# fix network card
config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
end
end
config.vm.define "freebsd" do |b|


@@ -1,12 +1,23 @@
clone_folder: c:\gopath\src\github.com\restic\restic
clone_folder: c:\restic
environment:
GOPATH: c:\gopath;c:\gopath\src\github.com\restic\restic\Godeps\_workspace
GOPATH: c:\gopath
init:
- ps: >-
$app = Get-WmiObject -Class Win32_Product -Filter "Vendor = 'http://golang.org'"
if ($app) {
$app.Uninstall()
}
install:
- rmdir c:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.6.1.windows-amd64.msi
- msiexec /i go1.6.1.windows-amd64.msi /q
- go version
- go env
- appveyor DownloadFile http://downloads.sourceforge.net/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
- appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
- 7z x tar.zip bin/tar.exe
- set PATH=bin/;%PATH%


@@ -1,143 +0,0 @@
package backend_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"sort"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
func testBackend(b backend.Backend, t *testing.T) {
for _, tpe := range []backend.Type{
backend.Data, backend.Key, backend.Lock,
backend.Snapshot, backend.Index,
} {
// detect non-existing files
for _, test := range TestStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
// test if blob is already in repository
ret, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "blob was found to exist before creating")
// try to open not existing blob
_, err = b.Get(tpe, id.String())
Assert(t, err != nil, "blob data could be extracted before creation")
// try to read not existing blob
_, err = b.GetReader(tpe, id.String(), 0, 1)
Assert(t, err != nil, "blob reader could be obtained before creation")
// try to get string out, should fail
ret, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !ret, "id %q was found (but should not have)", test.id)
}
// add files
for _, test := range TestStrings {
// store string in backend
blob, err := b.Create()
OK(t, err)
_, err = blob.Write([]byte(test.data))
OK(t, err)
OK(t, blob.Finalize(tpe, test.id))
// try to get it out again
rd, err := b.Get(tpe, test.id)
OK(t, err)
Assert(t, rd != nil, "Get() returned nil")
// try to read it out again
reader, err := b.GetReader(tpe, test.id, 0, uint(len(test.data)))
OK(t, err)
Assert(t, reader != nil, "GetReader() returned nil")
content := make([]byte, len(test.data))
_, err = io.ReadFull(reader, content)
OK(t, err)
Assert(t, test.data == string(content), "Read() returned different content")
// try to read it out with an offset and a length
readerOffLen, err := b.GetReader(tpe, test.id, 1, uint(len(test.data)-2))
OK(t, err)
Assert(t, readerOffLen != nil, "GetReader() returned nil")
bytesOffLen := make([]byte, len(test.data)-2)
_, err = io.ReadFull(readerOffLen, bytesOffLen)
OK(t, err)
Assert(t, test.data[1:len(test.data)-1] == string(bytesOffLen), "Read() with offset and length returned different content")
buf, err := ioutil.ReadAll(rd)
OK(t, err)
Equals(t, test.data, string(buf))
}
// test adding the first file again
test := TestStrings[0]
// create blob
blob, err := b.Create()
OK(t, err)
_, err = blob.Write([]byte(test.data))
OK(t, err)
err = blob.Finalize(tpe, test.id)
Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate
err = b.Remove(tpe, test.id)
OK(t, err)
// create blob
blob, err = b.Create()
OK(t, err)
_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
OK(t, err)
OK(t, blob.Finalize(tpe, test.id))
// list items
IDs := backend.IDs{}
for _, test := range TestStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
IDs = append(IDs, id)
}
sort.Sort(IDs)
i := 0
for s := range b.List(tpe, nil) {
Equals(t, IDs[i].String(), s)
i++
}
// remove content if requested
if TestCleanup {
for _, test := range TestStrings {
id, err := backend.ParseID(test.id)
OK(t, err)
found, err := b.Test(tpe, id.String())
OK(t, err)
Assert(t, found, fmt.Sprintf("id %q was not found before removal", id))
OK(t, b.Remove(tpe, id.String()))
found, err = b.Test(tpe, id.String())
OK(t, err)
Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
}
}
}
}


@@ -1,2 +0,0 @@
// Package backend provides local and remote storage for restic repositories.
package backend


@@ -1,66 +0,0 @@
package backend
import "io"
// Type is the type of a Blob.
type Type string
const (
Data Type = "data"
Key = "key"
Lock = "lock"
Snapshot = "snapshot"
Index = "index"
Config = "config"
)
// A Backend manages data stored somewhere.
type Backend interface {
// Location returns a string that specifies the location of the repository,
// like a URL.
Location() string
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
Create() (Blob, error)
// Get returns an io.ReadCloser for the Blob with the given name of type t.
Get(t Type, name string) (io.ReadCloser, error)
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length.
GetReader(t Type, name string, offset, length uint) (io.ReadCloser, error)
// Test returns whether a Blob with the given name and type exists.
Test(t Type, name string) (bool, error)
// Remove removes a Blob with type t and name.
Remove(t Type, name string) error
// Close the backend
Close() error
Lister
}
type Lister interface {
// List returns a channel that yields all names of blobs of type t in
// lexicographic order. A goroutine is started for this. If the channel
// done is closed, sending stops.
List(t Type, done <-chan struct{}) <-chan string
}
type Deleter interface {
// Delete the complete repository.
Delete() error
}
type Blob interface {
io.Writer
// Finalize moves the data blob to the final location for type and name.
Finalize(t Type, name string) error
// Size returns the number of bytes written to the backend so far.
Size() uint
}
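To make the contract concrete, a minimal sketch (not part of the package) of the intended Create/Write/Finalize flow:

```go
// storeBlob writes data under (t, name) via any Backend implementation.
func storeBlob(be Backend, t Type, name string, data []byte) error {
	blob, err := be.Create()
	if err != nil {
		return err
	}
	if _, err := blob.Write(data); err != nil {
		return err
	}
	// The data only becomes visible under (t, name) after Finalize.
	return blob.Finalize(t, name)
}
```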


@@ -1,341 +0,0 @@
package local
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"sync"
"github.com/restic/restic/backend"
)
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")
type Local struct {
p string
mu sync.Mutex
open map[string][]*os.File // Contains open files. Guarded by 'mu'.
}
// Open opens the local backend at dir.
func Open(dir string) (*Local, error) {
items := []string{
dir,
filepath.Join(dir, backend.Paths.Data),
filepath.Join(dir, backend.Paths.Snapshots),
filepath.Join(dir, backend.Paths.Index),
filepath.Join(dir, backend.Paths.Locks),
filepath.Join(dir, backend.Paths.Keys),
filepath.Join(dir, backend.Paths.Temp),
}
// test if all necessary dirs are there
for _, d := range items {
if _, err := os.Stat(d); err != nil {
return nil, fmt.Errorf("%s does not exist", d)
}
}
return &Local{p: dir, open: make(map[string][]*os.File)}, nil
}
// Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created.
func Create(dir string) (*Local, error) {
dirs := []string{
dir,
filepath.Join(dir, backend.Paths.Data),
filepath.Join(dir, backend.Paths.Snapshots),
filepath.Join(dir, backend.Paths.Index),
filepath.Join(dir, backend.Paths.Locks),
filepath.Join(dir, backend.Paths.Keys),
filepath.Join(dir, backend.Paths.Temp),
}
// test if config file already exists
_, err := os.Lstat(backend.Paths.Config)
if err == nil {
return nil, errors.New("config file already exists")
}
// test if directories already exist
for _, d := range dirs[1:] {
if _, err := os.Stat(d); err == nil {
return nil, fmt.Errorf("dir %s already exists", d)
}
}
// create paths for data, refs and temp
for _, d := range dirs {
err := os.MkdirAll(d, backend.Modes.Dir)
if err != nil {
return nil, err
}
}
// open backend
return Open(dir)
}
// Location returns this backend's location (the directory name).
func (b *Local) Location() string {
return b.p
}
// tempFile returns a new temporary file in this backend's temp directory.
func (b *Local) tempFile() (*os.File, error) {
return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-")
}
type localBlob struct {
f *os.File
size uint
final bool
basedir string
}
func (lb *localBlob) Write(p []byte) (int, error) {
if lb.final {
return 0, errors.New("blob already closed")
}
n, err := lb.f.Write(p)
lb.size += uint(n)
return n, err
}
func (lb *localBlob) Size() uint {
return lb.size
}
func (lb *localBlob) Finalize(t backend.Type, name string) error {
if lb.final {
return errors.New("Already finalized")
}
lb.final = true
err := lb.f.Close()
if err != nil {
return fmt.Errorf("local: file.Close: %v", err)
}
f := filename(lb.basedir, t, name)
// create directories if necessary, ignore errors
if t == backend.Data {
os.MkdirAll(filepath.Dir(f), backend.Modes.Dir)
}
// test if new path already exists
if _, err := os.Stat(f); err == nil {
return fmt.Errorf("Close(): file %v already exists", f)
}
if err := os.Rename(lb.f.Name(), f); err != nil {
return err
}
// set mode to read-only
fi, err := os.Stat(f)
if err != nil {
return err
}
return setNewFileMode(f, fi)
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (b *Local) Create() (backend.Blob, error) {
// TODO: make sure that tempfile is removed upon error
// create tempfile in backend
file, err := b.tempFile()
if err != nil {
return nil, err
}
blob := localBlob{
f: file,
basedir: b.p,
}
b.mu.Lock()
open, _ := b.open["blobs"]
b.open["blobs"] = append(open, file)
b.mu.Unlock()
return &blob, nil
}
// Construct path for given Type and name.
func filename(base string, t backend.Type, name string) string {
if t == backend.Config {
return filepath.Join(base, "config")
}
return filepath.Join(dirname(base, t, name), name)
}
// Construct directory for given Type.
func dirname(base string, t backend.Type, name string) string {
var n string
switch t {
case backend.Data:
n = backend.Paths.Data
if len(name) > 2 {
n = filepath.Join(n, name[:2])
}
case backend.Snapshot:
n = backend.Paths.Snapshots
case backend.Index:
n = backend.Paths.Index
case backend.Lock:
n = backend.Paths.Locks
case backend.Key:
n = backend.Paths.Keys
}
return filepath.Join(base, n)
}
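A hypothetical in-package test sketching the resulting on-disk layout; it assumes backend.Paths.Data is "data" and backend.Paths.Snapshots is "snapshots":

```go
func TestFilenameLayout(t *testing.T) {
	// Data blobs are sharded into two-character subdirectories.
	if got := filename("/srv/repo", backend.Data, "ab12cd"); got != "/srv/repo/data/ab/ab12cd" {
		t.Errorf("unexpected data path: %v", got)
	}
	// Other types live directly below their type directory.
	if got := filename("/srv/repo", backend.Snapshot, "ef34"); got != "/srv/repo/snapshots/ef34" {
		t.Errorf("unexpected snapshot path: %v", got)
	}
	// The config file is special-cased at the repository root.
	if got := filename("/srv/repo", backend.Config, ""); got != "/srv/repo/config" {
		t.Errorf("unexpected config path: %v", got)
	}
}
```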
// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) {
file, err := os.Open(filename(b.p, t, name))
if err != nil {
return nil, err
}
b.mu.Lock()
open, _ := b.open[filename(b.p, t, name)]
b.open[filename(b.p, t, name)] = append(open, file)
b.mu.Unlock()
return file, nil
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
f, err := os.Open(filename(b.p, t, name))
if err != nil {
return nil, err
}
b.mu.Lock()
open, _ := b.open[filename(b.p, t, name)]
b.open[filename(b.p, t, name)] = append(open, f)
b.mu.Unlock()
_, err = f.Seek(int64(offset), 0)
if err != nil {
return nil, err
}
if length == 0 {
return f, nil
}
return backend.LimitReadCloser(f, int64(length)), nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t backend.Type, name string) (bool, error) {
_, err := os.Stat(filename(b.p, t, name))
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error {
// close all open files we may have.
fn := filename(b.p, t, name)
b.mu.Lock()
open, _ := b.open[fn]
for _, file := range open {
file.Close()
}
b.open[fn] = nil
b.mu.Unlock()
// reset read-only flag
err := os.Chmod(fn, 0666)
if err != nil {
return err
}
return os.Remove(fn)
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
// TODO: use os.Open() and d.Readdirnames() instead of Glob()
var pattern string
if t == backend.Data {
pattern = filepath.Join(dirname(b.p, t, ""), "*", "*")
} else {
pattern = filepath.Join(dirname(b.p, t, ""), "*")
}
ch := make(chan string)
matches, err := filepath.Glob(pattern)
if err != nil {
close(ch)
return ch
}
for i := range matches {
matches[i] = filepath.Base(matches[i])
}
sort.Strings(matches)
go func() {
defer close(ch)
for _, m := range matches {
if m == "" {
continue
}
select {
case ch <- m:
case <-done:
return
}
}
}()
return ch
}
// Delete removes the repository and all files.
func (b *Local) Delete() error {
b.Close()
return os.RemoveAll(b.p)
}
// Close closes all open files.
// They may have been closed already,
// so we ignore all errors.
func (b *Local) Close() error {
b.mu.Lock()
for _, open := range b.open {
for _, file := range open {
file.Close()
}
}
b.open = make(map[string][]*os.File)
b.mu.Unlock()
return nil
}


@@ -1,56 +0,0 @@
package backend_test
import (
"fmt"
"io/ioutil"
"testing"
"github.com/restic/restic/backend/local"
. "github.com/restic/restic/test"
)
func setupLocalBackend(t *testing.T) *local.Local {
tempdir, err := ioutil.TempDir("", "restic-test-")
OK(t, err)
b, err := local.Create(tempdir)
OK(t, err)
t.Logf("created local backend at %s", tempdir)
return b
}
func teardownLocalBackend(t *testing.T, b *local.Local) {
if !TestCleanup {
t.Logf("leaving local backend at %s\n", b.Location())
return
}
OK(t, b.Delete())
}
func TestLocalBackend(t *testing.T) {
// test for non-existing backend
b, err := local.Open("/invalid-restic-test")
Assert(t, err != nil, "opening invalid repository at /invalid-restic-test should have failed, but err is nil")
Assert(t, b == nil, fmt.Sprintf("opening invalid repository at /invalid-restic-test should have failed, but b is not nil: %v", b))
s := setupLocalBackend(t)
defer teardownLocalBackend(t, s)
testBackend(s, t)
}
func TestLocalBackendCreationFailures(t *testing.T) {
b := setupLocalBackend(t)
defer teardownLocalBackend(t, b)
// test failure to create a new repository at the same location
b2, err := local.Create(b.Location())
Assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location()))
// test failure to create a new repository at the same location without a config file
b2, err = local.Create(b.Location())
Assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location()))
}


@@ -1,73 +0,0 @@
package backend
import (
"hash"
"io"
)
type HashAppendReader struct {
r io.Reader
h hash.Hash
sum []byte
closed bool
}
func NewHashAppendReader(r io.Reader, h hash.Hash) *HashAppendReader {
return &HashAppendReader{
h: h,
r: io.TeeReader(r, h),
sum: make([]byte, 0, h.Size()),
}
}
func (h *HashAppendReader) Read(p []byte) (n int, err error) {
if !h.closed {
n, err = h.r.Read(p)
if err == io.EOF {
h.closed = true
h.sum = h.h.Sum(h.sum)
} else if err != nil {
return
}
}
if h.closed {
// output hash
r := len(p) - n
if r > 0 {
c := copy(p[n:], h.sum)
h.sum = h.sum[c:]
n += c
err = nil
}
if len(h.sum) == 0 {
err = io.EOF
}
}
return
}
type HashingReader struct {
r io.Reader
h hash.Hash
}
func NewHashingReader(r io.Reader, h hash.Hash) *HashingReader {
return &HashingReader{
h: h,
r: io.TeeReader(r, h),
}
}
func (h *HashingReader) Read(p []byte) (int, error) {
return h.r.Read(p)
}
func (h *HashingReader) Sum(d []byte) []byte {
return h.h.Sum(d)
}


@@ -1,81 +0,0 @@
package backend_test
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"io/ioutil"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
func TestHashAppendReader(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
rd := backend.NewHashAppendReader(bytes.NewReader(data), sha256.New())
target := bytes.NewBuffer(nil)
n, err := io.Copy(target, rd)
OK(t, err)
Assert(t, n == int64(size)+int64(len(expectedHash)),
"HashAppendReader: invalid number of bytes read: got %d, expected %d",
n, size+len(expectedHash))
r := target.Bytes()
resultingHash := r[len(r)-len(expectedHash):]
Assert(t, bytes.Equal(expectedHash[:], resultingHash),
"HashAppendReader: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
// try to read again, must return io.EOF
n2, err := rd.Read(make([]byte, 100))
Assert(t, n2 == 0, "HashAppendReader returned %d additional bytes", n2)
Assert(t, err == io.EOF, "HashAppendReader returned %v instead of EOF", err)
}
}
func TestHashingReader(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
rd := backend.NewHashingReader(bytes.NewReader(data), sha256.New())
n, err := io.Copy(ioutil.Discard, rd)
OK(t, err)
Assert(t, n == int64(size),
"HashAppendReader: invalid number of bytes read: got %d, expected %d",
n, size)
resultingHash := rd.Sum(nil)
Assert(t, bytes.Equal(expectedHash[:], resultingHash),
"HashAppendReader: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
// try to read again, must return io.EOF
n2, err := rd.Read(make([]byte, 100))
Assert(t, n2 == 0, "HashingReader returned %d additional bytes", n2)
Assert(t, err == io.EOF, "HashingReader returned %v instead of EOF", err)
}
}


@@ -1,243 +0,0 @@
package s3
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/restic/restic/backend"
)
const maxKeysInList = 1000
const connLimit = 10
const backendPrefix = "restic"
func s3path(t backend.Type, name string) string {
if t == backend.Config {
return backendPrefix + "/" + string(t)
}
return backendPrefix + "/" + string(t) + "/" + name
}
type S3Backend struct {
bucket *s3.Bucket
connChan chan struct{}
path string
}
// Open a backend using an S3 bucket object
func OpenS3Bucket(bucket *s3.Bucket, bucketname string) *S3Backend {
connChan := make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ {
connChan <- struct{}{}
}
return &S3Backend{bucket: bucket, path: bucketname, connChan: connChan}
}
// Open opens the S3 backend at bucket and region.
func Open(regionname, bucketname string) (backend.Backend, error) {
auth, err := aws.EnvAuth()
if err != nil {
return nil, err
}
client := s3.New(auth, aws.Regions[regionname])
return OpenS3Bucket(client.Bucket(bucketname), bucketname), nil
}
// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
return be.path
}
type s3Blob struct {
b *S3Backend
buf *bytes.Buffer
final bool
}
func (bb *s3Blob) Write(p []byte) (int, error) {
if bb.final {
return 0, errors.New("blob already closed")
}
n, err := bb.buf.Write(p)
return n, err
}
func (bb *s3Blob) Read(p []byte) (int, error) {
return bb.buf.Read(p)
}
func (bb *s3Blob) Close() error {
bb.final = true
bb.buf.Reset()
return nil
}
func (bb *s3Blob) Size() uint {
return uint(bb.buf.Len())
}
func (bb *s3Blob) Finalize(t backend.Type, name string) error {
if bb.final {
return errors.New("Already finalized")
}
bb.final = true
path := s3path(t, name)
// Check key does not already exist
key, err := bb.b.bucket.GetKey(path)
if err == nil && key.Key == path {
return errors.New("key already exists!")
}
<-bb.b.connChan
err = bb.b.bucket.PutReader(path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream", "private")
bb.b.connChan <- struct{}{}
bb.buf.Reset()
return err
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (be *S3Backend) Create() (backend.Blob, error) {
blob := s3Blob{
b: be,
buf: &bytes.Buffer{},
}
return &blob, nil
}
// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
path := s3path(t, name)
return be.bucket.GetReader(path)
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
rc, err := be.Get(t, name)
if err != nil {
return nil, err
}
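// The S3 GET above always returns the object from byte 0, so emulate the offset by reading and discarding the first offset bytes.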
n, errc := io.CopyN(ioutil.Discard, rc, int64(offset))
if errc != nil {
return nil, errc
} else if n != int64(offset) {
return nil, fmt.Errorf("less bytes read than expected, read: %d, expected: %d", n, offset)
}
if length == 0 {
return rc, nil
}
return backend.LimitReadCloser(rc, int64(length)), nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
found := false
path := s3path(t, name)
key, err := be.bucket.GetKey(path)
if err == nil && key.Key == path {
found = true
}
// If error, then not found
return found, nil
}
// Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error {
path := s3path(t, name)
return be.bucket.Del(path)
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
ch := make(chan string)
prefix := s3path(t, "")
listresp, err := be.bucket.List(prefix, "/", "", maxKeysInList)
if err != nil {
close(ch)
return ch
}
matches := make([]string, len(listresp.Contents))
for idx, key := range listresp.Contents {
matches[idx] = strings.TrimPrefix(key.Key, prefix)
}
// Continue making requests to get full list.
for listresp.IsTruncated {
listresp, err = be.bucket.List(prefix, "/", listresp.NextMarker, maxKeysInList)
if err != nil {
close(ch)
return ch
}
for _, key := range listresp.Contents {
matches = append(matches, strings.TrimPrefix(key.Key, prefix))
}
}
go func() {
defer close(ch)
for _, m := range matches {
if m == "" {
continue
}
select {
case ch <- m:
case <-done:
return
}
}
}()
return ch
}
// Remove keys for a specified backend type
func (be *S3Backend) removeKeys(t backend.Type) {
done := make(chan struct{})
defer close(done)
for key := range be.List(t, done) {
be.Remove(t, key)
}
}
// Delete removes all restic keys
func (be *S3Backend) Delete() error {
be.removeKeys(backend.Data)
be.removeKeys(backend.Key)
be.removeKeys(backend.Lock)
be.removeKeys(backend.Snapshot)
be.removeKeys(backend.Index)
be.removeKeys(backend.Config)
return nil
}
// Close does nothing
func (be *S3Backend) Close() error { return nil }


@@ -1,53 +0,0 @@
package backend_test
import (
"testing"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/s3/s3test"
bes3 "github.com/restic/restic/backend/s3"
. "github.com/restic/restic/test"
)
type LocalServer struct {
auth aws.Auth
region aws.Region
srv *s3test.Server
config *s3test.Config
}
var s LocalServer
func setupS3Backend(t *testing.T) *bes3.S3Backend {
s.config = &s3test.Config{
Send409Conflict: true,
}
srv, err := s3test.NewServer(s.config)
OK(t, err)
s.srv = srv
s.region = aws.Region{
Name: "faux-region-1",
S3Endpoint: srv.URL(),
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
s.auth = aws.Auth{"abc", "123", ""}
service := s3.New(s.auth, s.region)
bucket := service.Bucket("testbucket")
err = bucket.PutBucket("private")
OK(t, err)
t.Logf("created s3 backend locally")
return bes3.OpenS3Bucket(bucket, "testbucket")
}
func TestS3Backend(t *testing.T) {
s := setupS3Backend(t)
testBackend(s, t)
}


@@ -1,65 +0,0 @@
package backend_test
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/restic/restic/backend/sftp"
. "github.com/restic/restic/test"
)
func setupSFTPBackend(t *testing.T) *sftp.SFTP {
sftpserver := ""
for _, dir := range strings.Split(TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
fd, err := os.Open(testpath)
fd.Close()
if !os.IsNotExist(err) {
sftpserver = testpath
break
}
}
if sftpserver == "" {
return nil
}
tempdir, err := ioutil.TempDir("", "restic-test-")
OK(t, err)
b, err := sftp.Create(tempdir, sftpserver)
OK(t, err)
t.Logf("created sftp backend locally at %s", tempdir)
return b
}
func teardownSFTPBackend(t *testing.T, b *sftp.SFTP) {
if !TestCleanup {
t.Logf("leaving backend at %s\n", b.Location())
return
}
err := os.RemoveAll(b.Location())
OK(t, err)
}
func TestSFTPBackend(t *testing.T) {
if !RunIntegrationTest {
t.Skip("integration tests disabled")
}
s := setupSFTPBackend(t)
if s == nil {
t.Skip("unable to find sftp-server binary")
return
}
defer teardownSFTPBackend(t, s)
testBackend(s, t)
}


@@ -1,74 +0,0 @@
package backend
import (
"errors"
"hash"
"io"
)
type HashAppendWriter struct {
w io.Writer
origWr io.Writer
h hash.Hash
sum []byte
closed bool
}
func NewHashAppendWriter(w io.Writer, h hash.Hash) *HashAppendWriter {
return &HashAppendWriter{
h: h,
w: io.MultiWriter(w, h),
origWr: w,
sum: make([]byte, 0, h.Size()),
}
}
func (h *HashAppendWriter) Close() error {
if h == nil {
return nil
}
if !h.closed {
h.closed = true
_, err := h.origWr.Write(h.h.Sum(nil))
return err
}
return nil
}
func (h *HashAppendWriter) Write(p []byte) (n int, err error) {
if !h.closed {
return h.w.Write(p)
}
return 0, errors.New("Write() called on closed HashAppendWriter")
}
type HashingWriter struct {
w io.Writer
h hash.Hash
size int
}
func NewHashingWriter(w io.Writer, h hash.Hash) *HashingWriter {
return &HashingWriter{
h: h,
w: io.MultiWriter(w, h),
}
}
func (h *HashingWriter) Write(p []byte) (int, error) {
n, err := h.w.Write(p)
h.size += n
return n, err
}
func (h *HashingWriter) Sum(d []byte) []byte {
return h.h.Sum(d)
}
func (h *HashingWriter) Size() int {
return h.size
}


@@ -1,81 +0,0 @@
package backend_test
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"io/ioutil"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
func TestHashAppendWriter(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
target := bytes.NewBuffer(nil)
wr := backend.NewHashAppendWriter(target, sha256.New())
_, err = wr.Write(data)
OK(t, err)
OK(t, wr.Close())
Assert(t, len(target.Bytes()) == size+len(expectedHash),
"HashAppendWriter: invalid number of bytes written: got %d, expected %d",
len(target.Bytes()), size+len(expectedHash))
r := target.Bytes()
resultingHash := r[len(r)-len(expectedHash):]
Assert(t, bytes.Equal(expectedHash[:], resultingHash),
"HashAppendWriter: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
// write again, this must return an error
_, err = wr.Write([]byte{23})
Assert(t, err != nil,
"HashAppendWriter: Write() after Close() did not return an error")
}
}
func TestHashingWriter(t *testing.T) {
tests := []int{5, 23, 2<<18 + 23, 1 << 20}
for _, size := range tests {
data := make([]byte, size)
_, err := io.ReadFull(rand.Reader, data)
if err != nil {
t.Fatalf("ReadFull: %v", err)
}
expectedHash := sha256.Sum256(data)
wr := backend.NewHashingWriter(ioutil.Discard, sha256.New())
n, err := io.Copy(wr, bytes.NewReader(data))
OK(t, err)
Assert(t, n == int64(size),
"HashAppendWriter: invalid number of bytes written: got %d, expected %d",
n, size)
Assert(t, wr.Size() == size,
"HashingWriter: invalid number of bytes returned: got %d, expected %d",
wr.Size(), size)
resultingHash := wr.Sum(nil)
Assert(t, bytes.Equal(expectedHash[:], resultingHash),
"HashAppendWriter: hashes do not match: expected %02x, got %02x",
expectedHash, resultingHash)
}
}

build.go

@@ -30,7 +30,11 @@ func specialDir(name string) bool {
}
base := filepath.Base(name)
return base[0] == '_' || base[0] == '.'
if base == "vendor" || base[0] == '_' || base[0] == '.' {
return true
}
return false
}
// excludePath returns true if the file should not be copied to the new GOPATH.
@@ -148,10 +152,12 @@ func showUsage(output io.Writer) {
fmt.Fprintf(output, "USAGE: go run build.go OPTIONS\n")
fmt.Fprintf(output, "\n")
fmt.Fprintf(output, "OPTIONS:\n")
fmt.Fprintf(output, " -v --verbose output more messages\n")
fmt.Fprintf(output, " -t --tags specify additional build tags\n")
fmt.Fprintf(output, " -k --keep-gopath do not remove the GOPATH after build\n")
fmt.Fprintf(output, " -T --test run tests\n")
fmt.Fprintf(output, " -v --verbose output more messages\n")
fmt.Fprintf(output, " -t --tags specify additional build tags\n")
fmt.Fprintf(output, " -k --keep-gopath do not remove the GOPATH after build\n")
fmt.Fprintf(output, " -T --test run tests\n")
fmt.Fprintf(output, " --goos value set GOOS for cross-compilation\n")
fmt.Fprintf(output, " --goarch value set GOARCH for cross-compilation\n")
}
func verbosePrintf(message string, args ...interface{}) {
@@ -159,7 +165,7 @@ func verbosePrintf(message string, args ...interface{}) {
return
}
fmt.Printf(message, args...)
fmt.Printf("build: "+message, args...)
}
// cleanEnv returns a clean environment with GOPATH and GOBIN removed (if
@@ -177,10 +183,11 @@ func cleanEnv() (env []string) {
}
// build runs "go build args..." with GOPATH set to gopath.
func build(gopath string, args ...string) error {
func build(cwd, goos, goarch, gopath string, args ...string) error {
args = append([]string{"build"}, args...)
cmd := exec.Command("go", args...)
cmd.Env = append(cleanEnv(), "GOPATH="+gopath)
cmd.Env = append(cleanEnv(), "GOPATH="+gopath, "GOARCH="+goarch, "GOOS="+goos)
cmd.Dir = cwd
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
verbosePrintf("go %s\n", args)
@@ -189,10 +196,11 @@ func build(gopath string, args ...string) error {
}
// test runs "go test args..." with GOPATH set to gopath.
func test(gopath string, args ...string) error {
func test(cwd, gopath string, args ...string) error {
args = append([]string{"test"}, args...)
cmd := exec.Command("go", args...)
cmd.Env = append(cleanEnv(), "GOPATH="+gopath)
cmd.Dir = cwd
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
verbosePrintf("go %s\n", args)
@@ -200,22 +208,41 @@ func test(gopath string, args ...string) error {
return cmd.Run()
}
// getVersion returns a version string, either from the file VERSION in the
// current directory or from git.
func getVersion() string {
v, err := ioutil.ReadFile("VERSION")
version := strings.TrimSpace(string(v))
if err == nil {
verbosePrintf("version from file 'VERSION' is %s\n", version)
return version
// getVersion returns the version string from the file VERSION in the current
// directory.
func getVersionFromFile() string {
buf, err := ioutil.ReadFile("VERSION")
if err != nil {
verbosePrintf("error reading file VERSION: %v\n", err)
return ""
}
return gitVersion()
return strings.TrimSpace(string(buf))
}
// gitVersion returns a version string that identifies the currently checked
// out git commit.
func gitVersion() string {
// getVersion returns a version string which is a combination of the contents
// of the file VERSION in the current directory and the version from git (if
// available).
func getVersion() string {
versionFile := getVersionFromFile()
versionGit := getVersionFromGit()
verbosePrintf("version from file 'VERSION' is %q, version from git %q\n",
versionFile, versionGit)
switch {
case versionFile == "":
return versionGit
case versionGit == "":
return versionFile
}
return fmt.Sprintf("%s (%s)", versionFile, versionGit)
}
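// Example (hypothetical values): with VERSION containing "0.2.0" and git
// describing the checkout as "v0.2.0-5-gabc1234", getVersion returns
// "0.2.0 (v0.2.0-5-gabc1234)"; if either source is empty, the other is used alone.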
// getVersionFromGit returns a version string that identifies the currently
// checked out git commit.
func getVersionFromGit() string {
cmd := exec.Command("git", "describe",
"--long", "--tags", "--dirty", "--always")
out, err := cmd.Output()
@@ -229,11 +256,37 @@ func gitVersion() string {
return version
}
// Constants represents a set of constants that are set in the final binary to
// the given value via compiler flags.
type Constants map[string]string
// LDFlags returns the string that can be passed to go build's `-ldflags`.
func (cs Constants) LDFlags() string {
l := make([]string, 0, len(cs))
v := runtime.Version()
if strings.HasPrefix(v, "go1.5") || strings.HasPrefix(v, "go1.6") || strings.HasPrefix(v, "go1.7") {
for k, v := range cs {
l = append(l, fmt.Sprintf(`-X "%s=%s"`, k, v))
}
} else {
for k, v := range cs {
l = append(l, fmt.Sprintf(`-X %q %q`, k, v))
}
}
return strings.Join(l, " ")
}
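// Sketch (hypothetical constant main.version=0.2.0):
//   go1.5 through go1.7 linkers get: -X "main.version=0.2.0"
//   older linkers get the two-argument form: -X "main.version" "0.2.0"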
func main() {
buildTags := []string{}
skipNext := false
params := os.Args[1:]
targetGOOS := runtime.GOOS
targetGOARCH := runtime.GOARCH
for i, arg := range params {
if skipNext {
skipNext = false
@@ -250,8 +303,15 @@ func main() {
buildTags = strings.Split(params[i+1], " ")
case "-T", "--test":
runTests = true
case "--goos":
skipNext = true
targetGOOS = params[i+1]
case "--goarch":
skipNext = true
targetGOARCH = params[i+1]
case "-h":
showUsage(os.Stdout)
return
default:
fmt.Fprintf(os.Stderr, "Error: unknown option %q\n\n", arg)
showUsage(os.Stderr)
@@ -281,13 +341,17 @@ func main() {
}
verbosePrintf("create GOPATH at %v\n", gopath)
if err = updateGopath(gopath, root, "github.com/restic/restic"); err != nil {
die("copying files from %v to %v failed: %v\n", root, gopath, err)
if err = updateGopath(gopath, filepath.Join(root, "src", "restic"), "restic"); err != nil {
die("copying files from %v/src/restic to %v/src/restic failed: %v\n", root, gopath, err)
}
vendor := filepath.Join(root, "Godeps", "_workspace", "src")
if err = updateGopath(gopath, filepath.Join(root, "src", "cmds"), "cmds"); err != nil {
die("copying files from %v/src/cmds to %v/src/restic/cmds failed: %v\n", root, gopath, err)
}
vendor := filepath.Join(root, "vendor", "src")
if err = updateGopath(gopath, vendor, ""); err != nil {
die("copying files from %v to %v failed: %v\n", root, gopath, err)
die("copying files from %v to %v/src failed: %v\n", vendor, gopath, err)
}
defer func() {
@@ -297,27 +361,37 @@ func main() {
die("remove GOPATH at %s failed: %v\n", err)
}
} else {
fmt.Printf("leaving temporary GOPATH at %v\n", gopath)
verbosePrintf("leaving temporary GOPATH at %v\n", gopath)
}
}()
output := "restic"
if runtime.GOOS == "windows" {
output = "restic.exe"
outputFilename := "restic"
if targetGOOS == "windows" {
outputFilename = "restic.exe"
}
cwd, err := os.Getwd()
if err != nil {
die("Getwd() returned %v\n", err)
}
output := filepath.Join(cwd, outputFilename)
version := getVersion()
compileTime := time.Now().Format(timeFormat)
ldflags := fmt.Sprintf("-s -X main.compiledAt %q", compileTime)
constants := Constants{`main.compiledAt`: compileTime}
if version != "" {
ldflags = fmt.Sprintf("%s -X main.version %q", ldflags, version)
constants["main.version"] = version
}
ldflags := "-s -w " + constants.LDFlags()
verbosePrintf("ldflags: %s\n", ldflags)
args := []string{
"-tags", strings.Join(buildTags, " "),
"-ldflags", ldflags,
"-o", output, "github.com/restic/restic/cmd/restic",
"-o", output, "cmds/restic",
}
err = build(gopath, args...)
err = build(filepath.Join(gopath, "src"), targetGOOS, targetGOARCH, gopath, args...)
if err != nil {
die("build failed: %v\n", err)
}
@@ -325,7 +399,7 @@ func main() {
if runTests {
verbosePrintf("running tests\n")
err = test(gopath, "github.com/restic/restic/...")
err = test(filepath.Join(gopath, "src"), gopath, "restic/...")
if err != nil {
die("running tests failed: %v\n", err)
}


@@ -1,142 +0,0 @@
package checker_test
import (
"path/filepath"
"sort"
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/checker"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func list(repo *repository.Repository, t backend.Type) (IDs []string) {
done := make(chan struct{})
defer close(done)
for id := range repo.List(t, done) {
IDs = append(IDs, id.String())
}
return IDs
}
func checkPacks(chkr *checker.Checker) (errs []error) {
done := make(chan struct{})
defer close(done)
errChan := make(chan error)
go chkr.Packs(errChan, done)
for err := range errChan {
errs = append(errs, err)
}
return errs
}
func checkStruct(chkr *checker.Checker) (errs []error) {
done := make(chan struct{})
defer close(done)
errChan := make(chan error)
go chkr.Structure(errChan, done)
for err := range errChan {
errs = append(errs, err)
}
return errs
}
func TestCheckRepo(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
chkr := checker.New(repo)
OK(t, chkr.LoadIndex())
OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))
})
}
func TestMissingPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
OK(t, repo.Backend().Remove(backend.Data, packID))
chkr := checker.New(repo)
OK(t, chkr.LoadIndex())
errs := checkPacks(chkr)
Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
})
}
func TestUnreferencedPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
// index 8eb5 only references pack 60e0
indexID := "8eb5b61062bf8e959f244fba0c971108bc8d4d2a4b236f71a704998e28cc5cf6"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
OK(t, repo.Backend().Remove(backend.Index, indexID))
chkr := checker.New(repo)
OK(t, chkr.LoadIndex())
errs := checkPacks(chkr)
Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String())
} else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
}
})
}
func TestUnreferencedBlobs(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
OK(t, repo.Backend().Remove(backend.Snapshot, snID))
unusedBlobsBySnapshot := backend.IDs{
ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
}
sort.Sort(unusedBlobsBySnapshot)
chkr := checker.New(repo)
OK(t, chkr.LoadIndex())
OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr))
blobs := chkr.UnusedBlobs()
sort.Sort(blobs)
Equals(t, unusedBlobsBySnapshot, blobs)
})
}


@@ -1,106 +0,0 @@
package main
import (
"errors"
"fmt"
"os"
"github.com/restic/restic/backend"
"github.com/restic/restic/checker"
)
type CmdCheck struct {
ReadData bool `long:"read-data" description:"Read data blobs" default:"false"`
RemoveOrphaned bool `long:"remove" description:"Remove data that isn't used" default:"false"`
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("check",
"check the repository",
"The check command check the integrity and consistency of the repository",
&CmdCheck{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdCheck) Usage() string {
return "[check-options]"
}
func (cmd CmdCheck) Execute(args []string) error {
if len(args) != 0 {
return errors.New("check has no arguments")
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
cmd.global.Verbosef("Create exclusive lock for repository\n")
lock, err := lockRepoExclusive(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
chkr := checker.New(repo)
cmd.global.Verbosef("Load indexes\n")
if err = chkr.LoadIndex(); err != nil {
return err
}
done := make(chan struct{})
defer close(done)
errorsFound := false
errChan := make(chan error)
cmd.global.Verbosef("Check all packs\n")
go chkr.Packs(errChan, done)
foundOrphanedPacks := false
for err := range errChan {
errorsFound = true
fmt.Fprintf(os.Stderr, "%v\n", err)
if e, ok := err.(checker.PackError); ok && e.Orphaned {
foundOrphanedPacks = true
}
}
cmd.global.Verbosef("Check snapshots, trees and blobs\n")
errChan = make(chan error)
go chkr.Structure(errChan, done)
for err := range errChan {
errorsFound = true
fmt.Fprintf(os.Stderr, "error: %v\n", err)
}
for _, id := range chkr.UnusedBlobs() {
cmd.global.Verbosef("unused blob %v\n", id.Str())
}
if foundOrphanedPacks && cmd.RemoveOrphaned {
IDs := chkr.OrphanedPacks()
cmd.global.Verbosef("Remove %d orphaned packs... ", len(IDs))
for _, id := range IDs {
if err := repo.Backend().Remove(backend.Data, id.String()); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
}
}
cmd.global.Verbosef("done\n")
}
if errorsFound {
return errors.New("repository contains errors")
}
return nil
}


@@ -1,152 +0,0 @@
// +build debug
package main
import (
"encoding/json"
"fmt"
"io"
"os"
"github.com/juju/errors"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/pack"
"github.com/restic/restic/repository"
)
type CmdDump struct {
global *GlobalOptions
}
func init() {
_, err := parser.AddCommand("dump",
"dump data structures",
"The dump command dumps data structures from a repository as JSON documents",
&CmdDump{global: &globalOpts})
if err != nil {
panic(err)
}
}
func (cmd CmdDump) Usage() string {
return "[index|snapshots|trees|all]"
}
func prettyPrintJSON(wr io.Writer, item interface{}) error {
buf, err := json.MarshalIndent(item, "", " ")
if err != nil {
return err
}
_, err = wr.Write(append(buf, '\n'))
return err
}
func printSnapshots(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)
for id := range repo.List(backend.Snapshot, done) {
snapshot, err := restic.LoadSnapshot(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
continue
}
fmt.Fprintf(wr, "snapshot_id: %v\n", id)
err = prettyPrintJSON(wr, snapshot)
if err != nil {
return err
}
}
return nil
}
func printTrees(repo *repository.Repository, wr io.Writer) error {
done := make(chan struct{})
defer close(done)
trees := []backend.ID{}
for blob := range repo.Index().Each(done) {
if blob.Type != pack.Tree {
continue
}
trees = append(trees, blob.ID)
}
for _, id := range trees {
tree, err := restic.LoadTree(repo, id)
if err != nil {
fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
continue
}
fmt.Fprintf(wr, "tree_id: %v\n", id)
prettyPrintJSON(wr, tree)
}
return nil
}
func (cmd CmdDump) Execute(args []string) error {
if len(args) != 1 {
return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
tpe := args[0]
switch tpe {
case "index":
return repo.Index().Dump(os.Stdout)
case "snapshots":
return printSnapshots(repo, os.Stdout)
case "trees":
return printTrees(repo, os.Stdout)
case "all":
fmt.Printf("snapshots:\n")
err := printSnapshots(repo, os.Stdout)
if err != nil {
return err
}
fmt.Printf("\ntrees:\n")
err = printTrees(repo, os.Stdout)
if err != nil {
return err
}
fmt.Printf("\nindex:\n")
err = repo.Index().Dump(os.Stdout)
if err != nil {
return err
}
return nil
default:
return errors.Errorf("no such type %q", tpe)
}
}


@@ -1,203 +0,0 @@
package main
import (
"errors"
"fmt"
"io"
"net/url"
"os"
"github.com/jessevdk/go-flags"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/sftp"
"github.com/restic/restic/repository"
"golang.org/x/crypto/ssh/terminal"
)
var version = "compiled manually"
var compiledAt = "unknown time"
type GlobalOptions struct {
Repo string `short:"r" long:"repo" description:"Repository directory to backup to/restore from"`
CacheDir string ` long:"cache-dir" description:"Directory to use as a local cache"`
Quiet bool `short:"q" long:"quiet" default:"false" description:"Do not output comprehensive progress report"`
password string
stdout io.Writer
stderr io.Writer
}
var globalOpts = GlobalOptions{stdout: os.Stdout, stderr: os.Stderr}
var parser = flags.NewParser(&globalOpts, flags.Default)
func (o GlobalOptions) Printf(format string, args ...interface{}) {
_, err := fmt.Fprintf(o.stdout, format, args...)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stdout: %v\n", err)
os.Exit(100)
}
}
func (o GlobalOptions) Verbosef(format string, args ...interface{}) {
if o.Quiet {
return
}
o.Printf(format, args...)
}
func (o GlobalOptions) ShowProgress() bool {
if o.Quiet {
return false
}
if !terminal.IsTerminal(int(os.Stdout.Fd())) {
return false
}
return true
}
func (o GlobalOptions) Warnf(format string, args ...interface{}) {
_, err := fmt.Fprintf(o.stderr, format, args...)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to write to stderr: %v\n", err)
os.Exit(100)
}
}
func (o GlobalOptions) Exitf(exitcode int, format string, args ...interface{}) {
if format[len(format)-1] != '\n' {
format += "\n"
}
o.Warnf(format, args...)
os.Exit(exitcode)
}
func (o GlobalOptions) ReadPassword(prompt string) string {
fmt.Fprint(os.Stderr, prompt)
pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
o.Exitf(2, "unable to read password: %v", err)
}
fmt.Fprintln(os.Stderr)
if len(pw) == 0 {
o.Exitf(1, "an empty password is not a password")
}
return string(pw)
}
func (o GlobalOptions) ReadPasswordTwice(prompt1, prompt2 string) string {
pw1 := o.ReadPassword(prompt1)
pw2 := o.ReadPassword(prompt2)
if pw1 != pw2 {
o.Exitf(1, "passwords do not match")
}
return pw1
}
func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
if o.Repo == "" {
return nil, errors.New("Please specify repository location (-r)")
}
be, err := open(o.Repo)
if err != nil {
return nil, err
}
s := repository.New(be)
if o.password == "" {
o.password = o.ReadPassword("enter password for repository: ")
}
err = s.SearchKey(o.password)
if err != nil {
return nil, fmt.Errorf("unable to open repo: %v", err)
}
return s, nil
}
// Open the backend specified by URI.
// Valid formats are:
// * /foo/bar -> local repository at /foo/bar
// * s3://region/bucket -> amazon s3 bucket
// * sftp://user@host/foo/bar -> remote sftp repository on host for user at path foo/bar
// * sftp://host//tmp/backup -> remote sftp repository on host at path /tmp/backup
// * c:\temp -> local repository at c:\temp - the path must exist
func open(u string) (backend.Backend, error) {
// check if the url is a directory that exists
fi, err := os.Stat(u)
if err == nil && fi.IsDir() {
return local.Open(u)
}
url, err := url.Parse(u)
if err != nil {
return nil, err
}
if url.Scheme == "" {
return local.Open(url.Path)
}
if len(url.Path) < 1 {
return nil, fmt.Errorf("unable to parse url %v", url)
}
if url.Scheme == "s3" {
return s3.Open(url.Host, url.Path[1:])
}
args := []string{url.Host}
if url.User != nil && url.User.Username() != "" {
args = append(args, "-l")
args = append(args, url.User.Username())
}
args = append(args, "-s")
args = append(args, "sftp")
return sftp.Open(url.Path[1:], "ssh", args...)
}
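// Illustrative calls (a sketch; errors ignored):
//   open("/srv/restic-repo")          -> local.Open("/srv/restic-repo")
//   open("s3://eu-west-1/my-bucket")  -> s3.Open("eu-west-1", "my-bucket")
//   open("sftp://user@host/foo/bar")  -> sftp.Open("foo/bar", "ssh", "host", "-l", "user", "-s", "sftp")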
// Create the backend specified by URI.
func create(u string) (backend.Backend, error) {
	// check if the url is a directory that exists
	fi, err := os.Stat(u)
	if err == nil && fi.IsDir() {
		return local.Create(u)
	}

	url, err := url.Parse(u)
	if err != nil {
		return nil, err
	}

	if url.Scheme == "" {
		return local.Create(url.Path)
	}

	if len(url.Path) < 1 {
		return nil, fmt.Errorf("unable to parse url %v", url)
	}

	if url.Scheme == "s3" {
		return s3.Open(url.Host, url.Path[1:])
	}

	args := []string{url.Host}
	if url.User != nil && url.User.Username() != "" {
		args = append(args, "-l")
		args = append(args, url.User.Username())
	}
	args = append(args, "-s")
	args = append(args, "sftp")
	return sftp.Create(url.Path[1:], "ssh", args...)
}


@@ -12,7 +12,7 @@ several subdirectories. A repository implementation must be able to fulfill a
 number of operations, e.g. list the contents.
 *Blob*: A Blob combines a number of data bytes with identifying information
-like the SHA256 hash of the data and its length.
+like the SHA-256 hash of the data and its length.
 *Pack*: A Pack combines one or more Blobs, e.g. in a single file.
@@ -164,7 +164,7 @@ Data and Tree Blobs, so the outer structure is `IV || Ciphertext || MAC` again.
 The plaintext consists of a JSON document like the following:
 {
-  "obsolete": [
+  "supersedes": [
     "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
   ],
   "packs": [
@@ -197,10 +197,9 @@ This JSON document lists Packs and the blobs contained therein. In this
 example, the Pack `73d04e61` contains two data Blobs and one Tree blob, the
 plaintext hashes are listed afterwards.
-The field `obsolete` lists the storage IDs of index files that have been
+The field `supersedes` lists the storage IDs of index files that have been
 replaced with the current index file. This happens when index files are
-repacked, this happens for example when old snapshots are removed and Packs are
-recombined.
+repacked, for example when old snapshots are removed and Packs are recombined.
 There may be an arbitrary number of index files, containing information on
 non-disjoint sets of Packs. The number of packs described in a single file is
@@ -246,18 +245,18 @@ repository password. This is then used with `scrypt`, a key derivation function
 bytes. The first 32 bytes are used as the encryption key (for AES-256) and the
 last 32 bytes are used as the message authentication key (for Poly1305-AES).
 These last 32 bytes are divided into a 16 byte AES key `k` followed by 16 bytes
-of secret key `r`. They key `r` is then masked for use with Poly1305 (see the
+of secret key `r`. The key `r` is then masked for use with Poly1305 (see the
 paper for details).
-This message authentication key is used to compute a MAC over the bytes contained
-in the JSON field `data` (after removing the Base64 encoding and not including
-the last 32 byte). If the password is incorrect or the key file has been
-tampered with, the computed MAC will not match the last 16 bytes of the data,
-and restic exits with an error. Otherwise, the data is decrypted with the
-encryption key derived from `scrypt`. This yields a JSON document which
-contains the master encryption and message authentication keys for this
-repository (encoded in Base64). The command `restic cat masterkey` can be used
-as follows to decrypt and pretty-print the master key:
+Those message authentication keys (`k` and `r`) are used to compute a MAC over
+the bytes contained in the JSON field `data` (after removing the Base64
+encoding and not including the last 32 byte). If the password is incorrect or
+the key file has been tampered with, the computed MAC will not match the last
+16 bytes of the data, and restic exits with an error. Otherwise, the data is
+decrypted with the encryption key derived from `scrypt`. This yields a JSON
+document which contains the master encryption and message authentication keys
+for this repository (encoded in Base64). The command `restic cat masterkey` can
+be used as follows to decrypt and pretty-print the master key:
 $ restic -r /tmp/restic-repo cat masterkey
 {
@@ -315,9 +314,8 @@ Trees and Data
 --------------
 A snapshot references a tree by the SHA-256 hash of the JSON string
-representation of its contents. Trees are saved in a subdirectory of the
-directory `trees`. The sub directory's name is the first two characters of the
-filename the tree object is stored in.
+representation of its contents. Trees and data are saved in pack files in a
+subdirectory of the directory `data`.
 The command `restic cat tree` can be used to inspect the tree referenced above:
@@ -462,10 +460,10 @@ General assumptions:
 The restic backup program guarantees the following:
-* Accessing the unencrypted content of stored files and meta data should not
+* Accessing the unencrypted content of stored files and metadata should not
   be possible without a password for the repository. Everything except the
-  `version` and `id` files and the meta data included for informational
-  purposes in the key files is encrypted and authenticated.
+  metadata included for informational purposes in the key files is encrypted and
+  authenticated.
 * Modifications (intentional or unintentional) can be detected automatically
   on several layers:

doc/Manual.md (new file, 432 lines)

@@ -0,0 +1,432 @@
Thanks for using restic. This document will give you an overview of the basic
functionality provided by restic.
# Building/installing restic
If you are using Mac OS X, you can install restic using the
[homebrew](http://brew.sh/) package manager:
$ brew tap restic/restic
$ brew install restic
On Arch Linux, there is a package called `restic-git` which can be installed from the AUR, e.g. with `pacaur`:
$ pacaur -S restic-git
On Debian stable, you can install Go directly from the repositories (as root):
$ apt-get install golang-go
After Go is installed, continue with the `git clone` step below.
If you are using Linux, BSD or Windows, the only way to install restic on your
system right now is to compile it from source. restic is written in the Go
programming language and you need at least Go version 1.3. See the [Getting
started](https://golang.org/doc/install) guide of the Go project for
instructions on how to install Go.
In order to build restic from source, execute the following steps:
$ git clone https://github.com/restic/restic
[...]
$ cd restic
$ go run build.go
At the moment, the only tested compiler for restic is the official Go compiler.
Building restic with gccgo may work, but is not supported.
Usage help is available:
$ ./restic --help
Usage:
restic [OPTIONS] <command>
Application Options:
-r, --repo= Repository directory to backup to/restore from
Help Options:
-h, --help Show this help message
Available commands:
backup save file/directory
cache manage cache
cat dump something
find find a file/directory
fsck check the repository
init create repository
key manage keys
list lists data
ls list files
restore restore a snapshot
snapshots show snapshots
version display version
Similar to programs such as `git`, restic has a number of sub-commands. You can
see these commands in the listing above. Each sub-command may have its own
command-line options, and there is a help option for each command which lists
them, e.g. for the `backup` command:
$ ./restic backup --help
Usage:
restic [OPTIONS] backup DIR/FILE [DIR/FILE] [...]
The backup command creates a snapshot of a file or directory
Application Options:
-r, --repo= Repository directory to backup to/restore from
--cache-dir= Directory to use as a local cache
-q, --quiet Do not output comprehensive progress report (false)
--no-lock Do not lock the repo, this allows some operations on read-only repos. (false)
Help Options:
-h, --help Show this help message
[backup command options]
-p, --parent= use this parent snapshot (default: last snapshot in repo that has the same target)
-f, --force Force re-reading the target. Overrides the "parent" flag
-e, --exclude= Exclude a pattern (can be specified multiple times)
# Initialize a repository
First, we need to create a "repository". This is the place where your backups
will be saved.
In order to create a repository at `/tmp/backup`, run the following command and
enter the same password twice:
$ restic init --repo /tmp/backup
enter password for new backend:
enter password again:
created restic backend 085b3c76b9 at /tmp/backup
Please note that knowledge of your password is required to access the repository.
Losing your password means that your data is irrecoverably lost.
Remembering your password is important! If you lose it, you won't be able to
access data stored in the repository.
For automated backups, restic accepts the repository location in the
environment variable `RESTIC_REPOSITORY` and also the password in the variable
`RESTIC_PASSWORD`.
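
For illustration, here is a minimal, self-contained sketch of how a wrapper
program could pick up these two variables (an assumption for demonstration;
restic itself reads them internally):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// When unset, the repository must be given with -r and the
	// password is read from an interactive prompt instead.
	repo := os.Getenv("RESTIC_REPOSITORY")
	pw := os.Getenv("RESTIC_PASSWORD")
	fmt.Println("repository:", repo, "/ password set:", pw != "")
}
```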
## Password prompt on Windows
At the moment, restic only supports the default Windows console interaction.
If you use emulation environments like [MSYS2](https://msys2.github.io/) or
[Cygwin](https://www.cygwin.com/), which use terminals like `Mintty` or `rxvt`,
you may get a password error.
You can work around this by using a special tool called `winpty` (see
[here](https://sourceforge.net/p/msys2/wiki/Porting/) and
[here](https://github.com/rprichard/winpty) for detailed information). On MSYS2,
you can install `winpty` as follows:
$ pacman -S winpty
$ winpty restic -r /tmp/backup init
# Create a snapshot
Now we're ready to back up some data. The contents of a directory at a specific
point in time are called a "snapshot" in restic. Run the following command and
enter the repository password you chose above again:
$ restic -r /tmp/backup backup ~/work
enter password for repository:
scan [/home/user/work]
scanned 764 directories, 1816 files in 0:00
[0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
duration: 0:29, 54.47MiB/s
snapshot 40dc1520 saved
As you can see, restic created a backup of the directory and was pretty fast!
The specific snapshot just created is identified by a sequence of hexadecimal
characters, `40dc1520` in this case.
If you run the command again, restic will create another snapshot of your data,
but this time it's even faster. This is de-duplication at work!
$ restic -r /tmp/backup backup ~/shared/work/web
enter password for repository:
using parent snapshot 40dc1520aa6a07b7b3ae561786770a01951245d2367241e71e9485f18ae8228c
scan [/home/user/work]
scanned 764 directories, 1816 files in 0:00
[0:00] 100.00% 0B/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
duration: 0:00, 6572.38MiB/s
snapshot 79766175 saved
You can even backup individual files in the same repository.
$ restic -r /tmp/backup backup ~/work.txt
scan [~/work.txt]
scanned 0 directories, 1 files in 0:00
[0:00] 100.00% 0B/s 220B / 220B 1 / 1 items 0 errors ETA 0:00
duration: 0:00, 0.03MiB/s
snapshot 31f7bd63 saved
In fact, several hosts may use the same repository to back up directories and
files, leading to greater de-duplication.
You can exclude folders and files by specifying exclude patterns.
Either specify them with multiple `--exclude` options or with one `--exclude-file`:
$ cat exclude
# exclude go-files
*.go
# exclude foo/x/y/z/bar foo/x/bar foo/bar
foo/**/bar
$ restic -r /tmp/backup backup ~/work --exclude=*.c --exclude-file=exclude
Patterns use [`filepath.Glob`](https://golang.org/pkg/path/filepath/#Glob) internally;
see [`filepath.Match`](https://golang.org/pkg/path/filepath/#Match) for the syntax.
Additionally, `**` matches arbitrary subdirectories; plain `*` stops at path
separators, as the sketch below shows.
Environment variables in exclude files are expanded with [`os.ExpandEnv`](https://golang.org/pkg/os/#ExpandEnv).
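
The difference between `*` and `**` can be seen with a short, self-contained
sketch of the underlying `filepath.Match` semantics (plain Go, not restic's
exclude code):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Match implements the basic glob syntax used for exclude
	// patterns; a single `*` never crosses a path separator, which is
	// why restic adds `**` on top of it.
	cases := []struct{ pattern, path string }{
		{"*.go", "main.go"},          // matches
		{"foo/*/bar", "foo/x/bar"},   // matches
		{"foo/*/bar", "foo/x/y/bar"}, // no match: `*` stops at `/`
	}
	for _, c := range cases {
		ok, err := filepath.Match(c.pattern, c.path)
		fmt.Printf("%-12q vs %-14q -> %v (err=%v)\n", c.pattern, c.path, ok, err)
	}
}
```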
## Reading data from stdin
Sometimes it can be nice to directly save the output of a program, e.g.
`mysqldump`, so that the SQL can later be restored. Restic supports this mode
of operation; just supply the option `--stdin` to the `backup` command like this:
$ mysqldump [...] | restic -r /tmp/backup backup --stdin
This creates a new snapshot of the output of `mysqldump`. You can then use e.g.
the fuse mounting option (see below) to mount the repository and read the file.
By default, the file name `stdin` is used, a different name can be specified
with `--stdin-filename`, e.g. like this:
$ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql
# List all snapshots
Now, you can list all the snapshots stored in the repository:
$ restic -r /tmp/backup snapshots
enter password for repository:
ID Date Host Directory
----------------------------------------------------------------------
40dc1520 2015-05-08 21:38:30 kasimir /home/user/work
79766175 2015-05-08 21:40:19 kasimir /home/user/work
bdbd3439 2015-05-08 21:45:17 luigi /home/art
590c8fc8 2015-05-08 21:47:38 kazik /srv
9f0bc19e 2015-05-08 21:46:11 luigi /srv
You can filter the listing by directory path:
$ restic -r /tmp/backup snapshots --path="/srv"
enter password for repository:
ID Date Host Directory
----------------------------------------------------------------------
590c8fc8 2015-05-08 21:47:38 kazik /srv
9f0bc19e 2015-05-08 21:46:11 luigi /srv
Or filter by host:
$ restic -r /tmp/backup snapshots --host luigi
enter password for repository:
ID Date Host Directory
----------------------------------------------------------------------
bdbd3439 2015-05-08 21:45:17 luigi /home/art
9f0bc19e 2015-05-08 21:46:11 luigi /srv
Combining filters is also possible.
# Restore a snapshot
Restoring a snapshot is as easy as it sounds, just use the following command to
restore the contents of the latest snapshot to `/tmp/restore-work`:
$ restic -r /tmp/backup restore 79766175 --target ~/tmp/restore-work
enter password for repository:
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
Use the word `latest` to restore the last backup. You can also combine `latest`
with the `--host` and `--path` filters to choose the last backup for a specific
host, path or both.
$ restic -r /tmp/backup restore latest --target ~/tmp/restore-work --path "/home/art" --host luigi
enter password for repository:
restoring <Snapshot of [/home/art] at 2015-05-08 21:45:17.884408621 +0200 CEST> to /tmp/restore-work
# Manage repository keys
The `key` command allows you to set multiple access keys or passwords per
repository. In fact, you can use the `list`, `add`, `remove` and `passwd`
sub-commands to manage these keys very precisely:
$ restic -r /tmp/backup key list
enter password for repository:
ID User Host Created
----------------------------------------------------------------------
*eb78040b username kasimir 2015-08-12 13:29:57
$ restic -r /tmp/backup key add
enter password for repository:
enter password for new key:
enter password again:
saved new key as <Key of username@kasimir, created on 2015-08-12 13:35:05.316831933 +0200 CEST>
$ restic -r backup key list
enter password for repository:
ID User Host Created
----------------------------------------------------------------------
5c657874 username kasimir 2015-08-12 13:35:05
*eb78040b username kasimir 2015-08-12 13:29:57
# Check integrity and consistency
Imagine your repository is saved on a server that has a faulty hard drive, or
even worse, attackers gain privileged access and modify your backup with the
intention of making you restore malicious data:
$ sudo echo "boom" >> backup/index/d795ffa99a8ab8f8e42cec1f814df4e48b8f49129360fb57613df93739faee97
In order to detect these things, it is a good idea to regularly use the `check`
command to test that your precious backup data is consistent and its integrity
is unharmed:
$ restic -r /tmp/backup check
Load indexes
ciphertext verification failed
Trying to restore a snapshot which has been modified as shown above will yield
the same error:
$ restic -r /tmp/backup restore 79766175 --target ~/tmp/restore-work
Load indexes
ciphertext verification failed
# Mount a repository
Browsing your backup as a regular file system is also very easy. First, create
a mount point such as `/mnt/restic` and then use the following command to serve
the repository with FUSE:
$ mkdir /mnt/restic
$ restic -r /tmp/backup mount /mnt/restic
enter password for repository:
Now serving /tmp/backup at /mnt/restic
Don't forget to `umount` after quitting!
Windows doesn't support FUSE directly. Projects like
[dokan](http://dokan-dev.github.io/) try to fill the gap. We haven't tested it
yet, but we'd like to hear about your experience. For setup information see
[dokan FUSE in dokan's wiki](https://github.com/dokan-dev/dokany/wiki/FUSE).
# Create an SFTP repository
In order to back up data via SFTP, you must first set up a server with SSH and
let it know your public key. Passwordless login is really important, since
restic fails to connect to the repository if the server prompts for
credentials.
Once the server is configured, the setup of the SFTP repository can simply be
achieved by changing the URL scheme in the `init` command:
$ restic -r sftp://user@host//tmp/backup init
enter password for new backend:
enter password again:
created restic backend f1c6108821 at sftp://user@host//tmp/backup
Please note that knowledge of your password is required to access the repository.
Losing your password means that your data is irrecoverably lost.
Yes, that's really two slash (`/`) characters after the host name; here the
directory `/tmp/backup` on the server is meant. If you'd rather create a
repository in the user's home directory on the server, use the location
`sftp://user@host/foo/bar/repo`. In this case the directory is relative to the
user's home directory: `foo/bar/repo`.
# Create an Amazon S3 repository
Restic can back up data to any Amazon S3 bucket. However, in this case, changing the URL scheme is not enough, since Amazon uses special security credentials to sign HTTP requests. Consequently, you must first set up the following environment variables with the credentials you obtained while creating the bucket.
$ export AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
$ export AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
You can then easily initialize a repository that uses Amazon S3 as a backend.
$ restic -r s3://s3.amazonaws.com/bucket_name init
enter password for new backend:
enter password again:
created restic backend eefee03bbd at s3://s3.amazonaws.com/bucket_name
Please note that knowledge of your password is required to access the repository.
Losing your password means that your data is irrecoverably lost.
For an S3-compatible repository without TLS available, use the alternative URI
protocol `s3:http://server:port/bucket_name`.
## Create a Minio Server repository
[Minio](https://www.minio.io) is an open-source object storage server, written in Go and compatible with the AWS S3 API.
### Prerequisites
* Download and install [Minio Server](https://minio.io/download/).
* You can also refer to [https://docs.minio.io](https://docs.minio.io) for step-by-step guidance on installing and getting started with the Minio Client and Minio Server.
You must first set up the following environment variables with the credentials of your running Minio Server.
$ export AWS_ACCESS_KEY_ID=<YOUR-MINIO-ACCESS-KEY-ID>
$ export AWS_SECRET_ACCESS_KEY=<YOUR-MINIO-SECRET-ACCESS-KEY>
Now you can easily initialize restic to use the Minio server as a backend with this command.
$ ./restic -r s3:http://localhost:9000/restic init
enter password for new backend:
enter password again:
created restic backend 6ad29560f5 at s3:http://localhost:9000/restic
Please note that knowledge of your password is required to access
the repository. Losing your password means that your data is irrecoverably lost.
# Debugging restic
The program can be built with debug support like this:
$ go run build.go -tags debug
Afterwards, extensive debug messages are written to the file named in the
environment variable `RESTIC_DEBUG`, e.g.:
$ RESTIC_DEBUG=/tmp/restic-debug.log restic backup ~/work
If you suspect that there is a bug, you can have a look at the debug log.
Please be aware that the debug log might contain sensitive information such as
file and directory names.
# Under the hood: Browse repository objects
Internally, a repository stores data of several different types described in the [design documentation](https://github.com/restic/restic/blob/master/doc/Design.md). You can `list` objects such as blobs, packs, index, snapshots, keys or locks with the following command:
```shell
$ restic -r /tmp/backup list snapshots
d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
```
The `find` command searches for a given
[pattern](http://golang.org/pkg/path/filepath/#Match) in the repository.
$ restic -r backup find test.txt
debug log file restic.log
debug enabled
enter password for repository:
found 1 matching entries in snapshot 196bc5760c909a7681647949e80e5448e276521489558525680acf1bd428af36
-rw-r--r-- 501 20 5 2015-08-26 14:09:57 +0200 CEST path/to/test.txt
The `cat` command allows you to display the JSON representation of the objects
or their raw content.
$ restic -r /tmp/backup cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
enter password for repository:
{
"time": "2015-08-12T12:52:44.091448856+02:00",
"tree": "05cec17e8d3349f402576d02576a2971fc0d9f9776ce2f441c7010849c4ff5af",
"paths": [
"/home/user/work"
],
"hostname": "kasimir",
"username": "username",
"uid": 501,
"gid": 20
}
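
As described in the design documentation, restic names objects after the
SHA-256 hash of their contents, so the 64-character IDs printed by `list`,
`find` and `cat` are ordinary SHA-256 digests. A minimal sketch of that naming
scheme (plain Go, not restic code; the sample bytes are made up):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// Hypothetical object content; a 64-character hexadecimal name like
	// those shown by `restic list` is the SHA-256 digest of the content.
	content := []byte(`{"example": "object"}`)
	fmt.Printf("%x\n", sha256.Sum256(content))
}
```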


@@ -4,7 +4,7 @@
 # Contributor: Alexander Neumann <alexander@bumpern.de>
 options=(!strip)
 pkgname=restic-git
-pkgver=r702.c143f8c
+pkgver=v0.1.0.r172.g1f1b8e1
 pkgrel=1
 pkgdesc="restic is a program that does backups right."
 arch=('i686' 'x86_64')
@@ -21,7 +21,7 @@ importpath='github.com/restic/restic'
 pkgver() {
   cd "$pkgname"
-  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)";
+  git describe --long | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
 }
 build() {

doc/REST_backend.md (new file, 59 lines)

@@ -0,0 +1,59 @@
REST Backend
============
Restic can interact with an HTTP backend that respects the following REST API. The
following values are valid for `{type}`: `data`, `keys`, `locks`, `snapshots`,
`index`, `config`. `{path}` is a path to the repository, so that multiple
different repositories can be accessed. The default path is `/`.
## HEAD {path}/config
Returns "200 OK" if the repository has a configuration,
an HTTP error otherwise.
## GET {path}/config
Returns the content of the configuration file if the repository has a configuration,
an HTTP error otherwise.
Response format: binary/octet-stream
## POST {path}/config
Returns "200 OK" if the configuration of the request body has been saved,
an HTTP error otherwise.
## GET {path}/{type}/
Returns a JSON array containing the names of all the blobs stored for a given type.
Response format: JSON
## HEAD {path}/{type}/{name}
Returns "200 OK" if the blob with the given name and type is stored in the repository,
"404 not found" otherwise. If the blob exists, the HTTP header `Content-Length`
is set to the file size.
## GET {path}/{type}/{name}
Returns the content of the blob with the given name and type if it is stored in the repository,
"404 not found" otherwise.
If the request specifies a partial read with a Range header field,
then the status code of the response is 206 instead of 200
and the response only contains the specified range.
Response format: binary/octet-stream
## POST {path}/{type}/{name}
Returns "200 OK" if the content of the request body has been saved as a blob
with the given name and type, an HTTP error otherwise.
Request format: binary/octet-stream
## DELETE {path}/{type}/{name}
Returns "200 OK" if the blob with the given name and type has been deleted from the repository,
an HTTP error otherwise.
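
As a rough illustration of the API above, the following self-contained sketch
issues a `HEAD {path}/config` and a `GET {path}/{type}/` request with Go's
standard HTTP client. The server address `http://localhost:8000/` is a made-up
example, and error handling is kept minimal:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

const repo = "http://localhost:8000/" // hypothetical {path} of a repository

func main() {
	// HEAD {path}/config: "200 OK" means the repository has a configuration.
	resp, err := http.Head(repo + "config")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("HEAD config:", resp.Status)

	// GET {path}/{type}/: a JSON array with the names of all stored blobs.
	resp, err = http.Get(repo + "snapshots/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	names, _ := io.ReadAll(resp.Body)
	fmt.Println("GET snapshots/:", resp.Status, string(names))
}
```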

doc/index.md (new file, 117 lines)

@@ -0,0 +1,117 @@
Welcome to restic
=================
![restic logo](logo/logo.svg)
restic is a backup program that is fast, efficient and secure. On the left you
can find an overview of the documentation. The project's homepage is
<https://restic.github.io>, the source code repository can be found on GitHub
at the URL <https://github.com/restic/restic>.
Building and viewing the documentation
--------------------------------------
The documentation you're currently viewing may not match the version of restic
you have installed. If you cloned the repository manually, you can find the
right documentation in the directory `doc/`. If you're viewing this online at
<https://restic.readthedocs.io>, there is a small menu at the bottom left of
this page, where you can select the version.
The restic documentation is built with [MkDocs](http://www.mkdocs.org). After
installing it, you can edit and view the documentation locally by running:
$ mkdocs serve
INFO - Building documentation...
INFO - Cleaning site directory
[I 160221 12:33:57 server:271] Serving on http://127.0.0.1:8000
Afterwards visit the URL with a browser.
Design Principles
-----------------
Restic is a program that does backups right and was designed with the following
principles in mind:
* **Easy:** Doing backups should be a frictionless process, otherwise you might be
tempted to skip it. Restic should be easy to configure and use, so that, in
the event of a data loss, you can just restore it. Likewise,
restoring data should not be complicated.
* **Fast**: Backing up your data with restic should only be limited by your
network or hard disk bandwidth so that you can backup your files every day.
Nobody does backups if it takes too much time. Restoring backups should only
transfer data that is needed for the files that are to be restored, so that
this process is also fast.
* **Verifiable**: Much more important than backup is restore, so restic enables
you to easily verify that all data can be restored.
* **Secure**: Restic uses cryptography to guarantee confidentiality and integrity
of your data. The location where the backup data is stored is assumed not to be a
trusted environment (e.g. a shared space where others like system
administrators are able to access your backups). Restic is built to secure
your data against such attackers.
* **Efficient**: With the growth of data, additional snapshots should only take
the storage of the actual increment. Even more, duplicate data should be
de-duplicated before it is actually written to the storage back end to save
precious backup space.
Compatibility
-------------
Backward compatibility for backups is important so that our users are always
able to restore saved data. Therefore restic follows [Semantic
Versioning](http://semver.org) to clearly define which versions are compatible.
The repository and data structures contained therein are considered the "Public
API" in the sense of Semantic Versioning. This goes for all released versions
of restic; it may not be the case for the master branch.
We guarantee backward compatibility of all repositories within one major version;
as long as we do not increment the major version, data can be read and restored.
We strive to be fully backward compatible to all prior versions.
Contribute and Documentation
----------------------------
Contributions are welcome! More information can be found in the document
[`CONTRIBUTING.md`](https://github.com/restic/restic/blob/master/CONTRIBUTING.md).
Contact
-------
If you discover a bug, find something surprising or if you would like to
discuss or ask something, please [open a GitHub
issue](https://github.com/restic/restic/issues/new). If you would like to chat
about restic, there is also the IRC channel #restic on irc.freenode.net.
**Important**: If you discover something that you believe to be a possible
critical security problem, please do *not* open a GitHub issue but send an
email directly to alexander@bumpern.de. If possible, please encrypt your email
using the following PGP key
([0x91A6868BD3F7A907](https://pgp.mit.edu/pks/lookup?op=get&search=0xCF8F18F2844575973F79D4E191A6868BD3F7A907)):
```
pub 4096R/91A6868BD3F7A907 2014-11-01
Key fingerprint = CF8F 18F2 8445 7597 3F79 D4E1 91A6 868B D3F7 A907
uid Alexander Neumann <alexander@bumpern.de>
sub 4096R/D5FC2ACF4043FDF1 2014-11-01
```
Talks
-----
The following talks will be or have been given about restic:
* 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016, Brussels, Belgium
* 2016-01-29: [restic - Backups mal richtig](https://media.ccc.de/v/c4.openchaos.2016.01.restic): Public lecture in German at [CCC Cologne e.V.](https://koeln.ccc.de) in Cologne, Germany
* 2015-08-23: [A Solution to the Backup Inconvenience](https://programm.froscon.de/2015/events/1515.html): Lecture at [FROSCON 2015](https://www.froscon.de) in Bonn, Germany
* 2015-02-01: [Lightning Talk at FOSDEM 2015](https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s): A short introduction (with slightly outdated command line)
* 2015-01-27: [Talk about restic at CCC Aachen](https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content) (in German)
License
=======
Restic is licensed under "BSD 2-Clause License". You can find the complete text
in the file `LICENSE`.

View File

@@ -1,884 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="210mm"
height="297mm"
id="svg4198"
version="1.1"
inkscape:version="0.48.5 r10040"
sodipodi:docname="structure.svg">
<defs
id="defs3">
<marker
inkscape:stockid="Arrow2Mend"
orient="auto"
refY="0.0"
refX="0.0"
id="Arrow2Mend"
style="overflow:visible;">
<path
id="path5383"
style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
transform="scale(0.6) rotate(180) translate(0,0)" />
</marker>
<marker
inkscape:stockid="Arrow2Lend"
orient="auto"
refY="0.0"
refX="0.0"
id="Arrow2Lend"
style="overflow:visible;">
<path
id="path5377"
style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
transform="scale(1.1) rotate(180) translate(1,0)" />
</marker>
</defs>
<sodipodi:namedview
inkscape:document-units="mm"
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.3810888"
inkscape:cx="278.75145"
inkscape:cy="788.04056"
inkscape:current-layer="layer1"
showgrid="true"
units="mm"
inkscape:window-width="1362"
inkscape:window-height="729"
inkscape:window-x="0"
inkscape:window-y="18"
inkscape:window-maximized="0"
inkscape:snap-grids="false"
inkscape:snap-to-guides="true">
<inkscape:grid
type="xygrid"
id="grid4204"
empspacing="10"
visible="true"
enabled="true"
snapvisiblegridlinesonly="true"
dotted="false"
spacingx="1mm"
spacingy="1mm"
units="mm" />
</sodipodi:namedview>
<metadata
id="metadata4">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<rect
rx="3.5432446"
ry="3.5432444"
y="125.31244"
x="337.60443"
height="8.6873627"
width="31.788435"
id="rect5239-1"
style="fill:#ccff99;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<rect
rx="3.5432446"
ry="3.5432444"
y="450.72906"
x="266.07547"
height="8.6873627"
width="31.788435"
id="rect5342"
style="fill:#fdd99b;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<rect
style="fill:#986601;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none"
id="rect5344"
width="31.788435"
height="8.6873627"
x="270.97147"
y="460.72906"
ry="3.5432444"
rx="3.5432446" />
<rect
style="fill:#ccff99;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none"
id="rect5324"
width="31.788435"
height="8.6873627"
x="270.67154"
y="430.52609"
ry="3.5432444"
rx="3.5432446" />
<rect
rx="3.5432446"
ry="3.5432444"
y="420.34509"
x="265.94748"
height="8.6873627"
width="31.788435"
id="rect5326"
style="fill:#339900;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<rect
style="fill:#fdd99b;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none"
id="rect5303"
width="31.788435"
height="8.6873627"
x="265.94748"
y="390.34506"
ry="3.5432444"
rx="3.5432446" />
<rect
rx="3.5432446"
ry="3.5432444"
y="400.34506"
x="270.84348"
height="8.6873627"
width="31.788435"
id="rect5301"
style="fill:#986601;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="184.25194"
id="text3797"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3799"
x="88.58268"
y="184.25194"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">data/</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="77.952728"
id="text3801"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3803"
x="88.58268"
y="77.952728"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">keys/</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="113.3858"
id="text3809"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3811"
x="88.58268"
y="113.3858"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">snapshots/</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="276.37793"
id="text3813"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3815"
x="88.58268"
y="276.37793"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">trees/</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="148.81886"
id="text3817"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3819"
x="88.58268"
y="148.81886"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">maps/</tspan></text>
<rect
rx="3.5432444"
ry="3.5432444"
y="120.47243"
x="99.212608"
height="14.17323"
width="42.519684"
id="rect3840"
style="fill:#f0a513;fill-opacity:1;stroke:#000000;stroke-opacity:0" />
<text
sodipodi:linespacing="125%"
id="text3842"
y="131.10234"
x="102.75592"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="131.10234"
x="102.75592"
id="tspan3844"
sodipodi:role="line">6e4065</tspan></text>
<rect
style="fill:#aaccee;fill-opacity:1;stroke:#000000;stroke-opacity:0"
id="rect3864"
width="42.519684"
height="14.17323"
x="99.212608"
y="155.9055"
ry="3.5432444"
rx="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="102.75592"
y="166.53545"
id="text3866"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3868"
x="102.75592"
y="166.53545"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">b35a97</tspan></text>
<rect
rx="3.5432444"
ry="3.5432444"
y="201.96844"
x="120.47243"
height="14.17323"
width="42.519684"
id="rect3880"
style="fill:#986601;fill-opacity:1;stroke:#000000;stroke-opacity:0" />
<text
sodipodi:linespacing="125%"
id="text3882"
y="212.59839"
x="124.01575"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="212.59839"
x="124.01575"
id="tspan3884"
sodipodi:role="line">1bde93</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="109.84253"
y="198.42513"
id="text3964"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3966"
x="109.84253"
y="198.42513"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">1b/</tspan></text>
<rect
style="fill:#986601;fill-opacity:1;stroke:#000000;stroke-opacity:0"
id="rect3975"
width="42.519684"
height="14.17323"
x="120.47243"
y="230.31494"
ry="3.5432444"
rx="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="124.01574"
y="240.94489"
id="text3977"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan3979"
x="124.01574"
y="240.94489"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">47808f</tspan></text>
<text
sodipodi:linespacing="125%"
id="text3981"
y="226.77159"
x="109.84253"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
y="226.77159"
x="109.84253"
id="tspan3983"
sodipodi:role="line"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">47/</tspan></text>
<rect
rx="3.5432444"
ry="3.5432444"
y="248.03148"
x="120.47245"
height="14.17323"
width="42.519684"
id="rect3987"
style="fill:#986601;fill-opacity:1;stroke:#000000;stroke-opacity:0" />
<text
sodipodi:linespacing="125%"
id="text3989"
y="258.66144"
x="124.01575"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="258.66144"
x="124.01575"
id="tspan3991"
sodipodi:role="line">47853a</tspan></text>
<rect
style="fill:#ccff99;fill-opacity:1;stroke:#000000;stroke-opacity:0"
id="rect4039"
width="42.519684"
height="14.17323"
x="120.47243"
y="294.09445"
ry="3.5432444"
rx="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="124.01575"
y="304.7244"
id="text4041"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4043"
x="124.01575"
y="304.7244"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">967de5</tspan></text>
<text
sodipodi:linespacing="125%"
id="text4045"
y="290.55115"
x="109.84253"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="290.55115"
x="109.84253"
id="tspan4047"
sodipodi:role="line">9f/</tspan></text>
<rect
rx="3.5432444"
ry="3.5432444"
y="322.44095"
x="120.47243"
height="14.17323"
width="42.519684"
id="rect4051"
style="fill:#ccff99;fill-opacity:1;stroke:#000000;stroke-opacity:0" />
<text
sodipodi:linespacing="125%"
id="text4053"
y="333.07089"
x="124.01574"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="333.07089"
x="124.01574"
id="tspan4055"
sodipodi:role="line">a0c69f</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="109.84253"
y="318.89764"
id="text4057"
sodipodi:linespacing="125%"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
sodipodi:role="line"
id="tspan4059"
x="109.84253"
y="318.89764">a0/</tspan></text>
<rect
style="fill:#ccff99;fill-opacity:1;stroke:#000000;stroke-opacity:0"
id="rect4063"
width="42.519684"
height="14.17323"
x="120.47243"
y="350.78732"
ry="3.5432444"
rx="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="124.01575"
y="361.41727"
id="text4065"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4067"
x="124.01575"
y="361.41727"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">beb505</tspan></text>
<text
sodipodi:linespacing="125%"
id="text4069"
y="347.24402"
x="109.84253"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
y="347.24402"
x="109.84253"
id="tspan4071"
sodipodi:role="line"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">be/</tspan></text>
<g
id="g4085"
transform="translate(53.149608,-372.04724)">
<path
transform="translate(0,308.26769)"
inkscape:connector-curvature="0"
id="path4073"
d="m 24.80315,131.10236 0,7.08661 7.086614,0"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
transform="translate(0,308.26769)"
inkscape:connector-curvature="0"
id="path4075"
d="m 24.80315,138.18897 0,35.43307 7.086614,0"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
transform="translate(0,308.26769)"
inkscape:connector-curvature="0"
id="path4079"
d="m 24.80315,173.62204 c 0.262467,0.51087 0.262467,0.51087 0,0 l 0,35.43307 7.086614,0"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
transform="translate(0,308.26769)"
inkscape:connector-curvature="0"
id="path4081"
d="m 24.80315,209.05511 0,35.43307 7.086614,0"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
<path
transform="translate(0,308.26769)"
inkscape:connector-curvature="0"
id="path4083"
d="m 24.80315,244.48818 0,92.12599 7.086614,0"
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
</g>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 99.212598,187.79524 0,7.08661 7.086612,0"
id="path4116"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 99.212598,194.88186 0,28.34646 7.086612,0"
id="path4118"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 99.212597,279.92123 0,7.08662 7.086613,0"
id="path4120"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 99.212597,287.00785 0,28.34646 7.086613,0"
id="path4122"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 99.212597,315.35431 0,28.34645 7.086613,0"
id="path4126"
inkscape:connector-curvature="0" />
<text
sodipodi:linespacing="125%"
id="text4128"
y="375.59052"
x="88.58268"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="375.59052"
x="88.58268"
id="tspan4130"
sodipodi:role="line">tmp/</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="88.58268"
y="389.76376"
id="text4132"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4134"
x="88.58268"
y="389.76376"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">locks/</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 77.952758,272.83462 0,99.2126 7.08661,0"
id="path4140"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 77.952758,372.04722 0,14.17322 7.08661,0"
id="path4144"
inkscape:connector-curvature="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 77.952758,386.22044 0,14.17323 7.08661,0"
id="path4146"
inkscape:connector-curvature="0" />
<text
sodipodi:linespacing="125%"
id="text4156"
y="403.93698"
x="88.58268"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
xml:space="preserve"><tspan
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"
y="403.93698"
x="88.58268"
id="tspan4158"
sodipodi:role="line">version</tspan></text>
<g
id="g4485"
transform="translate(-17.71652,7.0866626)">
<rect
style="fill:#ffeeaa;fill-opacity:1;stroke:none"
id="rect7169"
width="42.519684"
height="14.17323"
x="116.92913"
y="77.95269"
ry="3.5432444"
rx="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="120.47243"
y="88.582603"
id="text7171"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan7173"
x="120.47243"
y="88.582603"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">c913e0</tspan></text>
</g>
<rect
style="fill:#339900;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-opacity:0"
id="rect4607"
width="31.404444"
height="8.6214886"
x="276.1517"
y="124.92723"
ry="3.5432446"
rx="3.5432446" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Monaco;-inkscape-font-specification:Monaco"
x="219.68503"
y="92.125961"
id="text4528"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
x="219.68503"
y="92.125961"
id="tspan4552"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">{time: 2015-01-01T13:12:13.40,</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="102.12596"
id="tspan4538"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"> dir: /data,</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="112.12596"
id="tspan4540"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"> hostname: kasimir,</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="122.12596"
id="tspan4542"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"> username: hans,</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="132.12596"
id="tspan4546"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"> tree: {id: 486f6c, sid: 967de5},</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="142.12596"
id="tspan4556"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace"> [...]</tspan><tspan
sodipodi:role="line"
x="219.68503"
y="152.12596"
id="tspan4560"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">}</tspan></text>
<rect
style="fill:#f0a513;fill-opacity:1;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0"
id="rect4519"
width="177.16533"
height="21.259836"
x="212.59842"
y="60.236198"
ry="3.5432441"
rx="3.5432444" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.77165354;stroke-miterlimit:2;stroke-opacity:1;stroke-dasharray:none"
id="rect4511"
width="177.16534"
height="106.29922"
x="212.59839"
y="60.236198"
rx="3.5432441"
ry="3.5432444" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="219.68501"
y="74.409424"
id="text4513"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4515"
x="219.68501"
y="74.409424">snapshot</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
x="272.83466"
y="74.409424"
id="text4521"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan4523"
x="272.83466"
y="74.409424"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Monospace;-inkscape-font-specification:Monospace">6e4065</tspan></text>
<text
xml:space="preserve"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
x="432.28345"
y="92.125946"
id="text5187"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
x="432.28345"
y="92.125946"
id="tspan5216" /></text>
<rect
style="fill:#339900;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none"
id="rect5328"
width="31.788435"
height="8.6873627"
x="290.65253"
y="290.55115"
ry="3.5432444"
rx="3.5432446" />
<rect
style="fill:#fdd99b;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none"
id="rect5316"
width="31.788435"
height="8.6873627"
x="333.17938"
y="350.39966"
ry="3.5432444"
rx="3.5432446" />
<rect
rx="3.5432446"
ry="3.5432444"
y="350.5314"
x="294.73447"
height="8.6873627"
width="31.788435"
id="rect5314"
style="fill:#fdd99b;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<rect
rx="3.5432439"
ry="3.5432444"
y="201.96848"
x="212.5984"
height="21.259878"
width="212.59843"
id="rect5259"
style="fill:#339900;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-miterlimit:4;stroke-opacity:0;stroke-dasharray:none" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.77165353;stroke-miterlimit:2;stroke-opacity:1;stroke-dasharray:none"
id="rect5253"
width="212.59843"
height="294.09451"
x="212.59842"
y="201.96849"
rx="3.5432439"
ry="3.5432451" />
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
x="219.68504"
y="216.14169"
id="text5255"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan5257"
x="219.68504"
y="216.14169"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:Sans;-inkscape-font-specification:Sans">tree</tspan></text>
<text
xml:space="preserve"
style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
x="248.03149"
y="216.14169"
id="text5261"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
id="tspan5263"
x="248.03149"
y="216.14169">486f6c</tspan></text>
<text
xml:space="preserve"
style="font-size:8px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Monospace;-inkscape-font-specification:Monospace"
x="219.68504"
y="237.40155"
id="text5265"
sodipodi:linespacing="125%"><tspan
sodipodi:role="line"
x="219.68504"
y="237.40155"
id="tspan5269">{ nodes: [</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="247.40155"
id="tspan3211"> {</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="257.40155"
id="tspan3241"> name: testdir1,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="267.40155"
id="tspan5271"> type: dir,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="277.40155"
id="tspan5273"> mode: 0755,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="287.40155"
id="tspan5275"> user: hans,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="297.40155"
id="tspan5281"> subtree: a8838f}</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="307.40155"
id="tspan5283"> },{</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="317.40155"
id="tspan3239"> name: testfile,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="327.40155"
id="tspan5285"> type: file,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="337.40155"
id="tspan5287"> mode: 0640,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="347.40155"
id="tspan5289"> user: hans,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="357.40155"
id="tspan5291"> content: [50f77b, ea0cc4, [...]]</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="367.40155"
id="tspan3251"> }],</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="377.40155"
id="tspan3255"> map:[</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="387.40155"
id="tspan3496"> {</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="397.40155"
id="tspan3522"> id: 50f77b,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="407.40155"
id="tspan3498"> sid: 1bde93</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="417.40155"
id="tspan3500"> },{</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="427.40155"
id="tspan3502"> id: a8838f,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="437.40155"
id="tspan3504"> sid: a0c69f</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="447.40155"
id="tspan3506"> },{</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="457.40155"
id="tspan3508"> id: ea0cc4,</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="467.40155"
id="tspan3510"> sid: 47853a</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="477.40155"
id="tspan3512"> }, [...]</tspan><tspan
sodipodi:role="line"
x="219.68504"
y="487.40155"
id="tspan3514">]}</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
d="m 301.18111,170.07873 0,28.34645"
id="path6665"
inkscape:connector-curvature="0" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.77165354;stroke-miterlimit:2;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0"
id="rect4052"
width="102.75591"
height="350.78741"
x="70.866142"
y="60.236198"
rx="3.5432441"
ry="3.5432446" />
<path
style="fill:none;stroke:#000000;stroke-width:0.9992126;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
d="M 148.8189,127.55903 209.05512,70.86612"
id="path4834"
inkscape:connector-curvature="0" />
</g>
</svg>


7
mkdocs.yml Normal file
View File

@@ -0,0 +1,7 @@
site_name: Documentation for restic
theme: readthedocs
docs_dir: doc
pages:
- Getting Started: index.md
- User Manual: Manual.md
- restic Design Document: Design.md

View File

@@ -1,2 +0,0 @@
// Package repository implements a restic repository on top of a backend.
package repository

View File

@@ -1,324 +0,0 @@
package repository
import (
"encoding/json"
"fmt"
"io"
"sync"
"github.com/restic/restic/backend"
"github.com/restic/restic/crypto"
"github.com/restic/restic/debug"
"github.com/restic/restic/pack"
)
// Index holds a lookup table for id -> pack.
type Index struct {
m sync.Mutex
pack map[string]indexEntry
}
type indexEntry struct {
tpe pack.BlobType
packID *backend.ID
offset uint
length uint
old bool
}
// NewIndex returns a new index.
func NewIndex() *Index {
return &Index{
pack: make(map[string]indexEntry),
}
}
func (idx *Index) store(t pack.BlobType, id backend.ID, pack *backend.ID, offset, length uint, old bool) {
idx.pack[id.String()] = indexEntry{
tpe: t,
packID: pack,
offset: offset,
length: length,
old: old,
}
}
// Store remembers the id and pack in the index.
func (idx *Index) Store(t pack.BlobType, id backend.ID, pack *backend.ID, offset, length uint) {
idx.m.Lock()
defer idx.m.Unlock()
debug.Log("Index.Store", "pack %v contains id %v (%v), offset %v, length %v",
pack.Str(), id.Str(), t, offset, length)
idx.store(t, id, pack, offset, length, false)
}
// Remove removes the pack ID from the index.
func (idx *Index) Remove(packID backend.ID) {
idx.m.Lock()
defer idx.m.Unlock()
debug.Log("Index.Remove", "id %v removed", packID.Str())
s := packID.String()
if _, ok := idx.pack[s]; ok {
delete(idx.pack, s)
}
}
// Lookup returns the pack for the id.
func (idx *Index) Lookup(id backend.ID) (packID *backend.ID, tpe pack.BlobType, offset, length uint, err error) {
idx.m.Lock()
defer idx.m.Unlock()
if p, ok := idx.pack[id.String()]; ok {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length)
return p.packID, p.tpe, p.offset, p.length, nil
}
debug.Log("Index.Lookup", "id %v not found", id.Str())
return nil, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
}
// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id backend.ID) bool {
_, _, _, _, err := idx.Lookup(id)
if err == nil {
return true
}
return false
}
// LookupSize returns the length of the cleartext content behind the
// given id
func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) {
_, _, _, encryptedLength, err := idx.Lookup(id)
if err != nil {
return 0, err
}
return encryptedLength - crypto.Extension, nil
}
// Merge loads all items from other into idx.
func (idx *Index) Merge(other *Index) {
debug.Log("Index.Merge", "Merge index with %p", other)
idx.m.Lock()
defer idx.m.Unlock()
for k, v := range other.pack {
if _, ok := idx.pack[k]; ok {
debug.Log("Index.Merge", "index already has key %v, updating", k[:8])
}
idx.pack[k] = v
}
debug.Log("Index.Merge", "done merging index")
}
// PackedBlob is a blob already saved within a pack.
type PackedBlob struct {
pack.Blob
PackID backend.ID
}
// Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of
// the index.
func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
idx.m.Lock()
ch := make(chan PackedBlob)
go func() {
defer idx.m.Unlock()
defer func() {
close(ch)
}()
for ids, blob := range idx.pack {
id, err := backend.ParseID(ids)
if err != nil {
// ignore invalid IDs
continue
}
select {
case <-done:
return
case ch <- PackedBlob{
Blob: pack.Blob{
ID: id,
Offset: blob.offset,
Type: blob.tpe,
Length: uint32(blob.length),
},
PackID: *blob.packID,
}:
}
}
}()
return ch
}
// Count returns the number of blobs of type t in the index.
func (idx *Index) Count(t pack.BlobType) (n uint) {
debug.Log("Index.Count", "counting blobs of type %v", t)
idx.m.Lock()
defer idx.m.Unlock()
for id, blob := range idx.pack {
if blob.tpe == t {
n++
debug.Log("Index.Count", " blob %v counted: %v", id[:8], blob)
}
}
return
}
type packJSON struct {
ID string `json:"id"`
Blobs []blobJSON `json:"blobs"`
}
type blobJSON struct {
ID string `json:"id"`
Type pack.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
}
// generatePackList returns a list of packs containing only the index entries
// that selectFn returned true for. If selectFn is nil, the list contains all
// blobs in the index.
func (idx *Index) generatePackList(selectFn func(indexEntry) bool) ([]*packJSON, error) {
list := []*packJSON{}
packs := make(map[string]*packJSON)
for id, blob := range idx.pack {
if selectFn != nil && !selectFn(blob) {
continue
}
debug.Log("Index.generatePackList", "handle blob %q", id[:8])
if blob.packID.IsNull() {
debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)",
id[:8], blob.tpe, blob.offset, blob.length)
return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id)
}
// see if pack is already in map
p, ok := packs[blob.packID.String()]
if !ok {
// else create new pack
p = &packJSON{ID: blob.packID.String()}
// and append it to the list and map
list = append(list, p)
packs[p.ID] = p
}
// add blob
p.Blobs = append(p.Blobs, blobJSON{
ID: id,
Type: blob.tpe,
Offset: blob.offset,
Length: blob.length,
})
}
debug.Log("Index.generatePackList", "done")
return list, nil
}
// encode writes the JSON serialization of the index filtered by selectFn to w.
func (idx *Index) encode(w io.Writer, selectFn func(indexEntry) bool) error {
list, err := idx.generatePackList(selectFn)
if err != nil {
return err
}
debug.Log("Index.Encode", "done")
enc := json.NewEncoder(w)
return enc.Encode(list)
}
// Encode writes the JSON serialization of the index to the writer w. This
// serialization only contains new blobs added via idx.Store(), not old ones
// introduced via DecodeIndex().
func (idx *Index) Encode(w io.Writer) error {
debug.Log("Index.Encode", "encoding index")
idx.m.Lock()
defer idx.m.Unlock()
return idx.encode(w, func(e indexEntry) bool { return !e.old })
}
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
debug.Log("Index.Dump", "dumping index")
idx.m.Lock()
defer idx.m.Unlock()
list, err := idx.generatePackList(nil)
if err != nil {
return err
}
buf, err := json.MarshalIndent(list, "", " ")
if err != nil {
return err
}
_, err = w.Write(append(buf, '\n'))
if err != nil {
return err
}
debug.Log("Index.Dump", "done")
return nil
}
// DecodeIndex loads and deserializes an index from rd.
func DecodeIndex(rd io.Reader) (*Index, error) {
debug.Log("Index.DecodeIndex", "Start decoding index")
list := []*packJSON{}
dec := json.NewDecoder(rd)
err := dec.Decode(&list)
if err != nil {
return nil, err
}
idx := NewIndex()
for _, pack := range list {
packID, err := backend.ParseID(pack.ID)
if err != nil {
debug.Log("Index.DecodeIndex", "error parsing pack ID %q: %v", pack.ID, err)
return nil, err
}
for _, blob := range pack.Blobs {
blobID, err := backend.ParseID(blob.ID)
if err != nil {
debug.Log("Index.DecodeIndex", "error parsing blob ID %q: %v", blob.ID, err)
return nil, err
}
idx.store(blob.Type, blobID, &packID, blob.Offset, blob.Length, true)
}
}
debug.Log("Index.DecodeIndex", "done")
return idx, err
}
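
For context, here is a minimal sketch of how this removed in-memory index was driven, mirroring the usage in the test file that follows; it assumes the pre-refactor import paths under `github.com/restic/restic`, and the IDs are borrowed from the doc/Design.md example index further down:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/pack"
	"github.com/restic/restic/repository"
)

func main() {
	idx := repository.NewIndex()

	// IDs borrowed from the doc/Design.md example index below.
	packID, err := backend.ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c")
	if err != nil {
		panic(err)
	}
	blobID, err := backend.ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce")
	if err != nil {
		panic(err)
	}

	// Store registers a blob: type, blob ID, containing pack, offset, length.
	idx.Store(pack.Data, blobID, &packID, 0, 25)

	// Lookup returns the pack, blob type, offset and length for an ID.
	p, tpe, offset, length, err := idx.Lookup(blobID)
	if err != nil {
		panic(err)
	}
	fmt.Println(p, tpe, offset, length)

	// Encode serializes the entries added via Store as JSON.
	buf := bytes.NewBuffer(nil)
	if err := idx.Encode(buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
```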

View File

@@ -1,225 +0,0 @@
package repository_test
import (
"bytes"
"crypto/rand"
"io"
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/pack"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
func randomID() backend.ID {
id := backend.ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}
func TestIndexSerialize(t *testing.T) {
type testEntry struct {
id backend.ID
pack backend.ID
tpe pack.BlobType
offset, length uint
}
tests := []testEntry{}
idx := repository.NewIndex()
// create 50 packs with 20 blobs each
for i := 0; i < 50; i++ {
packID := randomID()
pos := uint(0)
for j := 0; j < 20; j++ {
id := randomID()
length := uint(i*100 + j)
idx.Store(pack.Data, id, &packID, pos, length)
tests = append(tests, testEntry{
id: id,
pack: packID,
tpe: pack.Data,
offset: pos,
length: length,
})
pos += length
}
}
wr := bytes.NewBuffer(nil)
err := idx.Encode(wr)
OK(t, err)
idx2, err := repository.DecodeIndex(wr)
OK(t, err)
Assert(t, idx2 != nil,
"nil returned for decoded index")
wr2 := bytes.NewBuffer(nil)
err = idx2.Encode(wr2)
OK(t, err)
for _, testBlob := range tests {
packID, tpe, offset, length, err := idx.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, *packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
packID, tpe, offset, length, err = idx2.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, *packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
}
// add more blobs to idx2
newtests := []testEntry{}
for i := 0; i < 10; i++ {
packID := randomID()
pos := uint(0)
for j := 0; j < 10; j++ {
id := randomID()
length := uint(i*100 + j)
idx2.Store(pack.Data, id, &packID, pos, length)
newtests = append(newtests, testEntry{
id: id,
pack: packID,
tpe: pack.Data,
offset: pos,
length: length,
})
pos += length
}
}
// serialize idx2, unserialize to idx3
wr3 := bytes.NewBuffer(nil)
err = idx2.Encode(wr3)
OK(t, err)
idx3, err := repository.DecodeIndex(wr3)
OK(t, err)
Assert(t, idx3 != nil,
"nil returned for decoded index")
// all old blobs must not be present in the index
for _, testBlob := range tests {
_, _, _, _, err := idx3.Lookup(testBlob.id)
Assert(t, err != nil,
"found old id %v in serialized index", testBlob.id.Str())
}
// all new blobs must be in the index
for _, testBlob := range newtests {
packID, tpe, offset, length, err := idx3.Lookup(testBlob.id)
OK(t, err)
Equals(t, testBlob.pack, *packID)
Equals(t, testBlob.tpe, tpe)
Equals(t, testBlob.offset, offset)
Equals(t, testBlob.length, length)
}
}
func TestIndexSize(t *testing.T) {
idx := repository.NewIndex()
packs := 200
blobs := 100
for i := 0; i < packs; i++ {
packID := randomID()
pos := uint(0)
for j := 0; j < blobs; j++ {
id := randomID()
length := uint(i*100 + j)
idx.Store(pack.Data, id, &packID, pos, length)
pos += length
}
}
wr := bytes.NewBuffer(nil)
err := idx.Encode(wr)
OK(t, err)
t.Logf("Index file size for %d blobs in %d packs is %d", blobs*packs, packs, wr.Len())
}
// example index serialization from doc/Design.md
var docExample = []byte(`
[ {
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
} ]
`)
var exampleTests = []struct {
id, packID backend.ID
tpe pack.BlobType
offset, length uint
}{
{
ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"),
ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
pack.Data, 0, 25,
}, {
ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"),
ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
pack.Tree, 38, 100,
}, {
ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"),
ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
pack.Data, 150, 123,
},
}
func TestIndexUnserialize(t *testing.T) {
idx, err := repository.DecodeIndex(bytes.NewReader(docExample))
OK(t, err)
for _, test := range exampleTests {
packID, tpe, offset, length, err := idx.Lookup(test.id)
OK(t, err)
Equals(t, test.packID, *packID)
Equals(t, test.tpe, tpe)
Equals(t, test.offset, offset)
Equals(t, test.length, length)
}
}

View File

@@ -3,97 +3,369 @@
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
)
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
var minioServer = flag.String("minio", "", "path to the minio server binary")
var debug = flag.Bool("debug", false, "output debug messages")
var minioServerEnv = map[string]string{
"MINIO_ACCESS_KEY": "KEBIYDZ87HCIH5D17YCN",
"MINIO_SECRET_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}
var minioEnv = map[string]string{
"RESTIC_TEST_S3_SERVER": "http://127.0.0.1:9000",
"AWS_ACCESS_KEY_ID": "KEBIYDZ87HCIH5D17YCN",
"AWS_SECRET_ACCESS_KEY": "bVX1KhipSBPopEfmhc7rGz8ooxx27xdJ7Gkh1mVe",
}
func init() {
flag.Parse()
}
// CIEnvironment is implemented by environments where tests can be run.
type CIEnvironment interface {
Prepare()
RunTests()
Prepare() error
RunTests() error
Teardown() error
}
// TravisEnvironment is the environment in which Travis tests run.
type TravisEnvironment struct {
goxArch []string
goxOS []string
goxOSArch []string
minio string
minioSrv *Background
minioTempdir string
env map[string]string
}
func (env *TravisEnvironment) Prepare() {
func (env *TravisEnvironment) getMinio() error {
if *minioServer != "" {
msg("using minio server at %q\n", *minioServer)
env.minio = *minioServer
return nil
}
tempfile, err := ioutil.TempFile("", "minio-server-")
if err != nil {
return fmt.Errorf("create tempfile for minio download failed: %v\n", err)
}
url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio",
runtime.GOOS, runtime.GOARCH)
msg("downloading %v\n", url)
res, err := http.Get(url)
if err != nil {
return fmt.Errorf("error downloading minio server: %v\n", err)
}
_, err = io.Copy(tempfile, res.Body)
if err != nil {
return fmt.Errorf("error saving minio server to file: %v\n", err)
}
err = res.Body.Close()
if err != nil {
return fmt.Errorf("error closing HTTP download: %v\n", err)
}
err = tempfile.Close()
if err != nil {
msg("closing tempfile failed: %v\n", err)
return fmt.Errorf("error closing minio server file: %v\n", err)
}
err = os.Chmod(tempfile.Name(), 0755)
if err != nil {
return fmt.Errorf("chmod(minio-server) failed: %v", err)
}
msg("downloaded minio server to %v\n", tempfile.Name())
env.minio = tempfile.Name()
return nil
}
func (env *TravisEnvironment) runMinio() error {
if env.minio == "" {
return nil
}
// start minio server
msg("starting minio server at %s", env.minio)
dir, err := ioutil.TempDir("", "minio-root")
if err != nil {
return fmt.Errorf("running minio server failed: %v", err)
}
env.minioSrv, err = StartBackgroundCommand(minioServerEnv, env.minio,
"server",
"--address", "127.0.0.1:9000",
dir)
if err != nil {
return fmt.Errorf("error running minio server: %v", err)
}
// go func() {
// time.Sleep(300 * time.Millisecond)
// env.minioSrv.Cmd.Process.Kill()
// }()
for k, v := range minioEnv {
env.env[k] = v
}
env.minioTempdir = dir
return nil
}
// Prepare installs dependencies and starts services in order to run the tests.
func (env *TravisEnvironment) Prepare() error {
env.env = make(map[string]string)
msg("preparing environment for Travis CI\n")
run("go", "get", "github.com/mattn/goveralls")
run("go", "get", "github.com/mitchellh/gox")
for _, pkg := range []string{
"golang.org/x/tools/cmd/cover",
"github.com/mattn/goveralls",
"github.com/pierrre/gotestcover",
} {
err := run("go", "get", pkg)
if err != nil {
return err
}
}
if err := env.getMinio(); err != nil {
return err
}
if err := env.runMinio(); err != nil {
return err
}
if runtime.GOOS == "darwin" {
// install the libraries necessary for fuse
run("brew", "install", "caskroom/cask/brew-cask")
run("brew", "cask", "install", "osxfuse")
if err := run("brew", "update"); err != nil {
return err
}
if err := run("brew", "cask", "install", "osxfuse"); err != nil {
return err
}
}
// only test cross compilation on linux with Travis
if runtime.GOOS == "linux" {
env.goxArch = []string{"386", "amd64"}
if !strings.HasPrefix(runtime.Version(), "go1.3") {
env.goxArch = append(env.goxArch, "arm")
if *runCrossCompile {
// only test cross compilation on linux with Travis
if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
return err
}
if runtime.GOOS == "linux" {
env.goxOSArch = []string{
"linux/386", "linux/amd64",
"windows/386", "windows/amd64",
"darwin/386", "darwin/amd64",
"freebsd/386", "freebsd/amd64",
"opendbsd/386", "opendbsd/amd64",
}
if !strings.HasPrefix(runtime.Version(), "go1.3") {
env.goxOSArch = append(env.goxOSArch,
"linux/arm", "freebsd/arm")
}
} else {
env.goxOSArch = []string{runtime.GOOS + "/" + runtime.GOARCH}
}
env.goxOS = []string{"linux", "darwin", "freebsd", "openbsd", "windows"}
} else {
env.goxArch = []string{runtime.GOARCH}
env.goxOS = []string{runtime.GOOS}
msg("gox: OS/ARCH %v\n", env.goxOSArch)
v := runtime.Version()
if !strings.HasPrefix(v, "go1.5") && !strings.HasPrefix(v, "go1.6") {
err := run("gox", "-build-toolchain",
"-osarch", strings.Join(env.goxOSArch, " "))
if err != nil {
return err
}
}
}
msg("gox: OS %v, ARCH %v\n", env.goxOS, env.goxArch)
if !strings.HasPrefix(runtime.Version(), "go1.5") {
run("gox", "-build-toolchain",
"-os", strings.Join(env.goxOS, " "),
"-arch", strings.Join(env.goxArch, " "))
}
return nil
}
func (env *TravisEnvironment) RunTests() {
// Teardown stops backend services and cleans up the environment.
func (env *TravisEnvironment) Teardown() error {
msg("run travis teardown\n")
if env.minioSrv != nil {
msg("stopping minio server\n")
if env.minioSrv.Cmd.ProcessState == nil {
err := env.minioSrv.Cmd.Process.Kill()
if err != nil {
fmt.Fprintf(os.Stderr, "error killing minio server process: %v", err)
}
} else {
result := <-env.minioSrv.Result
if result.Error != nil {
msg("minio server returned error: %v\n", result.Error)
msg("stdout: %s\n", result.Stdout)
msg("stderr: %s\n", result.Stderr)
}
}
err := os.RemoveAll(env.minioTempdir)
if err != nil {
msg("error removing minio tempdir %v: %v\n", env.minioTempdir, err)
}
}
return nil
}
func goVersionAtLeast151() bool {
v := runtime.Version()
if match, _ := regexp.MatchString(`^go1\.[0-4]`, v); match {
return false
}
if v == "go1.5" {
return false
}
return true
}
// Background is a program running in the background.
type Background struct {
Cmd *exec.Cmd
Result chan Result
}
// Result is the result of a program that ran in the background.
type Result struct {
Stdout, Stderr string
Error error
}
// StartBackgroundCommand runs a program in the background.
func StartBackgroundCommand(env map[string]string, cmd string, args ...string) (*Background, error) {
msg("running background command %v %v\n", cmd, args)
b := Background{
Result: make(chan Result, 1),
}
stdout := bytes.NewBuffer(nil)
stderr := bytes.NewBuffer(nil)
c := exec.Command(cmd, args...)
c.Stdout = stdout
c.Stderr = stderr
if *debug {
c.Stdout = io.MultiWriter(c.Stdout, os.Stdout)
c.Stderr = io.MultiWriter(c.Stderr, os.Stderr)
}
c.Env = updateEnv(os.Environ(), env)
b.Cmd = c
err := c.Start()
if err != nil {
msg("error starting background job %v: %v\n", cmd, err)
return nil, err
}
go func() {
err := b.Cmd.Wait()
msg("background job %v returned: %v\n", cmd, err)
msg("stdout: %s\n", stdout.Bytes())
msg("stderr: %s\n", stderr.Bytes())
b.Result <- Result{
Stdout: string(stdout.Bytes()),
Stderr: string(stderr.Bytes()),
Error: err,
}
}()
return &b, nil
}
// RunTests starts the tests for Travis.
func (env *TravisEnvironment) RunTests() error {
// run fuse tests on darwin
if runtime.GOOS != "darwin" {
msg("skip fuse integration tests on %v\n", runtime.GOOS)
os.Setenv("RESTIC_TEST_FUSE", "0")
}
// compile for all target architectures with tags
for _, tags := range []string{"release", "debug"} {
run("gox", "-verbose",
"-os", strings.Join(env.goxOS, " "),
"-arch", strings.Join(env.goxArch, " "),
"-tags", tags,
"./cmd/restic")
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("Getwd() returned error: %v", err)
}
env.env["GOPATH"] = cwd + ":" + filepath.Join(cwd, "vendor")
if *runCrossCompile {
// compile for all target architectures with tags
for _, tags := range []string{"release", "debug"} {
runWithEnv(env.env, "gox", "-verbose",
"-osarch", strings.Join(env.goxOSArch, " "),
"-tags", tags,
"-output", "/tmp/{{.Dir}}_{{.OS}}_{{.Arch}}",
"cmds/restic")
}
}
// run the build script
run("go", "run", "build.go")
if err := run("go", "run", "build.go"); err != nil {
return err
}
// gather coverage information
run("go", "run", "run_tests.go", "all.cov")
// run the tests and gather coverage information
err = runWithEnv(env.env, "gotestcover", "-coverprofile", "all.cov", "cmds/...", "restic/...")
if err != nil {
return err
}
runGofmt()
return runGofmt()
}
// AppveyorEnvironment is the environment on Windows.
type AppveyorEnvironment struct{}
func (env *AppveyorEnvironment) Prepare() {
// Prepare installs dependencies and starts services in order to run the tests.
func (env *AppveyorEnvironment) Prepare() error {
msg("preparing environment for Appveyor CI\n")
return nil
}
func (env *AppveyorEnvironment) RunTests() {
run("go", "run", "build.go", "-v", "-T")
// RunTests starts the tests.
func (env *AppveyorEnvironment) RunTests() error {
return run("go", "run", "build.go", "-v", "-T")
}
// Teardown is a noop.
func (env *AppveyorEnvironment) Teardown() error {
return nil
}
// findGoFiles returns a list of go source code file names below dir.
func findGoFiles(dir string) (list []string, err error) {
err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
if filepath.Base(name) == "Godeps" {
if filepath.Base(name) == "vendor" {
return filepath.SkipDir
}
@@ -116,17 +388,35 @@ func msg(format string, args ...interface{}) {
fmt.Printf("CI: "+format, args...)
}
func runGofmt() {
func updateEnv(env []string, override map[string]string) []string {
var newEnv []string
for _, s := range env {
d := strings.SplitN(s, "=", 2)
key := d[0]
if _, ok := override[key]; ok {
continue
}
newEnv = append(newEnv, s)
}
for k, v := range override {
newEnv = append(newEnv, k+"="+v)
}
return newEnv
}
func runGofmt() error {
dir, err := os.Getwd()
if err != nil {
fmt.Fprintf(os.Stderr, "Getwd(): %v\n", err)
os.Exit(5)
return fmt.Errorf("Getwd(): %v\n", err)
}
files, err := findGoFiles(dir)
if err != nil {
fmt.Fprintf(os.Stderr, "error finding Go files: %v\n", err)
os.Exit(4)
return fmt.Errorf("error finding Go files: %v\n", err)
}
msg("runGofmt() with %d files\n", len(files))
@@ -136,30 +426,38 @@ func runGofmt() {
buf, err := cmd.Output()
if err != nil {
fmt.Fprintf(os.Stderr, "error running gofmt: %v", err)
fmt.Fprintf(os.Stderr, "output:\n%s\n", buf)
os.Exit(3)
return fmt.Errorf("error running gofmt: %v\noutput: %s\n", err, buf)
}
if len(buf) > 0 {
fmt.Fprintf(os.Stderr, "not formatted with `gofmt`:\n")
fmt.Fprintln(os.Stderr, string(buf))
os.Exit(6)
return fmt.Errorf("not formatted with `gofmt`:\n%s\n", buf)
}
return nil
}
func run(command string, args ...string) {
func run(command string, args ...string) error {
msg("run %v %v\n", command, strings.Join(args, " "))
return runWithEnv(nil, command, args...)
}
// runWithEnv calls a command with the current environment, except the entries
// of the env map are set additionally.
func runWithEnv(env map[string]string, command string, args ...string) error {
msg("runWithEnv %v %v\n", command, strings.Join(args, " "))
cmd := exec.Command(command, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if env != nil {
cmd.Env = updateEnv(os.Environ(), env)
}
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "error running %v %v: %v",
return fmt.Errorf("error running %v %v: %v",
command, strings.Join(args, " "), err)
os.Exit(3)
}
return nil
}
func isTravis() bool {
@@ -183,7 +481,16 @@ func main() {
os.Exit(1)
}
for _, f := range []func(){env.Prepare, env.RunTests} {
f()
foundError := false
for _, f := range []func() error{env.Prepare, env.RunTests, env.Teardown} {
err := f()
if err != nil {
foundError = true
fmt.Fprintf(os.Stderr, "error: %v\n", err)
}
}
if foundError {
os.Exit(1)
}
}
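
The shape of this refactor is worth noting: every phase of a CIEnvironment now returns an error, and main runs all phases unconditionally so that Teardown still stops background services (such as the minio server) even when the tests fail. A condensed, self-contained sketch of that pattern, with `noopEnv` as a made-up stand-in:

```
package main

import (
	"fmt"
	"os"
)

// CIEnvironment mirrors the interface above: every phase returns an error.
type CIEnvironment interface {
	Prepare() error
	RunTests() error
	Teardown() error
}

// noopEnv is a made-up environment used only to exercise runAll.
type noopEnv struct{}

func (noopEnv) Prepare() error  { return nil }
func (noopEnv) RunTests() error { return fmt.Errorf("tests failed") }
func (noopEnv) Teardown() error { return nil }

// runAll executes every phase even if an earlier one failed, so Teardown
// always runs, and exits non-zero if any phase reported an error.
func runAll(env CIEnvironment) {
	foundError := false
	for _, f := range []func() error{env.Prepare, env.RunTests, env.Teardown} {
		if err := f(); err != nil {
			foundError = true
			fmt.Fprintf(os.Stderr, "error: %v\n", err)
		}
	}
	if foundError {
		os.Exit(1)
	}
}

func main() {
	runAll(noopEnv{})
}
```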

View File

@@ -1,184 +0,0 @@
// +build ignore
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
func specialDir(name string) bool {
if name == "." {
return false
}
base := filepath.Base(name)
return base[0] == '_' || base[0] == '.'
}
func emptyDir(name string) bool {
dir, err := os.Open(name)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to open directory %v: %v\n", name, err)
return true
}
defer dir.Close()
fis, err := dir.Readdir(-1)
if err != nil {
fmt.Fprintf(os.Stderr, "Readdirnames(%v): %v\n", name, err)
return true
}
for _, fi := range fis {
if fi.IsDir() {
continue
}
if filepath.Ext(fi.Name()) == ".go" {
return false
}
}
return true
}
func forceRelativeDirname(dirname string) string {
if dirname == "." {
return dirname
}
if strings.HasPrefix(dirname, "./") {
return dirname
}
return "./" + dirname
}
func mergeCoverprofile(file *os.File, out io.Writer) error {
_, err := file.Seek(0, 0)
if err != nil {
return err
}
rd := bufio.NewReader(file)
_, err = rd.ReadString('\n')
if err == io.EOF {
return nil
}
if err != nil {
return err
}
_, err = io.Copy(out, rd)
if err != nil {
return err
}
err = file.Close()
if err != nil {
return err
}
return err
}
func testPackage(pkg string, params []string, out io.Writer) error {
file, err := ioutil.TempFile("", "test-coverage-")
defer os.Remove(file.Name())
defer file.Close()
if err != nil {
return err
}
args := []string{"test", "-cover", "-covermode", "set", "-coverprofile",
file.Name(), pkg}
args = append(args, params...)
cmd := exec.Command("go", args...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
err = cmd.Run()
if err != nil {
return err
}
return mergeCoverprofile(file, out)
}
func main() {
if len(os.Args) < 2 {
fmt.Fprintln(os.Stderr, "USAGE: run_tests COVERPROFILE [TESTFLAGS] [-- [PATHS]]")
os.Exit(1)
}
target := os.Args[1]
args := os.Args[2:]
paramsForTest := []string{}
dirs := []string{}
for i, arg := range args {
if arg == "--" {
dirs = args[i+1:]
break
}
paramsForTest = append(paramsForTest, arg)
}
if len(dirs) == 0 {
dirs = append(dirs, ".")
}
file, err := os.Create(target)
if err != nil {
fmt.Fprintf(os.Stderr, "create coverprofile failed: %v\n", err)
os.Exit(1)
}
fmt.Fprintln(file, "mode: set")
failedTests := false
for _, dir := range dirs {
err := filepath.Walk(dir,
func(p string, fi os.FileInfo, e error) error {
if e != nil {
return e
}
if !fi.IsDir() {
return nil
}
if specialDir(p) {
return filepath.SkipDir
}
if emptyDir(p) {
return nil
}
return testPackage(forceRelativeDirname(p), paramsForTest, file)
})
if err != nil {
fmt.Fprintf(os.Stderr, "walk(%q): %v\n", dir, err)
failedTests = true
}
}
err = file.Close()
fmt.Printf("coverprofile: %v\n", file.Name())
if failedTests {
os.Exit(1)
}
}
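
The one subtlety in this deleted script is how per-package coverprofiles are merged: every profile written by `go test -coverprofile` starts with a `mode: ...` header, so the script emits the header once and copies each profile from its second line onwards. A standalone sketch of just that merge step (the `all.cov` output name is illustrative):

```
package main

import (
	"bufio"
	"io"
	"os"
)

// mergeProfile appends one coverprofile to out, skipping the
// "mode: ..." header line that every profile starts with.
func mergeProfile(path string, out io.Writer) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	rd := bufio.NewReader(f)
	// Discard the first line; an empty profile is not an error.
	if _, err := rd.ReadString('\n'); err == io.EOF {
		return nil
	} else if err != nil {
		return err
	}
	_, err = io.Copy(out, rd)
	return err
}

func main() {
	out, err := os.Create("all.cov")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Write the header exactly once, then append all given profiles.
	out.WriteString("mode: set\n")
	for _, p := range os.Args[1:] {
		if err := mergeProfile(p, out); err != nil {
			panic(err)
		}
	}
}
```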

View File

@@ -1,93 +0,0 @@
package restic
import (
"fmt"
"os"
"os/user"
"path/filepath"
"time"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
)
type Snapshot struct {
Time time.Time `json:"time"`
Parent *backend.ID `json:"parent,omitempty"`
Tree *backend.ID `json:"tree"`
Paths []string `json:"paths"`
Hostname string `json:"hostname,omitempty"`
Username string `json:"username,omitempty"`
UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"`
Excludes []string `json:"excludes,omitempty"`
id *backend.ID // plaintext ID, used during restore
}
func NewSnapshot(paths []string) (*Snapshot, error) {
for i, path := range paths {
if p, err := filepath.Abs(path); err == nil {
paths[i] = p
}
}
sn := &Snapshot{
Paths: paths,
Time: time.Now(),
}
hn, err := os.Hostname()
if err == nil {
sn.Hostname = hn
}
err = sn.fillUserInfo()
if err != nil {
return nil, err
}
return sn, nil
}
func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) {
sn := &Snapshot{id: &id}
err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
if err != nil {
return nil, err
}
return sn, nil
}
func (sn Snapshot) String() string {
return fmt.Sprintf("<Snapshot of %v at %s>", sn.Paths, sn.Time)
}
func (sn Snapshot) ID() *backend.ID {
return sn.id
}
func (sn *Snapshot) fillUserInfo() error {
usr, err := user.Current()
if err != nil {
return nil
}
sn.Username = usr.Username
// set userid and groupid
sn.UID, sn.GID, err = uidGidInt(*usr)
return err
}
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) {
// find snapshot id with prefix
name, err := backend.Find(repo.Backend(), backend.Snapshot, s)
if err != nil {
return backend.ID{}, err
}
return backend.ParseID(name)
}
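
For reference, a minimal sketch of how this snapshot API was used before the move, assuming the pre-refactor import paths; the repository handle and the abbreviated ID `1bde93` are placeholders:

```
package main

import (
	"fmt"

	"github.com/restic/restic"
	"github.com/restic/restic/repository"
)

// example creates a snapshot object, resolves an abbreviated ID and
// loads the stored snapshot back from the repository.
func example(repo *repository.Repository) error {
	// NewSnapshot fills in time, hostname and user information.
	sn, err := restic.NewSnapshot([]string{"/home/user/work"})
	if err != nil {
		return err
	}
	fmt.Println(sn) // <Snapshot of [/home/user/work] at ...>

	// FindSnapshot resolves a (possibly abbreviated) snapshot ID string.
	id, err := restic.FindSnapshot(repo, "1bde93")
	if err != nil {
		return err
	}
	sn2, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		return err
	}
	fmt.Println(sn2.ID().Str())
	return nil
}

func main() {
	// Opening a repository is elided in this sketch.
}
```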

View File

@@ -21,3 +21,4 @@ _testmain.go
*.exe
*.test
*.prof

View File

@@ -0,0 +1,24 @@
Copyright (c) 2015, Bertil Chapuis
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,29 @@
# Restic Server
Restic Server is a sample server that implements restic's REST backend API.
It has been developed for demonstration purposes and is not intended to be used in production.
## Getting started
By default the server persists backup data in `/tmp/restic`.
Build and start the server with a custom persistence directory:
```
go build
./restic-server -path /user/home/backup
```
The server uses an `.htpasswd` file to specify users. You can create such a file at the root of the persistence directory by executing the following command. To append a new user to the file, just omit the `-c` argument.
```
htpasswd -s -c .htpasswd username
```
By default the server uses HTTP. This is not very secure: with Basic Authentication, usernames and passwords are transmitted with every request. To enable TLS support, add the `-tls` argument and place a private and a public key at the root of your persistence directory.
A signed certificate is required by the restic backend, but if you just want to test the feature you can generate a self-signed certificate with the following commands:
```
openssl genrsa -out private_key 2048
openssl req -new -x509 -key private_key -out public_key -days 365
```

View File

@@ -0,0 +1,192 @@
// +build go1.4
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
// Context contains repository meta-data.
type Context struct {
path string
}
// AuthHandler wraps h with a http.HandlerFunc that performs basic
// authentication against the username/password pairs stored in f and returns the
// http.HandlerFunc.
func AuthHandler(f *HtpasswdFile, h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
username, password, ok := r.BasicAuth()
if !ok {
http.Error(w, "401 unauthorized", 401)
return
}
if !f.Validate(username, password) {
http.Error(w, "401 unauthorized", 401)
return
}
h.ServeHTTP(w, r)
}
}
// CheckConfig returns a http.HandlerFunc that checks whether
// a configuration exists.
func CheckConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
st, err := os.Stat(config)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Header().Add("Content-Length", fmt.Sprint(st.Size()))
}
}
// GetConfig returns a http.HandlerFunc that allows for a
// config to be retrieved.
func GetConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
bytes, err := ioutil.ReadFile(config)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Write(bytes)
}
}
// SaveConfig returns a http.HandlerFunc that allows for a
// config to be saved.
func SaveConfig(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
config := filepath.Join(c.path, "config")
bytes, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "400 bad request", 400)
return
}
errw := ioutil.WriteFile(config, bytes, 0600)
if errw != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}
// ListBlobs returns a http.HandlerFunc that lists
// all blobs of a given type in an arbitrary order.
func ListBlobs(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
path := filepath.Join(c.path, dir)
files, err := ioutil.ReadDir(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
names := make([]string, len(files))
for i, f := range files {
names[i] = f.Name()
}
data, err := json.Marshal(names)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write(data)
}
}
// CheckBlob returns a http.HandlerFunc that tests whether a blob exists
// and returns 200, if it does, or 404 otherwise.
func CheckBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
st, err := os.Stat(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
w.Header().Add("Content-Length", fmt.Sprint(st.Size()))
}
}
// GetBlob returns a http.HandlerFunc that retrieves a blob
// from the repository.
func GetBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
file, err := os.Open(path)
if err != nil {
http.Error(w, "404 not found", 404)
return
}
defer file.Close()
http.ServeContent(w, r, "", time.Unix(0, 0), file)
}
}
// SaveBlob returns a http.HandlerFunc that saves a blob to the repository.
func SaveBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
tmp := path + "_tmp"
tf, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
if _, err := io.Copy(tf, r.Body); err != nil {
http.Error(w, "400 bad request", 400)
tf.Close()
os.Remove(tmp)
return
}
if err := tf.Close(); err != nil {
http.Error(w, "500 internal server error", 500)
}
if err := os.Rename(tmp, path); err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}
// DeleteBlob returns a http.HandlerFunc that deletes a blob from the
// repository.
func DeleteBlob(c *Context) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
vars := strings.Split(r.RequestURI, "/")
dir := vars[1]
name := vars[2]
path := filepath.Join(c.path, dir, name)
err := os.Remove(path)
if err != nil {
http.Error(w, "500 internal server error", 500)
return
}
w.Write([]byte("200 ok"))
}
}

View File

@@ -0,0 +1,96 @@
// +build go1.4
package main
/*
Copied from: github.com/bitly/oauth2_proxy
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
import (
"crypto/sha1"
"encoding/base64"
"encoding/csv"
"io"
"log"
"os"
)
// HtpasswdFile is a map from usernames to passwords, looked up from an
// htpasswd file. The entries must have been created with `htpasswd -s`,
// i.e. as base64-encoded SHA-1 hashes.
type HtpasswdFile struct {
Users map[string]string
}
// NewHtpasswdFromFile reads the users and passwords from a htpasswd
// file and returns them. If an error is encountered, it is returned, together
// with a nil pointer for the HtpasswdFile.
func NewHtpasswdFromFile(path string) (*HtpasswdFile, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return NewHtpasswd(r)
}
// NewHtpasswd reads the users and passwords from an htpasswd
// data stream in file and returns them. If an error is encountered,
// it is returned, together with a nil pointer for the HtpasswdFile.
func NewHtpasswd(file io.Reader) (*HtpasswdFile, error) {
cr := csv.NewReader(file)
cr.Comma = ':'
cr.Comment = '#'
cr.TrimLeadingSpace = true
records, err := cr.ReadAll()
if err != nil {
return nil, err
}
h := &HtpasswdFile{Users: make(map[string]string)}
for _, record := range records {
h.Users[record[0]] = record[1]
}
return h, nil
}
// Validate returns true if password matches the stored password
// for user. If no password for user is stored, or the password
// is wrong, false is returned.
func (h *HtpasswdFile) Validate(user string, password string) bool {
realPassword, exists := h.Users[user]
if !exists {
return false
}
if realPassword[:5] == "{SHA}" {
d := sha1.New()
d.Write([]byte(password))
if realPassword[5:] == base64.StdEncoding.EncodeToString(d.Sum(nil)) {
return true
}
} else {
log.Printf("Invalid htpasswd entry for %s. Must be a SHA entry.", user)
}
return false
}
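
Validate compares the stored `{SHA}` entry against the base64-encoded SHA-1 digest of the supplied password, which is exactly the format `htpasswd -s` produces. A small self-contained sketch of that scheme:

```
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"strings"
)

// shaEntry computes the htpasswd "{SHA}" form of a password:
// the base64-encoded SHA-1 digest, as written by `htpasswd -s`.
func shaEntry(password string) string {
	d := sha1.Sum([]byte(password))
	return "{SHA}" + base64.StdEncoding.EncodeToString(d[:])
}

func main() {
	entry := shaEntry("secret")
	fmt.Println(entry) // {SHA}5en6G6MezRroT3XKqkdPOmY/BfQ=

	// This mirrors Validate above: strip the prefix, hash the supplied
	// password and compare the encodings.
	stored := strings.TrimPrefix(entry, "{SHA}")
	d := sha1.Sum([]byte("secret"))
	fmt.Println(stored == base64.StdEncoding.EncodeToString(d[:])) // true
}
```

Unsalted SHA-1 is weak by modern standards, which is one more reason this server is meant for demonstration only.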

View File

@@ -0,0 +1,137 @@
// +build go1.4
package main
import (
"log"
"net/http"
"strings"
)
// Route is a handler for a path that was already split.
type Route struct {
path []string
handler http.Handler
}
// Router maps HTTP methods to a slice of Route handlers.
type Router struct {
routes map[string][]Route
}
// NewRouter creates a new Router and returns a pointer to it.
func NewRouter() *Router {
return &Router{make(map[string][]Route)}
}
// Options registers handler for path with method "OPTIONS".
func (router *Router) Options(path string, handler http.Handler) {
router.Handle("OPTIONS", path, handler)
}
// OptionsFunc registers handler for path with method "OPTIONS".
func (router *Router) OptionsFunc(path string, handler http.HandlerFunc) {
router.Handle("OPTIONS", path, handler)
}
// Get registers handler for path with method "GET".
func (router *Router) Get(path string, handler http.Handler) {
router.Handle("GET", path, handler)
}
// GetFunc registers handler for path with method "GET".
func (router *Router) GetFunc(path string, handler http.HandlerFunc) {
router.Handle("GET", path, handler)
}
// Head registers handler for path with method "HEAD".
func (router *Router) Head(path string, handler http.Handler) {
router.Handle("HEAD", path, handler)
}
// HeadFunc registers handler for path with method "HEAD".
func (router *Router) HeadFunc(path string, handler http.HandlerFunc) {
router.Handle("HEAD", path, handler)
}
// Post registers handler for path with method "POST".
func (router *Router) Post(path string, handler http.Handler) {
router.Handle("POST", path, handler)
}
// PostFunc registers handler for path with method "POST".
func (router *Router) PostFunc(path string, handler http.HandlerFunc) {
router.Handle("POST", path, handler)
}
// Put registers handler for path with method "PUT".
func (router *Router) Put(path string, handler http.Handler) {
router.Handle("PUT", path, handler)
}
// PutFunc registers handler for path with method "PUT".
func (router *Router) PutFunc(path string, handler http.HandlerFunc) {
router.Handle("PUT", path, handler)
}
// Delete registers handler for path with method "DELETE".
func (router *Router) Delete(path string, handler http.Handler) {
router.Handle("DELETE", path, handler)
}
// DeleteFunc registers handler for path with method "DELETE".
func (router *Router) DeleteFunc(path string, handler http.HandlerFunc) {
router.Handle("DELETE", path, handler)
}
// Trace registers handler for path with method "TRACE".
func (router *Router) Trace(path string, handler http.Handler) {
router.Handle("TRACE", path, handler)
}
// TraceFunc registers handler for path with method "TRACE".
func (router *Router) TraceFunc(path string, handler http.HandlerFunc) {
router.Handle("TRACE", path, handler)
}
// Connect registers handler for path with method "Connect".
func (router *Router) Connect(path string, handler http.Handler) {
router.Handle("Connect", path, handler)
}
// ConnectFunc registers handler for path with method "Connect".
func (router *Router) ConnectFunc(path string, handler http.HandlerFunc) {
router.Handle("Connect", path, handler)
}
// Handle registers a http.Handler for method and uri
func (router *Router) Handle(method string, uri string, handler http.Handler) {
routes := router.routes[method]
path := strings.Split(uri, "/")
routes = append(routes, Route{path, handler})
router.routes[method] = routes
}
func (router *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
method := r.Method
uri := r.RequestURI
path := strings.Split(uri, "/")
log.Printf("%s %s", method, uri)
ROUTE:
for _, route := range router.routes[method] {
if len(route.path) != len(path) {
continue
}
for i := 0; i < len(route.path); i++ {
if !strings.HasPrefix(route.path[i], ":") && route.path[i] != path[i] {
continue ROUTE
}
}
route.handler.ServeHTTP(w, r)
return
}
http.Error(w, "404 not found", 404)
}
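
The matching rule in ServeHTTP deserves spelling out: a request matches a route only if both split into the same number of `/`-separated segments, and each registered segment matches either literally or, when it starts with `:`, matches anything. A minimal sketch of just that comparison:

```
package main

import (
	"fmt"
	"strings"
)

// match reports whether a request path fits a route pattern, using the
// same segment-by-segment rule as Router.ServeHTTP above.
func match(pattern, uri string) bool {
	p := strings.Split(pattern, "/")
	u := strings.Split(uri, "/")
	if len(p) != len(u) {
		return false
	}
	for i := range p {
		if strings.HasPrefix(p[i], ":") {
			continue // wildcard segment, e.g. ":sha"
		}
		if p[i] != u[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(match("/blobs/:sha", "/blobs/abc123")) // true
	fmt.Println(match("/blobs/:sha", "/blobs/"))       // true: ":sha" also matches an empty segment
	fmt.Println(match("/blobs/:sha", "/config"))       // false: segment counts differ
}
```

Since routes are tried in registration order and the first match wins, more specific routes such as `/blobs/` must be registered before `/blobs/:sha`, as the test below does.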

View File

@@ -0,0 +1,74 @@
// +build go1.4
package main
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestRouter(t *testing.T) {
router := NewRouter()
getConfig := []byte("GET /config")
router.GetFunc("/config", func(w http.ResponseWriter, r *http.Request) {
w.Write(getConfig)
})
postConfig := []byte("POST /config")
router.PostFunc("/config", func(w http.ResponseWriter, r *http.Request) {
w.Write(postConfig)
})
getBlobs := []byte("GET /blobs/")
router.GetFunc("/blobs/", func(w http.ResponseWriter, r *http.Request) {
w.Write(getBlobs)
})
getBlob := []byte("GET /blobs/:sha")
router.GetFunc("/blobs/:sha", func(w http.ResponseWriter, r *http.Request) {
w.Write(getBlob)
})
server := httptest.NewServer(router)
defer server.Close()
getConfigResp, _ := http.Get(server.URL + "/config")
getConfigBody, _ := ioutil.ReadAll(getConfigResp.Body)
if getConfigResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getConfigResp.StatusCode)
}
if string(getConfig) != string(getConfigBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getConfig), string(getConfigBody))
}
postConfigResp, _ := http.Post(server.URL+"/config", "binary/octet-stream", strings.NewReader("post test"))
postConfigBody, _ := ioutil.ReadAll(postConfigResp.Body)
if postConfigResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", postConfigResp.StatusCode)
}
if string(postConfig) != string(postConfigBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(postConfig), string(postConfigBody))
}
getBlobsResp, _ := http.Get(server.URL + "/blobs/")
getBlobsBody, _ := ioutil.ReadAll(getBlobsResp.Body)
if getBlobsResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getBlobsResp.StatusCode)
}
if string(getBlobs) != string(getBlobsBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getBlobs), string(getBlobsBody))
}
getBlobResp, _ := http.Get(server.URL + "/blobs/test")
getBlobBody, _ := ioutil.ReadAll(getBlobResp.Body)
if getBlobResp.StatusCode != 200 {
t.Fatalf("Wanted HTTP Status 200, got %d", getBlobResp.StatusCode)
}
if string(getBlob) != string(getBlobBody) {
t.Fatalf("Config wrong:\nWanted '%s'\nGot: '%s'", string(getBlob), string(getBlobBody))
}
}

View File

@@ -0,0 +1,73 @@
// +build go1.4
package main
import (
"flag"
"log"
"net/http"
"os"
"path/filepath"
)
const (
defaultHTTPPort = ":8000"
defaultHTTPSPort = ":8443"
)
func main() {
// Parse command-line args
var path = flag.String("path", "/tmp/restic", "specifies the path of the data directory")
var tls = flag.Bool("tls", false, "turns on tls support")
flag.Parse()
// Create the missing directories
dirs := []string{
"data",
"snapshots",
"index",
"locks",
"keys",
"tmp",
}
for _, d := range dirs {
os.MkdirAll(filepath.Join(*path, d), 0700)
}
// Define the routes
context := &Context{*path}
router := NewRouter()
router.HeadFunc("/config", CheckConfig(context))
router.GetFunc("/config", GetConfig(context))
router.PostFunc("/config", SaveConfig(context))
router.GetFunc("/:dir/", ListBlobs(context))
router.HeadFunc("/:dir/:name", CheckBlob(context))
router.GetFunc("/:type/:name", GetBlob(context))
router.PostFunc("/:type/:name", SaveBlob(context))
router.DeleteFunc("/:type/:name", DeleteBlob(context))
// Check for a password file
var handler http.Handler
htpasswdFile, err := NewHtpasswdFromFile(filepath.Join(*path, ".htpasswd"))
if err != nil {
log.Println("Authentication disabled")
handler = router
} else {
log.Println("Authentication enabled")
handler = AuthHandler(htpasswdFile, router)
}
// start the server
if !*tls {
log.Printf("start server on port %s\n", defaultHTTPPort)
http.ListenAndServe(defaultHTTPPort, handler)
} else {
privateKey := filepath.Join(*path, "private_key")
publicKey := filepath.Join(*path, "public_key")
log.Println("TLS enabled")
log.Printf("private key: %s", privateKey)
log.Printf("public key: %s", publicKey)
log.Printf("start server on port %s\n", defaultHTTPSPort)
http.ListenAndServeTLS(defaultHTTPSPort, publicKey, privateKey, handler)
}
}

View File

@@ -7,7 +7,7 @@ import (
"sync"
"syscall"
"github.com/restic/restic/debug"
"restic/debug"
)
var cleanupHandlers struct {

View File

@@ -1,24 +1,28 @@
package main
import (
"bufio"
"errors"
"fmt"
"os"
"path/filepath"
"restic"
"restic/backend"
"restic/debug"
"restic/filter"
"strings"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/filter"
"github.com/restic/restic/repository"
"golang.org/x/crypto/ssh/terminal"
)
type CmdBackup struct {
Parent string `short:"p" long:"parent" description:"use this parent snapshot (default: last snapshot in repo that has the same target)"`
Force bool `short:"f" long:"force" description:"Force re-reading the target. Overrides the \"parent\" flag"`
Excludes []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
Parent string `short:"p" long:"parent" description:"use this parent snapshot (default: last snapshot in repo that has the same target)"`
Force bool `short:"f" long:"force" description:"Force re-reading the target. Overrides the \"parent\" flag"`
Excludes []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
ExcludeFile string `long:"exclude-file" description:"Read exclude-patterns from file"`
Stdin bool `long:"stdin" description:"read backup data from stdin"`
StdinFilename string `long:"stdin-filename" default:"stdin" description:"file name to use when reading from stdin"`
global *GlobalOptions
}
@@ -152,9 +156,13 @@ func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
if len(status1)+len(status2) > w {
max := w - len(status2) - 4
status1 = status1[:max] + "... "
maxlen := w - len(status2)
if maxlen < 4 {
status1 = ""
} else if len(status1) > maxlen {
status1 = status1[:maxlen-4]
status1 += "... "
}
}
@@ -168,49 +176,45 @@ func (cmd CmdBackup) newArchiveProgress(todo restic.Stat) *restic.Progress {
return archiveProgress
}
func samePaths(expected, actual []string) bool {
if expected == nil || actual == nil {
return true
func (cmd CmdBackup) newArchiveStdinProgress() *restic.Progress {
if !cmd.global.ShowProgress() {
return nil
}
if len(expected) != len(actual) {
return false
}
for i := range expected {
if expected[i] != actual[i] {
return false
archiveProgress := restic.NewProgress(time.Second)
var bps uint64
archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
sec := uint64(d / time.Second)
if s.Bytes > 0 && sec > 0 && ticker {
bps = s.Bytes / sec
}
}
return true
}
status1 := fmt.Sprintf("[%s] %s %s/s", formatDuration(d),
formatBytes(s.Bytes),
formatBytes(bps))
var errNoSnapshotFound = errors.New("no snapshot found")
w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
if err == nil {
maxlen := w - len(status1)
func findLatestSnapshot(repo *repository.Repository, targets []string) (backend.ID, error) {
var (
latest time.Time
latestID backend.ID
found bool
)
for snapshotID := range repo.List(backend.Snapshot, make(chan struct{})) {
snapshot, err := restic.LoadSnapshot(repo, snapshotID)
if err != nil {
return backend.ID{}, fmt.Errorf("Error listing snapshot: %v", err)
}
if snapshot.Time.After(latest) && samePaths(snapshot.Paths, targets) {
latest = snapshot.Time
latestID = snapshotID
found = true
if maxlen < 4 {
status1 = ""
} else if len(status1) > maxlen {
status1 = status1[:maxlen-4]
status1 += "... "
}
}
fmt.Printf("\x1b[2K%s\r", status1)
}
if !found {
return backend.ID{}, errNoSnapshotFound
archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\nduration: %s, %s\n", formatDuration(d), formatRate(s.Bytes, d))
}
return latestID, nil
return archiveProgress
}
// filterExisting returns a slice of all existing items, or an error if no
@@ -232,7 +236,41 @@ func filterExisting(items []string) (result []string, err error) {
return
}
func (cmd CmdBackup) readFromStdin(args []string) error {
if len(args) != 0 {
return fmt.Errorf("when reading from stdin, no additional files can be specified")
}
repo, err := cmd.global.OpenRepository()
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex()
if err != nil {
return err
}
_, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename)
if err != nil {
return err
}
fmt.Printf("archived as %v\n", id.Str())
return nil
}
func (cmd CmdBackup) Execute(args []string) error {
if cmd.Stdin {
return cmd.readFromStdin(args)
}
if len(args) == 0 {
return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
}
@@ -280,10 +318,10 @@ func (cmd CmdBackup) Execute(args []string) error {
// Find last snapshot to set it as parent, if not already set
if !cmd.Force && parentSnapshotID == nil {
id, err := findLatestSnapshot(repo, target)
id, err := restic.FindLatestSnapshot(repo, target, "")
if err == nil {
parentSnapshotID = &id
} else if err != errNoSnapshotFound {
} else if err != restic.ErrNoSnapshotFound {
return err
}
}
@@ -294,12 +332,34 @@ func (cmd CmdBackup) Execute(args []string) error {
cmd.global.Verbosef("scan %v\n", target)
// add patterns from file
if cmd.ExcludeFile != "" {
file, err := os.Open(cmd.ExcludeFile)
if err != nil {
cmd.global.Warnf("error reading exclude patterns: %v", err)
return nil
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, "#") {
line = os.ExpandEnv(line)
cmd.Excludes = append(cmd.Excludes, line)
}
}
}
selectFilter := func(item string, fi os.FileInfo) bool {
matched, err := filter.List(cmd.Excludes, item)
if err != nil {
cmd.global.Warnf("error for exclude pattern: %v", err)
}
if matched {
debug.Log("backup.Execute", "path %q excluded by a filter", item)
}
return !matched
}
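
A minimal sketch of the exclude logic added here, using the same `filter.List` call with the post-refactor import path `restic/filter`; the patterns and paths are illustrative:

```
package main

import (
	"fmt"

	"restic/filter"
)

func main() {
	excludes := []string{"*.tmp", "/home/*/cache"}

	for _, item := range []string{
		"/home/user/work/report.txt",
		"/home/user/work/scratch.tmp",
		"/home/user/cache",
	} {
		// filter.List reports whether any pattern matches item,
		// exactly as selectFilter above uses it.
		matched, err := filter.List(excludes, item)
		if err != nil {
			fmt.Printf("error for exclude pattern: %v\n", err)
		}
		fmt.Printf("include %v: %v\n", item, !matched)
	}
}
```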

View File

@@ -3,7 +3,7 @@ package main
import (
"fmt"
"github.com/restic/restic"
"restic"
)
type CmdCache struct {

View File

@@ -4,14 +4,13 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/pack"
"github.com/restic/restic/repository"
"restic"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/repository"
)
type CmdCat struct {
@@ -101,20 +100,19 @@ func (cmd CmdCat) Execute(args []string) error {
return nil
case "key":
rd, err := repo.Backend().Get(backend.Key, id.String())
h := backend.Handle{Type: backend.Key, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil {
return err
}
dec := json.NewDecoder(rd)
var key repository.Key
err = dec.Decode(&key)
key := &repository.Key{}
err = json.Unmarshal(buf, key)
if err != nil {
return err
}
buf, err := json.MarshalIndent(&key, "", " ")
buf, err = json.MarshalIndent(&key, "", " ")
if err != nil {
return err
}
@@ -153,26 +151,23 @@ func (cmd CmdCat) Execute(args []string) error {
switch tpe {
case "pack":
rd, err := repo.Backend().Get(backend.Data, id.String())
h := backend.Handle{Type: backend.Data, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil {
return err
}
_, err = io.Copy(os.Stdout, rd)
_, err = os.Stdout.Write(buf)
return err
case "blob":
_, blobType, _, length, err := repo.Index().Lookup(id)
blob, err := repo.Index().Lookup(id)
if err != nil {
return err
}
if blobType != pack.Data {
return errors.New("wrong type for blob")
}
buf := make([]byte, length)
data, err := repo.LoadBlob(pack.Data, id, buf)
buf := make([]byte, blob.Length)
data, err := repo.LoadBlob(blob.Type, id, buf)
if err != nil {
return err
}

View File

@@ -0,0 +1,165 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"time"
+
+	"golang.org/x/crypto/ssh/terminal"
+
+	"restic"
+	"restic/checker"
+)
+
+type CmdCheck struct {
+	ReadData bool `long:"read-data" default:"false" description:"Read data blobs"`
+	CheckUnused bool `long:"check-unused" default:"false" description:"Check for unused blobs"`
+
+	global *GlobalOptions
+}
+
+func init() {
+	_, err := parser.AddCommand("check",
+		"check the repository",
+		"The check command check the integrity and consistency of the repository",
+		&CmdCheck{global: &globalOpts})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (cmd CmdCheck) Usage() string {
+	return "[check-options]"
+}
+
+func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
+	if !cmd.global.ShowProgress() {
+		return nil
+	}
+
+	readProgress := restic.NewProgress(time.Second)
+	readProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
+		status := fmt.Sprintf("[%s] %s %d / %d items",
+			formatDuration(d),
+			formatPercent(s.Blobs, todo.Blobs),
+			s.Blobs, todo.Blobs)
+
+		w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
+		if err == nil {
+			if len(status) > w {
+				max := w - len(status) - 4
+				status = status[:max] + "... "
+			}
+		}
+
+		fmt.Printf("\x1b[2K%s\r", status)
+	}
+
+	readProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
+		fmt.Printf("\nduration: %s\n", formatDuration(d))
+	}
+
+	return readProgress
+}
+
+func (cmd CmdCheck) Execute(args []string) error {
+	if len(args) != 0 {
+		return errors.New("check has no arguments")
+	}
+
+	repo, err := cmd.global.OpenRepository()
+	if err != nil {
+		return err
+	}
+
+	if !cmd.global.NoLock {
+		cmd.global.Verbosef("Create exclusive lock for repository\n")
+		lock, err := lockRepoExclusive(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
+	}
+
+	chkr := checker.New(repo)
+
+	cmd.global.Verbosef("Load indexes\n")
+	hints, errs := chkr.LoadIndex()
+
+	dupFound := false
+	for _, hint := range hints {
+		cmd.global.Printf("%v\n", hint)
+		if _, ok := hint.(checker.ErrDuplicatePacks); ok {
+			dupFound = true
+		}
+	}
+
+	if dupFound {
+		cmd.global.Printf("\nrun `restic rebuild-index' to correct this\n")
+	}
+
+	if len(errs) > 0 {
+		for _, err := range errs {
+			cmd.global.Warnf("error: %v\n", err)
+		}
+		return fmt.Errorf("LoadIndex returned errors")
+	}
+
+	done := make(chan struct{})
+	defer close(done)
+
+	errorsFound := false
+	errChan := make(chan error)
+
+	cmd.global.Verbosef("Check all packs\n")
+	go chkr.Packs(errChan, done)
+
+	for err := range errChan {
+		errorsFound = true
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+	}
+
+	cmd.global.Verbosef("Check snapshots, trees and blobs\n")
+	errChan = make(chan error)
+	go chkr.Structure(errChan, done)
+
+	for err := range errChan {
+		errorsFound = true
+		if e, ok := err.(checker.TreeError); ok {
+			fmt.Fprintf(os.Stderr, "error for tree %v:\n", e.ID.Str())
+			for _, treeErr := range e.Errors {
+				fmt.Fprintf(os.Stderr, " %v\n", treeErr)
+			}
+		} else {
+			fmt.Fprintf(os.Stderr, "error: %v\n", err)
+		}
+	}
+
+	if cmd.CheckUnused {
+		for _, id := range chkr.UnusedBlobs() {
+			cmd.global.Verbosef("unused blob %v\n", id.Str())
+			errorsFound = true
+		}
+	}
+
+	if cmd.ReadData {
+		cmd.global.Verbosef("Read all data\n")
+
+		p := cmd.newReadProgress(restic.Stat{Blobs: chkr.CountPacks()})
+		errChan := make(chan error)
+
+		go chkr.ReadData(p, errChan, done)
+		for err := range errChan {
+			errorsFound = true
+			fmt.Fprintf(os.Stderr, "%v\n", err)
+		}
+	}
+
+	if errorsFound {
+		return errors.New("repository contains errors")
+	}
+
+	return nil
+}
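Every phase of the new check command uses the same channel idiom: a goroutine walks part of the repository, sends each error it finds on `errChan`, and closes the channel when it is done, so the consumer is a plain `range` loop that only records whether anything failed. A minimal stand-alone version of the idiom (`checkItem` is an invented placeholder for work such as `chkr.Packs`):

package main

import (
	"fmt"
	"os"
)

// checkItem is a placeholder for real verification work.
func checkItem(i int) error {
	if i%3 == 0 {
		return fmt.Errorf("item %d is damaged", i)
	}
	return nil
}

func main() {
	errChan := make(chan error)

	// Producer: report all errors, then close the channel to end the range.
	go func() {
		defer close(errChan)
		for i := 0; i < 10; i++ {
			if err := checkItem(i); err != nil {
				errChan <- err
			}
		}
	}()

	// Consumer: drain the channel; remember whether anything went wrong.
	errorsFound := false
	for err := range errChan {
		errorsFound = true
		fmt.Fprintf(os.Stderr, "%v\n", err)
	}

	if errorsFound {
		os.Exit(1)
	}
}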

src/cmds/restic/cmd_dump.go

@@ -0,0 +1,258 @@
+// +build debug
+
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+
+	"restic"
+	"restic/backend"
+	"restic/pack"
+	"restic/repository"
+	"restic/worker"
+
+	"github.com/juju/errors"
+)
+
+type CmdDump struct {
+	global *GlobalOptions
+
+	repo *repository.Repository
+}
+
+func init() {
+	_, err := parser.AddCommand("dump",
+		"dump data structures",
+		"The dump command dumps data structures from a repository as JSON documents",
+		&CmdDump{global: &globalOpts})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (cmd CmdDump) Usage() string {
+	return "[indexes|snapshots|trees|all|packs]"
+}
+
+func prettyPrintJSON(wr io.Writer, item interface{}) error {
+	buf, err := json.MarshalIndent(item, "", " ")
+	if err != nil {
+		return err
+	}
+
+	_, err = wr.Write(append(buf, '\n'))
+	return err
+}
+
+func printSnapshots(repo *repository.Repository, wr io.Writer) error {
+	done := make(chan struct{})
+	defer close(done)
+
+	for id := range repo.List(backend.Snapshot, done) {
+		snapshot, err := restic.LoadSnapshot(repo, id)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
+			continue
+		}
+
+		fmt.Fprintf(wr, "snapshot_id: %v\n", id)
+
+		err = prettyPrintJSON(wr, snapshot)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func printTrees(repo *repository.Repository, wr io.Writer) error {
+	done := make(chan struct{})
+	defer close(done)
+
+	trees := []backend.ID{}
+
+	for _, idx := range repo.Index().All() {
+		for blob := range idx.Each(nil) {
+			if blob.Type != pack.Tree {
+				continue
+			}
+			trees = append(trees, blob.ID)
+		}
+	}
+
+	for _, id := range trees {
+		tree, err := restic.LoadTree(repo, id)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
+			continue
+		}
+
+		fmt.Fprintf(wr, "tree_id: %v\n", id)
+
+		prettyPrintJSON(wr, tree)
+	}
+
+	return nil
+}
+
+const dumpPackWorkers = 10
+
+// Pack is the struct used in printPacks.
+type Pack struct {
+	Name string `json:"name"`
+	Blobs []Blob `json:"blobs"`
+}
+
+// Blob is the struct used in printPacks.
+type Blob struct {
+	Type pack.BlobType `json:"type"`
+	Length uint `json:"length"`
+	ID backend.ID `json:"id"`
+	Offset uint `json:"offset"`
+}
+
+func printPacks(repo *repository.Repository, wr io.Writer) error {
+	done := make(chan struct{})
+	defer close(done)
+
+	f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
+		name := job.Data.(string)
+
+		h := backend.Handle{Type: backend.Data, Name: name}
+		rd := backend.NewReadSeeker(repo.Backend(), h)
+
+		unpacker, err := pack.NewUnpacker(repo.Key(), rd)
+		if err != nil {
+			return nil, err
+		}
+
+		return unpacker.Entries, nil
+	}
+
+	jobCh := make(chan worker.Job)
+	resCh := make(chan worker.Job)
+	wp := worker.New(dumpPackWorkers, f, jobCh, resCh)
+
+	go func() {
+		for name := range repo.Backend().List(backend.Data, done) {
+			jobCh <- worker.Job{Data: name}
+		}
+		close(jobCh)
+	}()
+
+	for job := range resCh {
+		name := job.Data.(string)
+
+		if job.Error != nil {
+			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
+			continue
+		}
+
+		entries := job.Result.([]pack.Blob)
+
+		p := Pack{
+			Name: name,
+			Blobs: make([]Blob, len(entries)),
+		}
+
+		for i, blob := range entries {
+			p.Blobs[i] = Blob{
+				Type: blob.Type,
+				Length: blob.Length,
+				ID: blob.ID,
+				Offset: blob.Offset,
+			}
+		}
+
+		prettyPrintJSON(os.Stdout, p)
+	}
+
+	wp.Wait()
+
+	return nil
+}
+
+func (cmd CmdDump) DumpIndexes() error {
+	done := make(chan struct{})
+	defer close(done)
+
+	for id := range cmd.repo.List(backend.Index, done) {
+		fmt.Printf("index_id: %v\n", id)
+
+		idx, err := repository.LoadIndex(cmd.repo, id)
+		if err != nil {
+			return err
+		}
+
+		err = idx.Dump(os.Stdout)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (cmd CmdDump) Execute(args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("type not specified, Usage: %s", cmd.Usage())
+	}
+
+	repo, err := cmd.global.OpenRepository()
+	if err != nil {
+		return err
+	}
+	cmd.repo = repo
+
+	lock, err := lockRepo(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	err = repo.LoadIndex()
+	if err != nil {
+		return err
+	}
+
+	tpe := args[0]
+	switch tpe {
+	case "indexes":
+		return cmd.DumpIndexes()
+	case "snapshots":
+		return printSnapshots(repo, os.Stdout)
+	case "trees":
+		return printTrees(repo, os.Stdout)
+	case "packs":
+		return printPacks(repo, os.Stdout)
+	case "all":
+		fmt.Printf("snapshots:\n")
+		err := printSnapshots(repo, os.Stdout)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("\ntrees:\n")
+		err = printTrees(repo, os.Stdout)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("\nindexes:\n")
+		err = cmd.DumpIndexes()
+		if err != nil {
+			return err
+		}
+
+		return nil
+	default:
+		return errors.Errorf("no such type %q", tpe)
+	}
+}
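printPacks fans the pack listings out over `dumpPackWorkers` goroutines through the `restic/worker` pool: jobs go in on `jobCh`, completed jobs with `Result` or `Error` set come back on `resCh`. Roughly the same machinery can be built with the standard library alone; the sketch below is an invented stand-in for `restic/worker`, not its implementation:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// job mimics the shape of restic/worker.Job as used in the diff: input in
// Data, output in Result, failure in Error.
type job struct {
	Data   string
	Result string
	Error  error
}

func main() {
	const workers = 10

	jobCh := make(chan job)
	resCh := make(chan job)

	// Start a fixed number of workers that read jobs, run the work
	// function, and forward the completed job on the result channel.
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobCh {
				j.Result = strings.ToUpper(j.Data) // stand-in for listing a pack
				resCh <- j
			}
		}()
	}

	// Feeder: enqueue all work, then close the job channel.
	go func() {
		for _, name := range []string{"pack-1", "pack-2", "pack-3"} {
			jobCh <- job{Data: name}
		}
		close(jobCh)
	}()

	// Close the result channel once every worker has drained jobCh.
	go func() {
		wg.Wait()
		close(resCh)
	}()

	for j := range resCh {
		if j.Error != nil {
			fmt.Printf("error for %v: %v\n", j.Data, j.Error)
			continue
		}
		fmt.Println(j.Result)
	}
}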

src/cmds/restic/cmd_find.go

@@ -5,10 +5,10 @@ import (
"path/filepath"
"time"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
"github.com/restic/restic/repository"
"restic"
"restic/backend"
"restic/debug"
"restic/repository"
)
type findResult struct {
@@ -94,7 +94,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str
 	}

 	if node.Type == "dir" {
-		subdirResults, err := c.findInTree(repo, id, filepath.Join(path, node.Name))
+		subdirResults, err := c.findInTree(repo, *node.Subtree, filepath.Join(path, node.Name))
 		if err != nil {
 			return nil, err
 		}
@@ -122,11 +122,10 @@ func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) erro
 	if len(results) == 0 {
 		return nil
 	}

-	fmt.Printf("found %d matching entries in snapshot %s\n", len(results), id)
+	c.global.Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
+
 	for _, res := range results {
 		res.node.Name = filepath.Join(res.path, res.node.Name)
-		fmt.Printf(" %s\n", res.node)
+		c.global.Printf(" %s\n", res.node)
 	}

 	return nil
@@ -138,7 +137,7 @@ func (CmdFind) Usage() string {
 func (c CmdFind) Execute(args []string) error {
 	if len(args) != 1 {
-		return fmt.Errorf("invalid number of arguments, Usage: %s", c.Usage())
+		return fmt.Errorf("wrong number of arguments, Usage: %s", c.Usage())
 	}

 	var err error
@@ -168,6 +167,11 @@ func (c CmdFind) Execute(args []string) error {
 		return err
 	}

+	err = repo.LoadIndex()
+	if err != nil {
+		return err
+	}
+
 	c.pattern = args[0]

 	if c.Snapshot != "" {

src/cmds/restic/cmd_init.go

@@ -3,7 +3,7 @@ package main
 import (
 	"errors"

-	"github.com/restic/restic/repository"
+	"restic/repository"
 )

 type CmdInit struct {
@@ -27,6 +27,7 @@ func (cmd CmdInit) Execute(args []string) error {
 	}

 	s := repository.New(be)
 	err = s.Init(cmd.global.password)
 	if err != nil {
 		cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err)

src/cmds/restic/cmd_key.go

@@ -4,8 +4,8 @@ import (
"errors"
"fmt"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
"restic/backend"
"restic/repository"
)
type CmdKey struct {

src/cmds/restic/cmd_list.go

@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
"github.com/restic/restic/backend"
"restic/backend"
)
type CmdList struct {
@@ -35,10 +35,12 @@ func (cmd CmdList) Execute(args []string) error {
 		return err
 	}

-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
+	if !cmd.global.NoLock {
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
 	}

 	var t backend.Type
@@ -49,8 +51,10 @@ func (cmd CmdList) Execute(args []string) error {
 		return err
 	}

-	for blob := range repo.Index().Each(nil) {
-		cmd.global.Printf("%s\n", blob.ID)
+	for _, idx := range repo.Index().All() {
+		for blob := range idx.Each(nil) {
+			cmd.global.Printf("%s\n", blob.ID)
+		}
 	}

 	return nil

src/cmds/restic/cmd_ls.go

@@ -5,9 +5,9 @@ import (
"os"
"path/filepath"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
"restic"
"restic/backend"
"restic/repository"
)
type CmdLs struct {

src/cmds/restic/cmd_mount.go

@@ -7,7 +7,7 @@ import (
"fmt"
"os"
"github.com/restic/restic/fuse"
"restic/fuse"
systemFuse "bazil.org/fuse"
"bazil.org/fuse/fs"
@@ -98,10 +98,10 @@ func (cmd CmdMount) Execute(args []string) error {
 	case err := <-errServe:
 		return err

 	case <-cmd.done:
-		err := c.Close()
+		err := systemFuse.Unmount(mountpoint)
 		if err != nil {
-			cmd.global.Printf("Error closing fuse connection: %s\n", err)
+			cmd.global.Printf("Error umounting: %s\n", err)
 		}

-		return systemFuse.Unmount(mountpoint)
+		return c.Close()
 	}
 }

src/cmds/restic/cmd_rebuild_index.go

@@ -0,0 +1,131 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"restic/backend"
+	"restic/debug"
+	"restic/pack"
+	"restic/repository"
+	"restic/worker"
+)
+
+type CmdRebuildIndex struct {
+	global *GlobalOptions
+
+	repo *repository.Repository
+}
+
+func init() {
+	_, err := parser.AddCommand("rebuild-index",
+		"rebuild the index",
+		"The rebuild-index command builds a new index",
+		&CmdRebuildIndex{global: &globalOpts})
+	if err != nil {
+		panic(err)
+	}
+}
+
+const rebuildIndexWorkers = 10
+
+func loadBlobsFromPacks(repo *repository.Repository) (packs map[backend.ID][]pack.Blob) {
+	done := make(chan struct{})
+	defer close(done)
+
+	f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
+		return repo.ListPack(job.Data.(backend.ID))
+	}
+
+	jobCh := make(chan worker.Job)
+	resCh := make(chan worker.Job)
+	wp := worker.New(rebuildIndexWorkers, f, jobCh, resCh)
+
+	go func() {
+		for id := range repo.List(backend.Data, done) {
+			jobCh <- worker.Job{Data: id}
+		}
+		close(jobCh)
+	}()
+
+	packs = make(map[backend.ID][]pack.Blob)
+	for job := range resCh {
+		id := job.Data.(backend.ID)
+
+		if job.Error != nil {
+			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
+			continue
+		}
+
+		entries := job.Result.([]pack.Blob)
+		packs[id] = entries
+	}
+
+	wp.Wait()
+
+	return packs
+}
+
+func listIndexIDs(repo *repository.Repository) (list backend.IDs) {
+	done := make(chan struct{})
+	for id := range repo.List(backend.Index, done) {
+		list = append(list, id)
+	}
+
+	return list
+}
+
+func (cmd CmdRebuildIndex) rebuildIndex() error {
+	debug.Log("RebuildIndex.RebuildIndex", "start rebuilding index")
+
+	packs := loadBlobsFromPacks(cmd.repo)
+	cmd.global.Verbosef("loaded blobs from %d packs\n", len(packs))
+
+	idx := repository.NewIndex()
+	for packID, entries := range packs {
+		for _, entry := range entries {
+			pb := repository.PackedBlob{
+				ID: entry.ID,
+				Type: entry.Type,
+				Length: entry.Length,
+				Offset: entry.Offset,
+				PackID: packID,
+			}
+			idx.Store(pb)
+		}
+	}
+
+	oldIndexes := listIndexIDs(cmd.repo)
+	idx.AddToSupersedes(oldIndexes...)
+	cmd.global.Printf(" saving new index\n")
+
+	id, err := repository.SaveIndex(cmd.repo, idx)
+	if err != nil {
+		debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
+		return err
+	}
+	debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
+
+	for _, indexID := range oldIndexes {
+		err := cmd.repo.Backend().Remove(backend.Index, indexID.String())
+		if err != nil {
+			cmd.global.Warnf("unable to remove index %v: %v\n", indexID.Str(), err)
+		}
+	}
+
+	return nil
+}
+
+func (cmd CmdRebuildIndex) Execute(args []string) error {
+	repo, err := cmd.global.OpenRepository()
+	if err != nil {
+		return err
+	}
+	cmd.repo = repo
+
+	lock, err := lockRepoExclusive(repo)
+	defer unlockRepo(lock)
+	if err != nil {
+		return err
+	}
+
+	return cmd.rebuildIndex()
+}
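Conceptually the rebuild is an inversion: each pack knows which blobs it contains, while the index must map every blob back to its pack plus offset and length, after which the old index files are superseded and removed. A toy version of just the inversion step (all types below are invented stand-ins for `pack.Blob` and `repository.PackedBlob`):

package main

import "fmt"

// packEntry is a toy version of what listing a pack yields.
type packEntry struct {
	ID     string
	Offset uint
	Length uint
}

// location is a toy version of an index entry: where a blob lives.
type location struct {
	PackID string
	Offset uint
	Length uint
}

func main() {
	// What loadBlobsFromPacks produces: contents per pack.
	packs := map[string][]packEntry{
		"pack-a": {{ID: "blob-1", Offset: 0, Length: 100}, {ID: "blob-2", Offset: 100, Length: 50}},
		"pack-b": {{ID: "blob-3", Offset: 0, Length: 70}},
	}

	// The rebuilt index: blob ID -> pack, offset, and length.
	index := make(map[string]location)
	for packID, entries := range packs {
		for _, e := range entries {
			index[e.ID] = location{PackID: packID, Offset: e.Offset, Length: e.Length}
		}
	}

	for id, loc := range index {
		fmt.Printf("%s: pack %s, offset %d, length %d\n", id, loc.PackID, loc.Offset, loc.Length)
	}
}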

src/cmds/restic/cmd_restore.go

@@ -4,15 +4,18 @@ import (
"errors"
"fmt"
"github.com/restic/restic"
"github.com/restic/restic/debug"
"github.com/restic/restic/filter"
"restic"
"restic/backend"
"restic/debug"
"restic/filter"
)
type CmdRestore struct {
Exclude []string `short:"e" long:"exclude" description:"Exclude a pattern (can be specified multiple times)"`
Include []string `short:"i" long:"include" description:"Include a pattern, exclude everything else (can be specified multiple times)"`
Target string `short:"t" long:"target" description:"Directory to restore to"`
Host string `short:"h" long:"host" description:"Source Filter (for id=latest)"`
Paths []string `short:"p" long:"path" description:"Path Filter (absolute path;for id=latest) (can be specified multiple times)"`
global *GlobalOptions
}
@@ -53,10 +56,12 @@ func (cmd CmdRestore) Execute(args []string) error {
 		return err
 	}

-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
+	if !cmd.global.NoLock {
+		lock, err := lockRepo(repo)
+		defer unlockRepo(lock)
+		if err != nil {
+			return err
+		}
 	}

 	err = repo.LoadIndex()
@@ -64,9 +69,18 @@ func (cmd CmdRestore) Execute(args []string) error {
 		return err
 	}

-	id, err := restic.FindSnapshot(repo, snapshotIDString)
-	if err != nil {
-		cmd.global.Exitf(1, "invalid id %q: %v", snapshotIDString, err)
+	var id backend.ID
+
+	if snapshotIDString == "latest" {
+		id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host)
+		if err != nil {
+			cmd.global.Exitf(1, "latest snapshot for criteria not found: %v Paths:%v Host:%v", err, cmd.Paths, cmd.Host)
+		}
+	} else {
+		id, err = restic.FindSnapshot(repo, snapshotIDString)
+		if err != nil {
+			cmd.global.Exitf(1, "invalid id %q: %v", snapshotIDString, err)
+		}
 	}

 	res, err := restic.NewRestorer(repo, id)
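Restoring `latest` means filtering all snapshots by the optional host and path criteria and keeping the one with the most recent timestamp, which is what `restic.FindLatestSnapshot` does for the code above. A stand-alone sketch of that selection under assumed matching rules (the snapshot struct is invented, and restic's actual path comparison may differ from the subset check used here):

package main

import (
	"errors"
	"fmt"
	"time"
)

// snapshot is an invented stand-in for restic's snapshot metadata.
type snapshot struct {
	ID       string
	Time     time.Time
	Hostname string
	Paths    []string
}

// matches applies the host filter and requires every requested path to be
// present in the snapshot (an assumption for this sketch).
func matches(sn snapshot, host string, paths []string) bool {
	if host != "" && host != sn.Hostname {
		return false
	}
	for _, p := range paths {
		found := false
		for _, sp := range sn.Paths {
			if sp == p {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

// findLatest mimics the shape of restic.FindLatestSnapshot.
func findLatest(sns []snapshot, paths []string, host string) (string, error) {
	var best *snapshot
	for i := range sns {
		sn := &sns[i]
		if !matches(*sn, host, paths) {
			continue
		}
		if best == nil || sn.Time.After(best.Time) {
			best = sn
		}
	}
	if best == nil {
		return "", errors.New("no snapshot found for criteria")
	}
	return best.ID, nil
}

func main() {
	now := time.Now()
	sns := []snapshot{
		{ID: "aaaa", Time: now.Add(-48 * time.Hour), Hostname: "box", Paths: []string{"/home"}},
		{ID: "bbbb", Time: now.Add(-1 * time.Hour), Hostname: "box", Paths: []string{"/home"}},
	}
	id, err := findLatest(sns, []string{"/home"}, "box")
	fmt.Println(id, err)
}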

src/cmds/restic/cmd_snapshots.go

@@ -8,8 +8,8 @@ import (
"sort"
"strings"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"restic"
"restic/backend"
)
type Table struct {
@@ -48,6 +48,9 @@ func (t Table) Write(w io.Writer) error {
const TimeFormat = "2006-01-02 15:04:05"
type CmdSnapshots struct {
Host string `short:"h" long:"host" description:"Host Filter"`
Paths []string `short:"p" long:"path" description:"Path Filter (absolute path) (can be specified multiple times)"`
global *GlobalOptions
}
@@ -82,7 +85,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
 	}

 	tab := NewTable()
-	tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Source", "Directory")
+	tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Host", "Directory")
 	tab.RowFormat = "%-8s %-19s %-10s %s"

 	done := make(chan struct{})
@@ -96,17 +99,20 @@ func (cmd CmdSnapshots) Execute(args []string) error {
 			continue
 		}

-		pos := sort.Search(len(list), func(i int) bool {
-			return list[i].Time.After(sn.Time)
-		})
+		if restic.SamePaths(sn.Paths, cmd.Paths) && (cmd.Host == "" || cmd.Host == sn.Hostname) {
+			pos := sort.Search(len(list), func(i int) bool {
+				return list[i].Time.After(sn.Time)
+			})

-		if pos < len(list) {
-			list = append(list, nil)
-			copy(list[pos+1:], list[pos:])
-			list[pos] = sn
-		} else {
-			list = append(list, sn)
+			if pos < len(list) {
+				list = append(list, nil)
+				copy(list[pos+1:], list[pos:])
+				list[pos] = sn
+			} else {
+				list = append(list, sn)
+			}
 		}
 	}

 	plen, err := repo.PrefixLength(backend.Snapshot)
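The snapshot listing keeps `list` sorted by time as results stream in: `sort.Search` finds the insertion point, and the `append`/`copy` pair shifts the tail right by one slot. The same idiom on a plain `[]int`:

package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts v into the ascending slice s, keeping it sorted,
// using the same sort.Search + append/copy shuffle as the diff above.
func insertSorted(s []int, v int) []int {
	pos := sort.Search(len(s), func(i int) bool { return s[i] > v })
	if pos < len(s) {
		s = append(s, 0)         // grow by one element
		copy(s[pos+1:], s[pos:]) // shift the tail right
		s[pos] = v
	} else {
		s = append(s, v) // v is the largest element: plain append
	}
	return s
}

func main() {
	list := []int{}
	for _, v := range []int{5, 1, 4, 2, 3} {
		list = insertSorted(list, v)
	}
	fmt.Println(list) // [1 2 3 4 5]
}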

src/cmds/restic/cmd_unlock.go

@@ -1,6 +1,6 @@
 package main

-import "github.com/restic/restic"
+import "restic"

 type CmdUnlock struct {
 	RemoveAll bool `long:"remove-all" description:"Remove all locks, even stale ones"`

Some files were not shown because too many files have changed in this diff Show More