Mirror of https://github.com/rclone/rclone.git (synced 2026-01-27 14:53:20 +00:00)

Compare commits: v1.40...hensur-bug (486 commits)
Commits (SHA1):

ecb3f04370 a25875170b a288646419 b3d8bc61ac 7accd30da8 9594fd0a0c aac84c554a 5716a58413
233507bfe0 5b27702b61 b2a6a97443 2543278c3f 19cf3bb9e7 3c44ef788a e5663de09e 7cfd4e56f8
282540c2d4 1e7a7d756f f6ee0795ac eb5a95e7de 5b1c162fb2 d51501938a a823d518ac 87d4e32a6b
820d2a7149 9fe39f25e1 1b2cc781e5 e05ec2b77e 9e3ea3c6ac 0fb12112f5 1b95ca2852 e07a850be3
3fccce625c a1f935e815 22ee4151fd cc23ad71ce 57b9fff904 692ad482dc c6f1c3c7f6 164d1e05ca
c644241392 89be5cadaa f326f94b97 3d2117887d 5a6750e1cd 6b8b9d19f3 4ca26eb38c 37b2754f37
172beb2ae3 5eba392a04 deda093637 a4c4019032 2e37942592 09d7bd2d40 ff0efb1501 0f1d4a7ca8
a0b3fd3a33 cdbe3691b7 3a0b3b0f6e d3afef3e1b f4aaec9ce5 bd5d326160 5b9b9f1572 571c8754de
fb9a95e68e 85e0839c8b 1749fb8ebf e114be11ec b709f73aab 05a615ef22 76450c01f3 86e64c626c
9b827be418 7e5c6725c1 543d75723b b4d94f255a b0dd218fea 6396872d75 6cf684f2a1 20c55a6829
a3fec7f030 8e2b3268be 32ab4e9ac6 b4d86d5450 d49ba652e2 7d74686698 2d7c5ebc7a 86e3436d55
f243d2a309 aaa3d7e63b e4c5f248c0 5afaa48d06 1c578ced1c de6ec8056f 66fe4a2523 f5617dadf3
5e75a9ef5c da1682a30e 5c75453aba 84ef6b2ae6 7194c358ad 502d8b0cdd ca44fb1fba 842ed7d2a9
b3217d2cac 94950258a4 8656bd2bb0 174ca22936 4eefd05dcf 1cccfa7331 18685e6b0b b6db90cc32
b596ccdf0f a64e0922b9 3751ceebdd 9f671c5dd0 c6c74cb869 f9cf70e3aa ee4485a316 455219f501
1b8f4b616c f818df52b8 29fa840d3a 7712a0e111 77806494c8 ff8de59d2b c19d1ae9a5 64ecc2f587
7c911bf2d6 41f709e13b 6c5ccf26b1 6dc5aa7454 552eb8e06b 7d35b14138 6199b95b61 040768383b
6390dec7db 80a3db34a8 cb7a461287 eb84b58d3c 58339a5cb6 751bfd456f 990919f268 6301e15b79
007c7757d4 dd3e912731 10ed455777 05bec70c3e c54f5a781e 6156bc5806 e979cd62c1 687477b34d
40d383e223 6bfdabab6d f7c1c61dda c1f5add049 8989c367c4 d95667b06d 0f845e3a59 2e80d4c18e
6349147af4 782972088d 38381d3786 eb6aafbd14 7f3d5c31d9 578f56bba7 f7c0b2407d dc5a734522
3c2ffa7f57 06c9f76cd2 44abf6473e b99595b7ce a119ca9f10 ffd11662ba 1f3778dbfb f9eb9c894d
f7a92f2c15 42959fe9c3 f72eade707 bbda4ab1f1 916b6e3a40 dd670ad1db 7983b6bdca 9815b09d90
9c90b5e77c 01af8e2905 f06ba393b8 473e3c3eb8 ab78eb13e4 b1f31c2acf dcc74fa404 6759d36e2f
a4797014c9 4d7d240c12 d046402d80 9bdf465c10 f3f48d7d49 3c89406886 85d09729f2 b3bd2d1c9e
4c586a9264 1c80e84f8a 028f8a69d3 b0d1fa1d6b dbb4b2c900 99201f8ba4 5ad8bcb43a 6efedc4043
a3d9a38f51 b1bd17a220 793f594b07 4fe6614ae1 4c2fbf9b36 ed4f1b2936 144c1a04d4 25ec7f5c00
b15603d5ea 71c974bf9a 03c5b8232e 72392a2d72 b062ae9d13 8c0335a176 794e55de27 038ed1aaf0
97beff5370 b9b9bce0db 947e10eb2b 6b42421374 fa051ff970 69164b3dda 935533e57f 1550f70865
1a65c3a740 a29a1de43d e7ae5e8ee0 56e1e82005 8442498693 08021c4636 3f0789e2db 7110349547
a9adb43896 c47a4c9703 d9d00a7dd7 b82e66daaa 7d2861ead6 aaa8591661 4df1794932 d18928962c
339fbf0df5 13ccb39819 f9a1a7e700 1c75581959 4d793b8ee8 9289aead9b ce109ed9c0 d7ac4ca44e
1053d7e123 017297af70 4e8e5fed7d c0f772bc14 334ef28012 da45dadfe9 05edb5f501 04d18d2a07
f1269dc06a c5286ee157 ba43acb6aa 8a84975993 d758e1908e 737aed8412 4009fb67c8 3ef938ebde
5302e5f9b1 de8c7d8e45 2a29f7f6c8 2b332bced2 aad75e6720 2a806a8d8b 500085d244 3d8e529441
6607d8752c 67e9ef4547 d4213c0ac5 3a2248aa5f 573ef4c8ee 7bf2d389a8 71b4f1ccab e5ff375948
512f4b4487 a38f8b87ce 9697754707 8e625e0bc3 e52ecba295 e62d2fd309 e56be0dfd8 2a32e2d838
db4c206e0e f77efc7649 aadbcce486 f162116132 909c3a92d6 826975c341 6791cf7d7f d022c81d99
cdde8fa75a 5ede6f6d09 53292527bb ec9894da07 ad02d1be3f 63f413f477 f1ffe8e309 d85b9bc9d6
b07e51cf73 f073db81b1 9698a2babb 5eecbd83ee e42edc8e8c 291954baba 9d8d7ae1f0 6ce32e4661
1755ffd1f3 aa5c5ec5d3 e80ae4e09c 1320e84bc2 cb5bd47e61 790a8a9aed f1a43eca4d 7ea68f1fc6
6427029c4e 21383877df f95835d613 be79b47a7a be22735609 1b1b3c13cd 5c128272fd d178233e74
98bf65c43b 3b5e70c8c6 bd3ad1ac3e 9fdf273614 fe25cb9c54 f2608e2a64 a5f1811892 50dc5fe92e
b7d2048032 3116249692 d049e5c680 1c9572aba1 76f2cbeb94 0479c7dcf5 55674c0bfc e4c380b2a8
74cbdea0ef a3bf6b9c2c 0daced29db b78af517de d8e88f10cd 849db6699d a81ec00a8c da4a5e1fb3
ae562b5a4f c01177bc28 9f04ce282e 764440068e a703216286 96a62d55a2 d0f32b62fd 7c5f87842c
cc8799e0d6 da214973a1 be8bd89674 9ab2521ef2 21a10e58c9 d36b80f587 24980d7123 870c58f7f8
b3c6f5f4b8 311a962011 da7a77ef2e 9fbc40c5b9 56ce784301 8fe3037301 ba7ae2ee8c dc59836021
1a3fb21a77 bcdb7719c6 c51d97c752 57a5b72d60 34ba17deec e3a1bc9cd3 a35e62e15c d1ca8b8959
a0c65deca8 1f255a8567 f50b85278a 9948b39dba 2b855751fc ef3bcec76c 1ac6dacf0f 94e277d759
b83814082b 2b7957cc74 3d5106e52b 29ce1c2747 dc247d21ff 8c3740c2c5 acd5d4377e 9e4cd55477
2015f98f0c 0e6faa2313 905e40b3e6 1db68571fd 6b67489133 27dfcf303c e6d9720d7b 196da4d903
18317a2747 ef412c1985 d97fe3b824 792c9e185e 1f681e585b e82452ce9a dcf8334673 37be78705d
4b5ff33125 d5b2ec32f1 aeedacfb50 92b266d361 05e32cfcf9 cbec59146a 06e3fa3aba 0fa700b3cf
42f0963bf9 be54fd8f70 e5be471ce0 80588a5a6b 67023f0040 32e02bd367 c749cf8d99 92cfb57fbd
0cb5c4aa73 0358e9e724 a69d8ec93b 92c5aa3786 fbe1c7f1ea c4531daa43 6e11a25df5 0865e38917
ab2fa59fc4 e13f65b953 5b8977a053 1dea99ab20 06a8d3011d e7fd607078 eca99b33c0 e42cee5e02
d45c750f76 2c2bb0f750 a8267d1628 9df266a6b4 4d553ef701 1ba3ffdc59 72f1b097a7 885044d0a5
6c10312c75 e5aa5fe7d8 9b140b42c9 0bfbde8856 98a924602f 7e80e609e8 91b068ad3a b52e34ef5e
32e6eee341 c5f1d501ed 0ed0d9a7bc d9c13bff83 ce91289b09 5ba5be9b37 e9a2cbec37 4f6f07c074
f6020f1308 a46f2a9eb7 911a78ce6d d64789528d 940df88eb2 19ca9fb939
@@ -4,6 +4,9 @@ os: Windows Server 2012 R2
 
 clone_folder: c:\gopath\src\github.com\ncw\rclone
 
+cache:
+- '%LocalAppData%\go-build'
+
 environment:
   GOPATH: C:\gopath
   CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -43,4 +46,4 @@ artifacts:
 - path: build/*-v*.zip
 
 deploy_script:
-- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
+- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.gometalinter.json (new file; 14 lines)
@@ -0,0 +1,14 @@
+{
+    "Enable": [
+        "deadcode",
+        "errcheck",
+        "goimports",
+        "golint",
+        "ineffassign",
+        "structcheck",
+        "varcheck",
+        "vet"
+    ],
+    "EnableGC": true,
+    "Vendor": true
+}
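This config is what the new lint targets in the Makefile further down consume. A minimal sketch of running it locally, assuming gometalinter v2+, which picks up `.gometalinter.json` from the working directory automatically (the commands themselves are taken from the Makefile diff below):

    go get -u github.com/alecthomas/gometalinter
    gometalinter --install --update
    gometalinter ./...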
.travis.yml (24 changed lines)
@@ -4,12 +4,13 @@ dist: trusty
 os:
 - linux
 go:
-- 1.6.4
-- 1.7.6
-- 1.8.7
-- 1.9.3
-- "1.10"
+- 1.7.x
+- 1.8.x
+- 1.9.x
+- 1.10.x
+- 1.11.x
 - tip
 go_import_path: github.com/ncw/rclone
 before_install:
 - if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
 - if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
@@ -34,18 +35,25 @@ addons:
     - libfuse-dev
     - rpm
     - pkg-config
+cache:
+  directories:
+    - $HOME/.cache/go-build
 matrix:
   allow_failures:
   - go: tip
   include:
   - os: osx
-    go: "1.10"
+    go: 1.11.x
     env: GOTAGS=""
+    cache:
+      directories:
+        - $HOME/Library/Caches/go-build
 deploy:
   provider: script
   script: make travis_beta
   skip_cleanup: true
   on:
     repo: ncw/rclone
    all_branches: true
-    go: "1.10"
-    condition: $TRAVIS_OS_NAME == linux && $TRAVIS_PULL_REQUEST == false
+    go: 1.11.x
+    condition: $TRAVIS_PULL_REQUEST == false
@@ -33,7 +33,7 @@ page](https://github.com/ncw/rclone).
 
 Now in your terminal
 
-    go get github.com/ncw/rclone
+    go get -u github.com/ncw/rclone
     cd $GOPATH/src/github.com/ncw/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
@@ -71,7 +71,7 @@ Make sure you
 
 When you are done with that
 
-    git push origin my-new-feature
+    git push origin my-new-feature
 
 Go to the Github website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).
@@ -81,6 +81,14 @@ Your patch will get reviewed and you might get asked to fix some stuff.
 If so, then make the changes in the same branch, squash the commits,
 rebase it to master then push it to Github with `--force`.
 
+## Enabling CI for your fork ##
+
+The CI config files for rclone already take account of forks of the project, so you can enable CI for your fork repo easily.
+
+rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
+[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
+websites, find your fork of rclone, and enable building there.
+
 ## Testing ##
 
 rclone's tests are run from the go testing framework, so at the top
@@ -166,7 +174,7 @@ with modules beneath.
   * pacer - retries with backoff and paces operations
   * readers - a selection of useful io.Readers
   * rest - a thin abstraction over net/http for REST
-  * vendor - 3rd party code managed by the dep tool
+  * vendor - 3rd party code managed by `go mod`
   * vfs - Virtual FileSystem layer for implementing rclone mount and similar
 
 ## Writing Documentation ##
@@ -229,37 +237,55 @@ Fixes #1498
 
 ## Adding a dependency ##
 
-rclone uses the [dep](https://github.com/golang/dep) tool to manage
-its dependencies. All code that rclone needs for building is stored
-in the `vendor` directory for perfectly reproducible builds.
+rclone uses the [go
+modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
+support in go1.11 and later to manage its dependencies.
 
-The `vendor` directory is entirely managed by the `dep` tool.
+**NB** you must be using go1.11 or above to add a dependency to
+rclone. Rclone will still build with older versions of go, but we use
+the `go mod` command for dependencies which is only in go1.11 and
+above.
 
-To add a new dependency
+rclone can be built with modules outside of the GOPATH, but for
+backwards compatibility with older go versions, rclone also maintains
+a `vendor` directory with all the external code rclone needs for
+building.
 
-    dep ensure -add github.com/pkg/errors
+The `vendor` directory is entirely managed by the `go mod` tool, do
+not add things manually.
 
-You can add constraints on that package (see the `dep` documentation),
-but don't unless you really need to.
+To add a dependency `github.com/ncw/new_dependency` see the
+instructions below. These will fetch the dependency, add it to
+`go.mod` and `go.sum` and vendor it for older go versions.
 
-Please check in the changes generated by dep including the `vendor`
-directory and `Gopkg.toml` and `Gopkg.lock` in a single commit
-separate from any other code changes. Watch out for new files in
-`vendor`.
+    export GO111MODULE=on
+    go get github.com/ncw/new_dependency
+    go mod vendor
 
+You can add constraints on that package when doing `go get` (see the
+go docs linked above), but don't unless you really need to.
 
+Please check in the changes generated by `go mod` including the
+`vendor` directory and `go.mod` and `go.sum` in a single commit
+separate from any other code changes with the title "vendor: add
+github.com/ncw/new_dependency". Remember to `git add` any new files
+in `vendor`.
 
 ## Updating a dependency ##
 
 If you need to update a dependency then run
 
-    dep ensure -update github.com/pkg/errors
+    export GO111MODULE=on
+    go get -u github.com/pkg/errors
+    go mod vendor
 
 Check it in in a single commit as above.
 
 ## Updating all the dependencies ##
 
 In order to update all the dependencies then run `make update`. This
-just runs `dep ensure -update`. Check in the changes in a single
-commit as above.
+just uses the go modules to update all the modules to their latest
+stable release. Check in the changes in a single commit as above.
 
 This should be done early in the release cycle to pick up new versions
 of packages in time for them to get some testing.
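A hypothetical end-to-end run of the new workflow, with `github.com/ncw/new_dependency` standing in for a real module, would finish with a commit like (all commands come from the text above):

    export GO111MODULE=on
    go get github.com/ncw/new_dependency
    go mod vendor
    git add go.mod go.sum vendor/
    git commit -m "vendor: add github.com/ncw/new_dependency"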
@@ -303,8 +329,7 @@ Getting going
 Unit tests
 
   * Create a config entry called `TestRemote` for the unit tests to use
-  * Add your fs to the end of `fstest/fstests/gen_tests.go`
-  * generate `backend/remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
+  * Create a `backend/remote/remote_test.go` - copy and adjust your example remote
   * Make sure all tests pass with `go test -v`
 
 Integration tests
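As a rough sketch of what the `remote_test.go` copied in the second bullet above tends to look like in rclone of this era (the backend name `remote` and the exact `fstests.Opt` fields are illustrative assumptions, not prescriptive):

    package remote_test

    import (
    	"testing"

    	"github.com/ncw/rclone/backend/remote"
    	"github.com/ncw/rclone/fstest/fstests"
    )

    // TestIntegration runs the standard rclone integration test suite
    // against the config entry called TestRemote.
    func TestIntegration(t *testing.T) {
    	fstests.Run(t, &fstests.Opt{
    		RemoteName: "TestRemote:",
    		NilObject:  (*remote.Object)(nil),
    	})
    }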
Gopkg.lock (generated; 452 lines, file deleted)
@@ -1,452 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  branch = "master"
-  name = "bazil.org/fuse"
-  packages = [
-    ".",
-    "fs",
-    "fuseutil"
-  ]
-  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
-
-[[projects]]
-  name = "cloud.google.com/go"
-  packages = ["compute/metadata"]
-  revision = "050b16d2314d5fc3d4c9a51e4cd5c7468e77f162"
-  version = "v0.17.0"
-
-[[projects]]
-  name = "github.com/Azure/azure-sdk-for-go"
-  packages = ["storage"]
-  revision = "eae258195456be76b2ec9ad2ee2ab63cdda365d9"
-  version = "v12.2.0-beta"
-
-[[projects]]
-  name = "github.com/Azure/go-autorest"
-  packages = [
-    "autorest",
-    "autorest/adal",
-    "autorest/azure",
-    "autorest/date"
-  ]
-  revision = "6311d7a76f54cf2b6dea03d737d9bd9a6022ac5f"
-  version = "v9.7.1"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/Unknwon/goconfig"
-  packages = ["."]
-  revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/VividCortex/ewma"
-  packages = ["."]
-  revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/a8m/tree"
-  packages = ["."]
-  revision = "cf42b1e486f0b025942a768a9ad59c9939d6ca40"
-
-[[projects]]
-  name = "github.com/abbot/go-http-auth"
-  packages = ["."]
-  revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
-  version = "v0.4.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/aws/aws-sdk-go"
-  packages = [
-    "aws",
-    "aws/awserr",
-    "aws/awsutil",
-    "aws/client",
-    "aws/client/metadata",
-    "aws/corehandlers",
-    "aws/credentials",
-    "aws/credentials/ec2rolecreds",
-    "aws/credentials/endpointcreds",
-    "aws/credentials/stscreds",
-    "aws/defaults",
-    "aws/ec2metadata",
-    "aws/endpoints",
-    "aws/request",
-    "aws/session",
-    "aws/signer/v4",
-    "internal/shareddefaults",
-    "private/protocol",
-    "private/protocol/query",
-    "private/protocol/query/queryutil",
-    "private/protocol/rest",
-    "private/protocol/restxml",
-    "private/protocol/xml/xmlutil",
-    "service/s3",
-    "service/s3/s3iface",
-    "service/s3/s3manager",
-    "service/sts"
-  ]
-  revision = "2fe57096de348e6cff4031af99254613f8ef73ea"
-
-[[projects]]
-  name = "github.com/billziss-gh/cgofuse"
-  packages = ["fuse"]
-  revision = "487e2baa5611bab252a906d7f9b869f944607305"
-  version = "v1.0.4"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/coreos/bbolt"
-  packages = ["."]
-  revision = "ee30b748bcfbd74ec1d8439ae8fd4f9123a5c94e"
-
-[[projects]]
-  name = "github.com/cpuguy83/go-md2man"
-  packages = ["md2man"]
-  revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
-  version = "v1.0.7"
-
-[[projects]]
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
-[[projects]]
-  name = "github.com/dgrijalva/jwt-go"
-  packages = ["."]
-  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
-  version = "v3.1.0"
-
-[[projects]]
-  name = "github.com/djherbis/times"
-  packages = ["."]
-  revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
-  version = "v1.0.1"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/dropbox/dropbox-sdk-go-unofficial"
-  packages = [
-    "dropbox",
-    "dropbox/async",
-    "dropbox/file_properties",
-    "dropbox/files"
-  ]
-  revision = "9c27e83ceccc8f8bbc9afdc17c50798529d608b1"
-
-[[projects]]
-  name = "github.com/go-ini/ini"
-  packages = ["."]
-  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
-  version = "v1.32.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/protobuf"
-  packages = ["proto"]
-  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/google/go-querystring"
-  packages = ["query"]
-  revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
-
-[[projects]]
-  name = "github.com/inconshreveable/mousetrap"
-  packages = ["."]
-  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
-  version = "v1.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/jlaffaye/ftp"
-  packages = ["."]
-  revision = "427467931c6fbc25acba4537c9d3cbc40cfa569b"
-
-[[projects]]
-  name = "github.com/jmespath/go-jmespath"
-  packages = ["."]
-  revision = "0b12d6b5"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/kardianos/osext"
-  packages = ["."]
-  revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/kr/fs"
-  packages = ["."]
-  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
-
-[[projects]]
-  name = "github.com/marstr/guid"
-  packages = ["."]
-  revision = "8bdf7d1a087ccc975cf37dd6507da50698fd19ca"
-
-[[projects]]
-  name = "github.com/mattn/go-runewidth"
-  packages = ["."]
-  revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
-  version = "v0.0.2"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/ncw/go-acd"
-  packages = ["."]
-  revision = "887eb06ab6a255fbf5744b5812788e884078620a"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/ncw/swift"
-  packages = ["."]
-  revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/nsf/termbox-go"
-  packages = ["."]
-  revision = "8c5e0793e04afcda7fe23d0751791e7321df4265"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/okzk/sdnotify"
-  packages = ["."]
-  revision = "ed8ca104421a21947710335006107540e3ecb335"
-
-[[projects]]
-  name = "github.com/patrickmn/go-cache"
-  packages = ["."]
-  revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
-  version = "v2.1.0"
-
-[[projects]]
-  name = "github.com/pengsrc/go-shared"
-  packages = [
-    "buffer",
-    "check",
-    "convert",
-    "log",
-    "reopen"
-  ]
-  revision = "b98065a377794d577e2a0e32869378b9ce4b8952"
-  version = "v0.1.1"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/pkg/errors"
-  packages = ["."]
-  revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/pkg/sftp"
-  packages = ["."]
-  revision = "72ec6e85598d2480c30f633c154b07b6c112eade"
-
-[[projects]]
-  name = "github.com/pmezard/go-difflib"
-  packages = ["difflib"]
-  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
-  version = "v1.0.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/rfjakob/eme"
-  packages = ["."]
-  revision = "2222dbd4ba467ab3fc7e8af41562fcfe69c0d770"
-
-[[projects]]
-  name = "github.com/russross/blackfriday"
-  packages = ["."]
-  revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
-  version = "v1.5"
-
-[[projects]]
-  name = "github.com/satori/go.uuid"
-  packages = ["."]
-  revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
-  version = "v1.2.0"
-
-[[projects]]
-  name = "github.com/sevlyar/go-daemon"
-  packages = ["."]
-  revision = "e49ef56654f54139c4dc0285f973f74e9649e729"
-  version = "v0.1.2"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/skratchdot/open-golang"
-  packages = ["open"]
-  revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/spf13/cobra"
-  packages = [
-    ".",
-    "doc"
-  ]
-  revision = "0c34d16c3123764e413b9ed982ada58b1c3d53ea"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/spf13/pflag"
-  packages = ["."]
-  revision = "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/stretchr/testify"
-  packages = [
-    "assert",
-    "require"
-  ]
-  revision = "87b1dfb5b2fa649f52695dd9eae19abe404a4308"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/xanzy/ssh-agent"
-  packages = ["."]
-  revision = "ba9c9e33906f58169366275e3450db66139a31a9"
-
-[[projects]]
-  name = "github.com/yunify/qingstor-sdk-go"
-  packages = [
-    ".",
-    "config",
-    "logger",
-    "request",
-    "request/builder",
-    "request/data",
-    "request/errors",
-    "request/signer",
-    "request/unpacker",
-    "service",
-    "utils"
-  ]
-  revision = "51fa3b6bb3c24f4d646eefff251cd2e6ba716600"
-  version = "v2.2.9"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-  packages = [
-    "bcrypt",
-    "blowfish",
-    "curve25519",
-    "ed25519",
-    "ed25519/internal/edwards25519",
-    "nacl/secretbox",
-    "pbkdf2",
-    "poly1305",
-    "salsa20/salsa",
-    "scrypt",
-    "ssh",
-    "ssh/agent",
-    "ssh/terminal"
-  ]
-  revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = [
-    "context",
-    "context/ctxhttp",
-    "html",
-    "html/atom",
-    "webdav",
-    "webdav/internal/xml"
-  ]
-  revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-  packages = [
-    ".",
-    "google",
-    "internal",
-    "jws",
-    "jwt"
-  ]
-  revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = [
-    "unix",
-    "windows"
-  ]
-  revision = "fff93fa7cd278d84afc205751523809c464168ab"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/text"
-  packages = [
-    "internal/gen",
-    "internal/triegen",
-    "internal/ucd",
-    "transform",
-    "unicode/cldr",
-    "unicode/norm"
-  ]
-  revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/time"
-  packages = ["rate"]
-  revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
-
-[[projects]]
-  branch = "master"
-  name = "google.golang.org/api"
-  packages = [
-    "drive/v3",
-    "gensupport",
-    "googleapi",
-    "googleapi/internal/uritemplates",
-    "storage/v1"
-  ]
-  revision = "de3aa2cfa7f1c18dcb7f91738099bad280117b8e"
-
-[[projects]]
-  name = "google.golang.org/appengine"
-  packages = [
-    ".",
-    "internal",
-    "internal/app_identity",
-    "internal/base",
-    "internal/datastore",
-    "internal/log",
-    "internal/modules",
-    "internal/remote_api",
-    "internal/urlfetch",
-    "log",
-    "urlfetch"
-  ]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
-
-[[projects]]
-  branch = "v2"
-  name = "gopkg.in/yaml.v2"
-  packages = ["."]
-  revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "f9e9adb0675a970e6b6a9f28fa75e5bbee74d001359c688bd37c78f035be565a"
-  solver-name = "gps-cdcl"
-  solver-version = 1
Gopkg.toml (154 lines, file deleted)
@@ -1,154 +0,0 @@
-
-## Gopkg.toml example (these lines may be deleted)
-
-## "required" lists a set of packages (not projects) that must be included in
-## Gopkg.lock. This list is merged with the set of packages imported by the current
-## project. Use it when your project needs a package it doesn't explicitly import -
-## including "main" packages.
-# required = ["github.com/user/thing/cmd/thing"]
-
-## "ignored" lists a set of packages (not projects) that are ignored when
-## dep statically analyzes source code. Ignored packages can be in this project,
-## or in a dependency.
-# ignored = ["github.com/user/project/badpkg"]
-
-## Dependencies define constraints on dependent projects. They are respected by
-## dep whether coming from the Gopkg.toml of the current project or a dependency.
-# [[constraint]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Recommended: the version constraint to enforce for the project.
-## Only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: an alternate location (URL or import path) for the project's source.
-# source = "https://github.com/myfork/package.git"
-
-## Overrides have the same structure as [[constraint]], but supercede all
-## [[constraint]] declarations from all projects. Only the current project's
-## [[override]] are applied.
-##
-## Overrides are a sledgehammer. Use them only as a last resort.
-# [[override]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Optional: specifying a version constraint override will cause all other
-## constraints on this project to be ignored; only the overriden constraint
-## need be satisfied.
-## Again, only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: specifying an alternate source location as an override will
-## enforce that the alternate location is used for that project, regardless of
-## what source location any dependent projects specify.
-# source = "https://github.com/myfork/package.git"
-
-[[constraint]]
-  branch = "master"
-  name = "bazil.org/fuse"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/Unknwon/goconfig"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/VividCortex/ewma"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/aws/aws-sdk-go"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/ncw/go-acd"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/ncw/swift"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/pkg/errors"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/pkg/sftp"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/rfjakob/eme"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/skratchdot/open-golang"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/spf13/cobra"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/spf13/pflag"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/stacktic/dropbox"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/stretchr/testify"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/net"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/sys"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/text"
-
-[[constraint]]
-  branch = "master"
-  name = "google.golang.org/api"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/dropbox/dropbox-sdk-go-unofficial"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/yunify/qingstor-sdk-go"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/coreos/bbolt"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/patrickmn/go-cache"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/okzk/sdnotify"
-
-[[constraint]]
-  name = "github.com/sevlyar/go-daemon"
-  version = "0.1.2"
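For comparison with the `[[constraint]]` blocks above, the same pinning now lives in `go.mod`. A hypothetical fragment (module names taken from the constraints above; the version strings here are illustrative only, not what the real go.mod pinned):

    module github.com/ncw/rclone

    require (
    	github.com/pkg/errors v0.8.0
    	github.com/spf13/cobra v0.0.3
    )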
@@ -7,6 +7,8 @@ Current active maintainers of rclone are
 
 * Ishuah Kariuki @ishuah
 * Remus Bunduc @remusb - cache subsystem maintainer
 * Fabian Möller @B4dM4n
+* Alex Chen @Cnly
+* Sandeep Ummadi @sandeepkru
 
 **This is a work in progress Draft**
MANUAL.html (3171 changed lines; diff suppressed because it is too large)
MANUAL.txt (3558 changed lines; diff suppressed because it is too large)
Makefile (110 changed lines)
@@ -1,12 +1,28 @@
 SHELL = bash
-TAG := $(shell echo $$(git describe --abbrev=8 --tags)-$${APPVEYOR_REPO_BRANCH:-$${TRAVIS_BRANCH:-$$(git rev-parse --abbrev-ref HEAD)}} | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
+ifeq ($(BRANCH),$(LAST_TAG))
+	BRANCH := master
+endif
+TAG_BRANCH := -$(BRANCH)
+BRANCH_PATH := branch/
+ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
+	TAG_BRANCH :=
+	BRANCH_PATH :=
+endif
+TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
 NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+ifneq ($(TAG),$(LAST_TAG))
+	TAG := $(TAG)-beta
+endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-# Run full tests if go >= go1.9
-FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
-BETA_URL := https://beta.rclone.org/$(TAG)/
+# Run full tests if go >= go1.11
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
+BETA_PATH := $(BRANCH_PATH)$(TAG)
+BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
+BETA_UPLOAD_ROOT := memstore:beta-rclone-org
+BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
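To make the new tag arithmetic concrete: suppose `git describe --abbrev=8 --tags` prints `v1.43-80-g1a2b3c4d` on a branch called `new-feature` (all values here are hypothetical). The `sed` zero-padding turns `-80-` into `-080-`, `TAG_BRANCH` is `-new-feature`, so `TAG` becomes `v1.43-080-g1a2b3c4d-new-feature-beta` (the `-beta` suffix is appended because `TAG` differs from `LAST_TAG`), and `BETA_PATH` becomes `branch/new-feature/v1.43-080-g1a2b3c4d-new-feature-beta`. On master or HEAD both `TAG_BRANCH` and `BRANCH_PATH` are emptied. The `NEW_TAG` one-liner simply bumps the minor version:

    $ echo v1.43 | perl -lpe 's/v//; $_ += 0.01; $_ = sprintf("v%.2f", $_)'
    v1.44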
@@ -21,6 +37,7 @@ rclone:
 
 vars:
 	@echo SHELL="'$(SHELL)'"
+	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
 	@echo LAST_TAG="'$(LAST_TAG)'"
 	@echo NEW_TAG="'$(NEW_TAG)'"
@@ -56,22 +73,36 @@ else
 	@echo Skipping source quality tests as version of go too old
 endif
 
+gometalinter_install:
+	go get -u github.com/alecthomas/gometalinter
+	gometalinter --install --update
+
+# We aren't using gometalinter as the default linter yet because
+# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
+# 2. can't get -printfuncs working with the vet linter
+gometalinter:
+	gometalinter ./...
+
 # Get the build dependencies
 build_dep:
ifdef FULL_TESTS
 	go get -u github.com/kisielk/errcheck
 	go get -u golang.org/x/tools/cmd/goimports
 	go get -u github.com/golang/lint/golint
 	go get -u github.com/inconshreveable/mousetrap
-	go get -u github.com/tools/godep
endif
 
 # Get the release dependencies
 release_dep:
 	go get -u github.com/goreleaser/nfpm/...
 	go get -u github.com/aktau/github-release
 
 # Update dependencies
 update:
-	go get -u github.com/golang/dep/cmd/dep
-	dep ensure -update -v
+	GO111MODULE=on go get -u ./...
+	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
 
-doc: rclone.1 MANUAL.html MANUAL.txt
+doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 
 rclone.1: MANUAL.md
 	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
@@ -88,6 +119,9 @@ MANUAL.txt: MANUAL.md
 commanddocs: rclone
 	rclone gendocs docs/content/commands/
 
+rcdocs: rclone
+	bin/make_rc_docs.sh
+
 install: rclone
 	install -d ${DESTDIR}/usr/bin
 	install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
@@ -105,12 +139,12 @@ upload_website: website
 	rclone -v sync docs/public memstore:www-rclone-org
 
 tarball:
-	git archive -9 --format=tar.gz --prefix=rclone-$(TAG) -o build/rclone-$(TAG).tar.gz $(TAG)
+	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
 
 sign_upload:
-	cd build && md5sum rclone-* | gpg --clearsign > MD5SUMS
-	cd build && sha1sum rclone-* | gpg --clearsign > SHA1SUMS
-	cd build && sha256sum rclone-* | gpg --clearsign > SHA256SUMS
+	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
+	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
+	cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
 
 check_sign:
 	cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
@@ -118,7 +152,8 @@ check_sign:
 	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
 
 upload:
-	rclone -v copy build/ memstore:downloads-rclone-org
+	rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
+	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
 
 upload_github:
 	./bin/upload-github $(TAG)
@@ -127,43 +162,47 @@ cross: doc
 	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
 
 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
-	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
-	@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
+	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
+	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
+	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
 
 log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
ifdef FULL_TESTS
-	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
+	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
 	@echo Skipping compile all as version of go too old
endif
 
 appveyor_upload:
-	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
-ifeq ($(APPVEYOR_REPO_BRANCH),master)
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+ifndef BRANCH_PATH
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
 	@echo Beta release ready at $(BETA_URL)
 
+BUILD_FLAGS := -exclude "^(windows|darwin)/"
+ifeq ($(TRAVIS_OS_NAME),osx)
+	BUILD_FLAGS := -include "^darwin/" -cgo
+endif
+
 travis_beta:
+ifeq ($(TRAVIS_OS_NAME),linux)
 	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
-	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
-ifeq ($(TRAVIS_BRANCH),master)
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+ifndef BRANCH_PATH
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
 	@echo Beta release ready at $(BETA_URL)
 
-# Fetch the windows builds from appveyor
-fetch_windows:
-	rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
-	-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-	-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
-	md5sum build/rclone-*-windows-*.zip | sort
+# Fetch the binary builds from travis and appveyor
+fetch_binaries:
+	rclone -P sync $(BETA_UPLOAD) build/
 
 serve: website
 	cd docs && hugo server -v -w
@@ -174,10 +213,10 @@ tag: doc
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
 	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
 	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
-	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
-	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
+	@echo "  * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
+	@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
-	@echo "Then commit the changes"
+	@echo "Then commit all the changes"
 	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
 	@echo "And finally run make retag before make cross etc"
@@ -188,9 +227,6 @@ startdev:
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
 	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
 
-gen_tests:
-	cd fstest/fstests && go generate
-
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
@@ -15,7 +15,7 @@
 
 Rclone is a command line program to sync files and directories to and from
 
-* Amazon Drive
+* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
 * Backblaze B2
 * Box
@@ -25,8 +25,11 @@ Rclone is a command line program to sync files and directories to and from
 * Google Drive
 * HTTP
 * Hubic
+* Jottacloud
+* Mega
 * Microsoft Azure Blob Storage
 * Microsoft OneDrive
+* OpenDrive
 * Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
 * pCloud
 * QingStor
RELEASE.md (32 changed lines)
@@ -13,13 +13,9 @@ Making a release
   * git status - to check for new man pages - git add them
   * git commit -a -v -m "Version v1.XX"
   * make retag
-  * # Set the GOPATH for a current stable go compiler
   * make cross
   * git checkout docs/content/commands # to undo date changes in commands
   * git push --tags origin master
-  * git push --tags origin master:stable # update the stable branch for packager.io
-  * # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
-  * make fetch_windows
+  * # Wait for the appveyor and travis builds to complete then...
+  * make fetch_binaries
   * make tarball
   * make sign_upload
   * make check_sign
@@ -30,10 +26,30 @@ Making a release
   * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
+  * Review any pinned packages in go.mod and remove if possible
   * make update
   * git status
   * git add new files
+  * carry forward any patches to vendor stuff
   * git commit -a -v
 
-Make the version number be just in a file?
-
 Making a point release. If rclone needs a point release due to some
 horrendous bug, then
   * git branch v1.XX v1.XX-fixes
+  * git cherry-pick any fixes
   * Test (see above)
   * make NEW_TAG=v1.XX.1 tag
   * edit docs/content/changelog.md
+  * make TAG=v1.43.1 doc
   * git commit -a -v -m "Version v1.XX.1"
   * git tag -d v1.XX.1
   * git tag -s -m "Version v1.XX.1" v1.XX.1
   * git push --tags -u origin v1.XX-fixes
+  * make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
+  * make TAG=v1.43.1 tarball
+  * make TAG=v1.43.1 sign_upload
+  * make TAG=v1.43.1 check_sign
+  * make TAG=v1.43.1 upload
+  * make TAG=v1.43.1 upload_website
+  * make TAG=v1.43.1 upload_github
+  * NB this overwrites the current beta so after the release, rebuild the last travis build
   * Announce!
@@ -7,7 +7,8 @@ import (
 	"strings"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 )
 
 // Register with Fs
@@ -17,29 +18,42 @@ func init() {
 		Description: "Alias for an existing remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Name: "remote",
-			Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Name:     "remote",
+			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Required: true,
 		}},
 	}
 	fs.Register(fsi)
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	Remote string `config:"remote"`
+}
+
 // NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(name, root string) (fs.Fs, error) {
-	remote := config.FileGet(name, "remote")
-	if remote == "" {
-		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
-	}
-	if strings.HasPrefix(remote, name+":") {
-		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
-	}
-	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-
-	root = filepath.ToSlash(root)
-	return fsInfo.NewFs(configName, path.Join(fsPath, root))
+	if opt.Remote == "" {
+		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
+	}
+	if strings.HasPrefix(opt.Remote, name+":") {
+		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
+	}
+	_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
+	if err != nil {
+		return nil, err
+	}
+	root = path.Join(fsPath, filepath.ToSlash(root))
+	if configName == "local" {
+		return fs.NewFs(root)
+	}
+	return fs.NewFs(configName + ":" + root)
 }
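To illustrate what the rewritten alias `NewFs` does with `root`, consider a made-up config entry:

    [media]
    type = alias
    remote = s3:bucket/media

With this, `rclone ls media:photos` resolves to `s3:bucket/media/photos`, following from the `path.Join(fsPath, filepath.ToSlash(root))` above; if `remote` pointed at `/some/dir` instead, the `configName == "local"` branch would return a plain local Fs rooted at `/some/dir/photos`.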
@@ -15,8 +15,6 @@ import (
 
 var (
 	remoteName = "TestAlias"
-	testPath   = "test"
-	filesPath  = filepath.Join(testPath, "files")
 )
 
 func prepare(t *testing.T, root string) {
@@ -15,13 +15,17 @@ import (
 	_ "github.com/ncw/rclone/backend/googlecloudstorage"
 	_ "github.com/ncw/rclone/backend/http"
 	_ "github.com/ncw/rclone/backend/hubic"
+	_ "github.com/ncw/rclone/backend/jottacloud"
 	_ "github.com/ncw/rclone/backend/local"
+	_ "github.com/ncw/rclone/backend/mega"
 	_ "github.com/ncw/rclone/backend/onedrive"
+	_ "github.com/ncw/rclone/backend/opendrive"
 	_ "github.com/ncw/rclone/backend/pcloud"
 	_ "github.com/ncw/rclone/backend/qingstor"
 	_ "github.com/ncw/rclone/backend/s3"
 	_ "github.com/ncw/rclone/backend/sftp"
 	_ "github.com/ncw/rclone/backend/swift"
+	_ "github.com/ncw/rclone/backend/union"
 	_ "github.com/ncw/rclone/backend/webdav"
 	_ "github.com/ncw/rclone/backend/yandex"
 )
@@ -18,14 +18,14 @@ import (
 	"log"
 	"net/http"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
 	"github.com/ncw/go-acd"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
@@ -38,20 +38,17 @@ import (
 )
 
 const (
-	folderKind      = "FOLDER"
-	fileKind        = "FILE"
-	assetKind       = "ASSET"
-	statusAvailable = "AVAILABLE"
-	timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
-	minSleep        = 20 * time.Millisecond
-	warnFileSize    = 50000 << 20 // Display warning for files larger than this size
+	folderKind               = "FOLDER"
+	fileKind                 = "FILE"
+	statusAvailable          = "AVAILABLE"
+	timeFormat               = time.RFC3339 // 2014-03-07T22:31:12.173Z
+	minSleep                 = 20 * time.Millisecond
+	warnFileSize             = 50000 << 20 // Display warning for files larger than this size
+	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
 )
 
 // Globals
 var (
-	// Flags
-	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
 	// Description of how to auth for this app
 	acdConfig = &oauth2.Config{
 		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
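For scale: `warnFileSize = 50000 << 20` is 50000 MiB, i.e. 50000 × 2^20 = 52,428,800,000 bytes, roughly 48.8 GiB, and `defaultTempLinkThreshold = fs.SizeSuffix(9 << 30)` is exactly 9 GiB.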
@@ -69,35 +66,62 @@ var (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "amazon cloud drive",
 		Prefix:      "acd",
 		Description: "Amazon Drive",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("amazon cloud drive", name, acdConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
-			Name: config.ConfigClientID,
-			Help: "Amazon Application Client Id - required.",
+			Name:     config.ConfigClientID,
+			Help:     "Amazon Application Client ID.",
+			Required: true,
 		}, {
-			Name: config.ConfigClientSecret,
-			Help: "Amazon Application Client Secret - required.",
+			Name:     config.ConfigClientSecret,
+			Help:     "Amazon Application Client Secret.",
+			Required: true,
 		}, {
-			Name: config.ConfigAuthURL,
-			Help: "Auth server URL - leave blank to use Amazon's.",
+			Name:     config.ConfigAuthURL,
+			Help:     "Auth server URL.\nLeave blank to use Amazon's.",
+			Advanced: true,
 		}, {
-			Name: config.ConfigTokenURL,
-			Help: "Token server url - leave blank to use Amazon's.",
+			Name:     config.ConfigTokenURL,
+			Help:     "Token server URL.\nLeave blank to use Amazon's.",
+			Advanced: true,
+		}, {
+			Name:     "checkpoint",
+			Help:     "Checkpoint for internal polling (debug).",
+			Hide:     fs.OptionHideBoth,
+			Advanced: true,
+		}, {
+			Name:     "upload_wait_per_gb",
+			Help:     "Additional time per GB to wait after a failed complete upload to see if it appears.",
+			Default:  fs.Duration(180 * time.Second),
+			Advanced: true,
+		}, {
+			Name:     "templink_threshold",
+			Help:     "Files >= this size will be downloaded via their tempLink.",
+			Default:  defaultTempLinkThreshold,
+			Advanced: true,
 		}},
 	})
-	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	Checkpoint        string        `config:"checkpoint"`
+	UploadWaitPerGB   fs.Duration   `config:"upload_wait_per_gb"`
+	TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
+}
+
 // Fs represents a remote acd server
 type Fs struct {
 	name         string       // name of this remote
 	features     *fs.Features // optional features
+	opt          Options      // options for this Fs
 	c            *acd.Client  // the connection to the acd server
 	noAuthClient *http.Client // unauthenticated http client
 	root         string       // the path we are working on
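A hypothetical `rclone.conf` entry exercising the new options (the keys follow the `config:"..."` struct tags above; every value here is made up for illustration):

    [acd]
    type = amazon cloud drive
    client_id = XXXXXXXX
    client_secret = XXXXXXXX
    upload_wait_per_gb = 3m0s
    templink_threshold = 9G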
@@ -138,9 +162,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
@@ -196,7 +217,13 @@ func filterRequest(req *http.Request) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = parsePath(root)
	baseClient := fshttp.NewClient(fs.Config)
	if do, ok := baseClient.Transport.(interface {
@@ -206,7 +233,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	} else {
		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
	}
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
	if err != nil {
		log.Fatalf("Failed to configure Amazon Drive: %v", err)
	}
@@ -215,6 +242,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	f := &Fs{
		name:         name,
		root:         root,
		opt:          *opt,
		c:            c,
		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
		noAuthClient: fshttp.NewClient(fs.Config),
@@ -532,13 +560,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
	}

	// Don't wait for uploads - assume they will appear later
	if *uploadWaitPerGB <= 0 {
	if f.opt.UploadWaitPerGB <= 0 {
		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
		return false, inInfo, inErr
	}

	// Time we should wait for the upload
	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

const sleepTime = 5 * time.Second // sleep between tries
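To make the wait arithmetic above concrete: `upload_wait_per_gb` is scaled to a per-byte duration, so with the 180s default a 2.5 GiB upload that errors at the end is polled for up to 7m30s before the error is returned. A standalone rerun of the same computation (sizes are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	uploadWaitPerGB := 180 * time.Second // default upload_wait_per_gb
	size := int64(5 << 29)               // a 2.5 GiB object, for illustration
	perByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(perByte * float64(size))
	fmt.Println(timeToWait) // 7m30s
}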
@@ -1020,7 +1048,7 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	bigObject := o.Size() >= int64(tempLinkThreshold)
	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
	if bigObject {
		fs.Debugf(o, "Downloading large object via tempLink")
	}
@@ -1213,7 +1241,7 @@ func (o *Object) MimeType() string {
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
	checkpoint := config.FileGet(f.name, "checkpoint")
	checkpoint := f.opt.Checkpoint

	quit := make(chan bool)
	go func() {
@@ -1320,6 +1348,14 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
	return checkpoint
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	if o.info.Id == nil {
		return ""
	}
	return *o.info.Id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -1331,4 +1367,5 @@ var (
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.Object         = (*Object)(nil)
	_ fs.MimeTyper      = &Object{}
	_ fs.IDer           = &Object{}
)

@@ -1,7 +1,4 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build acd

@@ -15,64 +12,9 @@ import (
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

File diff suppressed because it is too large
backend/azureblob/azureblob_internal_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

package azureblob

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func (f *Fs) InternalTest(t *testing.T) {
	// Check first feature flags are set on this
	// remote
	enabled := f.Features().ListTiers
	assert.True(t, enabled)
	enabled = f.Features().SetTier
	assert.True(t, enabled)
	enabled = f.Features().GetTier
	assert.True(t, enabled)
}
@@ -1,9 +1,6 @@
// Test AzureBlob filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build go1.7
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

package azureblob_test

@@ -11,68 +8,13 @@ import (
	"testing"

	"github.com/ncw/rclone/backend/azureblob"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*azureblob.Object)(nil))
	fstests.RemoteName = "TestAzureBlob:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestAzureBlob:",
		NilObject:  (*azureblob.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -1,6 +1,6 @@
// Build for unsupported platforms to stop go complaining
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.7
// +build freebsd netbsd openbsd plan9 solaris !go1.8

package azureblob

@@ -31,11 +31,6 @@ func (e *Error) Fatal() bool {

var _ fserrors.Fataler = (*Error)(nil)

// Account describes a B2 account
type Account struct {
	ID string `json:"accountId"` // The identifier for the account.
}

// Bucket describes a B2 bucket
type Bucket struct {
	ID string `json:"bucketId"`
@@ -74,7 +69,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := (time.Time)(t).Format(versionFormat)
	s := time.Time(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
@@ -107,20 +102,20 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {

// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
	return (time.Time)(t).IsZero()
	return time.Time(t).IsZero()
}

// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
	if (time.Time)(t).IsZero() {
	if time.Time(t).IsZero() {
		return false
	}
	if (time.Time)(s).IsZero() {
	if time.Time(s).IsZero() {
		return false
	}
	return (time.Time)(t).Equal((time.Time)(s))
	return time.Time(t).Equal(time.Time(s))
}
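These Timestamp helpers implement the --b2-versions naming scheme: AddVersion splices a timestamp formatted with versionFormat in front of the file extension, and RemoveVersion parses it back out. A self-contained sketch that reimplements just the AddVersion logic shown above (not the real api package):

package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion mirrors api.Timestamp.AddVersion from the diff above:
// the timestamp goes between the base name and the extension.
func addVersion(remote string, t time.Time) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := t.Format(versionFormat)
	// Replace the '.' with a '-' so the name stays filesystem friendly
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
}

func main() {
	t := time.Date(2018, 5, 5, 1, 2, 3, 4e6, time.UTC)
	fmt.Println(addVersion("file.txt", t)) // file-v2018-05-05-010203-004.txt
}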

// File is info about a file
@@ -137,10 +132,26 @@ type File struct {

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AccountID               string `json:"accountId"`               // The identifier for the account.
	AuthorizationToken      string `json:"authorizationToken"`      // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	APIURL                  string `json:"apiUrl"`                  // The base URL to use for all API calls except for uploading and downloading files.
	DownloadURL             string `json:"downloadUrl"`             // The base URL to use for downloading files.
	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
	AccountID string `json:"accountId"` // The identifier for the account.
	Allowed   struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
	} `json:"allowed"`
	APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
	AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
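The new Allowed block is what makes restricted application keys usable: when a key is limited to one bucket, the authorize response names it, and NewFs (later in this diff) marks that bucket as existing instead of trying to list or create it. A minimal sketch of decoding and consuming the field; the JSON body is illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

type authResponse struct {
	Allowed struct {
		BucketID     string   `json:"bucketId"`
		Capabilities []string `json:"capabilities"`
	} `json:"allowed"`
}

func main() {
	// Illustrative response for a key restricted to one bucket.
	body := `{"allowed": {"bucketId": "b2bucketid123", "capabilities": ["readFiles"]}}`
	var info authResponse
	if err := json.Unmarshal([]byte(body), &info); err != nil {
		panic(err)
	}
	if info.Allowed.BucketID != "" {
		fmt.Println("key is restricted to bucket", info.Allowed.BucketID)
	}
}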

// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
	AccountID   string   `json:"accountId"`             // The identifier for the account.
	BucketID    string   `json:"bucketId,omitempty"`    // When specified, the result will be a list containing just this bucket.
	BucketName  string   `json:"bucketName,omitempty"`  // When specified, the result will be a list containing just this bucket.
	BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
}

// ListBucketsResponse is as returned from the b2_list_buckets call

backend/b2/b2.go (181 lines)
@@ -22,8 +22,8 @@ import (
	"github.com/ncw/rclone/backend/b2/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
@@ -34,30 +34,27 @@ import (
)

const (
	defaultEndpoint  = "https://api.backblazeb2.com"
	headerPrefix     = "x-bz-info-" // lower case as that is what the server returns
	timeKey          = "src_last_modified_millis"
	timeHeader       = headerPrefix + timeKey
	sha1Key          = "large_file_sha1"
	sha1Header       = "X-Bz-Content-Sha1"
	sha1InfoHeader   = headerPrefix + sha1Key
	testModeHeader   = "X-Bz-Test-Mode"
	retryAfterHeader = "Retry-After"
	minSleep         = 10 * time.Millisecond
	maxSleep         = 5 * time.Minute
	decayConstant    = 1 // bigger for slower decay, exponential
	maxParts         = 10000
	maxVersions      = 100 // maximum number of versions we search in --b2-versions mode
	defaultEndpoint     = "https://api.backblazeb2.com"
	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
	timeKey             = "src_last_modified_millis"
	timeHeader          = headerPrefix + timeKey
	sha1Key             = "large_file_sha1"
	sha1Header          = "X-Bz-Content-Sha1"
	sha1InfoHeader      = headerPrefix + sha1Key
	testModeHeader      = "X-Bz-Test-Mode"
	retryAfterHeader    = "Retry-After"
	minSleep            = 10 * time.Millisecond
	maxSleep            = 5 * time.Minute
	decayConstant       = 1 // bigger for slower decay, exponential
	maxParts            = 10000
	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
	minChunkSize        = 5E6
	defaultChunkSize    = 96 * 1024 * 1024
	defaultUploadCutoff = 200E6
)

// Globals
var (
	minChunkSize       = fs.SizeSuffix(5E6)
	chunkSize          = fs.SizeSuffix(96 * 1024 * 1024)
	uploadCutoff       = fs.SizeSuffix(200E6)
	b2TestMode         = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
	b2Versions         = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
	b2HardDelete       = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)

@@ -68,29 +65,64 @@ func init() {
		Description: "Backblaze B2",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "account",
			Help: "Account ID",
			Name:     "account",
			Help:     "Account ID or Application Key ID",
			Required: true,
		}, {
			Name: "key",
			Help: "Application Key",
			Name:     "key",
			Help:     "Application Key",
			Required: true,
		}, {
			Name: "endpoint",
			Help: "Endpoint for the service - leave blank normally.",
		},
	},
			Name:     "endpoint",
			Help:     "Endpoint for the service.\nLeave blank normally.",
			Advanced: true,
		}, {
			Name:     "test_mode",
			Help:     "A flag string for X-Bz-Test-Mode header for debugging.",
			Default:  "",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "versions",
			Help:     "Include old versions in directory listings.",
			Default:  false,
			Advanced: true,
		}, {
			Name:    "hard_delete",
			Help:    "Permanently delete files on remote removal, otherwise hide files.",
			Default: false,
		}, {
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to chunked upload.",
			Default:  fs.SizeSuffix(defaultUploadCutoff),
			Advanced: true,
		}, {
			Name:     "chunk_size",
			Help:     "Upload chunk size. Must fit in memory.",
			Default:  fs.SizeSuffix(defaultChunkSize),
			Advanced: true,
		}},
	})
	flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
	flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}

// Options defines the configuration for this backend
type Options struct {
	Account      string        `config:"account"`
	Key          string        `config:"key"`
	Endpoint     string        `config:"endpoint"`
	TestMode     string        `config:"test_mode"`
	Versions     bool          `config:"versions"`
	HardDelete   bool          `config:"hard_delete"`
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
}
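UploadCutoff and ChunkSize are related: objects above the cutoff use the chunked large-file upload, and NewFs below rejects configurations where the cutoff is smaller than a single chunk or the chunk is below B2's minimum. A standalone sketch of those two checks with the default values (SizeSuffix here is a stand-in for fs.SizeSuffix, which is an int64 byte count):

package main

import "fmt"

type SizeSuffix int64 // stand-in for fs.SizeSuffix

func validate(uploadCutoff, chunkSize, minChunkSize SizeSuffix) error {
	if uploadCutoff < chunkSize {
		return fmt.Errorf("upload cutoff (%d) must be >= chunk size (%d)", uploadCutoff, chunkSize)
	}
	if chunkSize < minChunkSize {
		return fmt.Errorf("chunk size can't be less than %d - was %d", minChunkSize, chunkSize)
	}
	return nil
}

func main() {
	// Defaults from the const block above: 200E6 cutoff, 96 MiB chunks, 5E6 minimum.
	fmt.Println(validate(200e6, 96*1024*1024, 5e6)) // <nil>
	fmt.Println(validate(50e6, 96*1024*1024, 5e6))  // error: cutoff smaller than one chunk
}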

// Fs represents a remote b2 server
type Fs struct {
	name       string       // name of this remote
	root       string       // the path we are working on if any
	opt        Options      // parsed config options
	features   *fs.Features // optional features
	account    string       // account name
	key        string       // auth key
	endpoint   string       // name of the starting api endpoint
	srv        *rest.Client // the connection to the b2 server
	bucket     string       // the bucket we are working on
	bucketOKMu sync.Mutex   // mutex to protect bucket OK
@@ -145,7 +177,7 @@ func (f *Fs) Features() *fs.Features {
}

// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parsePath parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
@@ -232,33 +264,37 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadCutoff < chunkSize {
		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if chunkSize < minChunkSize {
		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
	if opt.UploadCutoff < opt.ChunkSize {
		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
	}
	if opt.ChunkSize < minChunkSize {
		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
	}
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	account := config.FileGet(name, "account")
	if account == "" {
	if opt.Account == "" {
		return nil, errors.New("account not found")
	}
	key := config.FileGet(name, "key")
	if key == "" {
	if opt.Key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
	if opt.Endpoint == "" {
		opt.Endpoint = defaultEndpoint
	}
	f := &Fs{
		name:         name,
		opt:          *opt,
		bucket:       bucket,
		root:         directory,
		account:      account,
		key:          key,
		endpoint:     endpoint,
		srv:          rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		bufferTokens: make(chan []byte, fs.Config.Transfers),
@@ -269,8 +305,8 @@ func NewFs(name, root string) (fs.Fs, error) {
		BucketBased: true,
	}).Fill(f)
	// Set the test flag if required
	if *b2TestMode != "" {
		testMode := strings.TrimSpace(*b2TestMode)
	if opt.TestMode != "" {
		testMode := strings.TrimSpace(opt.TestMode)
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}
@@ -282,6 +318,11 @@ func NewFs(name, root string) (fs.Fs, error) {
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}
	// If this is a key limited to a single bucket, it must exist already
	if f.bucket != "" && f.info.Allowed.BucketID != "" {
		f.markBucketOK()
		f.setBucketID(f.info.Allowed.BucketID)
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the (bucket,directory) is actually an existing file
@@ -316,9 +357,9 @@ func (f *Fs) authorizeAccount() error {
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/b2api/v1/b2_authorize_account",
		RootURL:      f.endpoint,
		UserName:     f.account,
		Password:     f.key,
		RootURL:      f.opt.Endpoint,
		UserName:     f.opt.Account,
		Password:     f.opt.Key,
		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
	}
	err := f.pacer.Call(func() (bool, error) {
@@ -384,7 +425,7 @@ func (f *Fs) clearUploadURL() {
func (f *Fs) getUploadBlock() []byte {
	buf := <-f.bufferTokens
	if buf == nil {
		buf = make([]byte, chunkSize)
		buf = make([]byte, f.opt.ChunkSize)
	}
	// fs.Debugf(f, "Getting upload block %p", buf)
	return buf
@@ -393,7 +434,7 @@ func (f *Fs) getUploadBlock() []byte {
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
	buf = buf[:cap(buf)]
	if len(buf) != int(chunkSize) {
	if len(buf) != int(f.opt.ChunkSize) {
		panic("bad blocksize returned to pool")
	}
// fs.Debugf(f, "Returning upload block %p", buf)
@@ -563,7 +604,7 @@ func (f *Fs) markBucketOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	last := ""
	err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
	err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
		if err != nil {
			return err
@@ -635,7 +676,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	}
	list := walk.NewListRHelper(callback)
	last := ""
	err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
	err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
		if err != nil {
			return err
@@ -655,7 +696,11 @@ type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
	var account = api.Account{ID: f.info.AccountID}
	var account = api.ListBucketsRequest{
		AccountID: f.info.AccountID,
		BucketID:  f.info.Allowed.BucketID,
	}

	var response api.ListBucketsResponse
	opts := rest.Opts{
		Method: "POST",
@@ -1035,12 +1080,12 @@ func (o *Object) readMetaData() (err error) {
	maxSearched := 1
	var timestamp api.Timestamp
	baseRemote := o.remote
	if *b2Versions {
	if o.fs.opt.Versions {
		timestamp, baseRemote = api.RemoveVersion(baseRemote)
		maxSearched = maxVersions
	}
	var info *api.File
	err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
	err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			return nil
		}
@@ -1254,7 +1299,7 @@ func urlEncode(in string) string {
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	if *b2Versions {
	if o.fs.opt.Versions {
		return errNotWithVersions
	}
	err = o.fs.Mkdir("")
@@ -1289,7 +1334,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	} else {
		return err
	}
	} else if size > int64(uploadCutoff) {
	} else if size > int64(o.fs.opt.UploadCutoff) {
		up, err := o.fs.newLargeUpload(o, in, src)
		if err != nil {
			return err
@@ -1408,10 +1453,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

// Remove an object
func (o *Object) Remove() error {
	if *b2Versions {
	if o.fs.opt.Versions {
		return errNotWithVersions
	}
	if *b2HardDelete {
	if o.fs.opt.HardDelete {
		return o.fs.deleteByID(o.id, o.fs.root+o.remote)
	}
	return o.fs.hide(o.fs.root + o.remote)
@@ -1422,6 +1467,11 @@ func (o *Object) MimeType() string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = &Fs{}
@@ -1431,4 +1481,5 @@ var (
	_ fs.ListRer   = &Fs{}
	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.IDer      = &Object{}
)

@@ -1,75 +1,17 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test

import (
	"testing"

	"github.com/ncw/rclone/backend/b2"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*b2.Object)(nil))
	fstests.RemoteName = "TestB2:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestB2:",
		NilObject:  (*b2.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
	parts := int64(0)
	sha1SliceSize := int64(maxParts)
	if size == -1 {
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
	} else {
		parts = size / int64(chunkSize)
		if size%int64(chunkSize) != 0 {
		parts = size / int64(o.fs.opt.ChunkSize)
		if size%int64(o.fs.opt.ChunkSize) != 0 {
			parts++
		}
		if parts > maxParts {
@@ -357,7 +357,8 @@ outer:
	buf := up.f.getUploadBlock()

	// Read the chunk
	n, err := io.ReadFull(up.in, buf)
	var n int
	n, err = io.ReadFull(up.in, buf)
	if err == io.ErrUnexpectedEOF {
		fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
		buf = buf[:n]
@@ -366,7 +367,6 @@ outer:
	} else if err == io.EOF {
		fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
		up.f.putUploadBlock(buf)
		hasMoreParts = false
		err = nil
		break outer
	} else if err != nil {
@@ -409,8 +409,8 @@ outer:
	}

	reqSize := remaining
	if reqSize >= int64(chunkSize) {
		reqSize = int64(chunkSize)
	if reqSize >= int64(up.f.opt.ChunkSize) {
		reqSize = int64(up.f.opt.ChunkSize)
	}

	// Get a block of memory

@@ -61,7 +61,7 @@ func (e *Error) Error() string {
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"

// Types of things in Item
const (
@@ -74,18 +74,22 @@ const (

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Type              string `json:"type"`
	ID                string `json:"id"`
	SequenceID        string `json:"sequence_id"`
	Etag              string `json:"etag"`
	SHA1              string `json:"sha1"`
	Name              string `json:"name"`
	Size              int64  `json:"size"`
	CreatedAt         Time   `json:"created_at"`
	ModifiedAt        Time   `json:"modified_at"`
	ContentCreatedAt  Time   `json:"content_created_at"`
	ContentModifiedAt Time   `json:"content_modified_at"`
	ItemStatus        string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
	Type              string  `json:"type"`
	ID                string  `json:"id"`
	SequenceID        string  `json:"sequence_id"`
	Etag              string  `json:"etag"`
	SHA1              string  `json:"sha1"`
	Name              string  `json:"name"`
	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
	CreatedAt         Time    `json:"created_at"`
	ModifiedAt        Time    `json:"modified_at"`
	ContentCreatedAt  Time    `json:"content_created_at"`
	ContentModifiedAt Time    `json:"content_modified_at"`
	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
	SharedLink        struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
	} `json:"shared_link"`
}
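Size changes from int64 to float64 because Box can serialize very large sizes in scientific notation (the xEyy form referenced by #2261), which encoding/json will not decode into an integer field; setMetaData later in this diff converts the decoded value back with int64(info.Size). A short demonstration of the failure and the workaround:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Box can render a large size as a JSON number in exponent form.
	body := []byte(`{"size": 1.000001048576E13}`)

	var asInt struct {
		Size int64 `json:"size"`
	}
	fmt.Println(json.Unmarshal(body, &asInt)) // error: cannot unmarshal the number into an int64 field

	var asFloat struct {
		Size float64 `json:"size"`
	}
	if err := json.Unmarshal(body, &asFloat); err == nil {
		fmt.Println(int64(asFloat.Size)) // 10000010485760 - convert once decoded
	}
}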

// ModTime returns the modification time of the item
@@ -145,6 +149,14 @@ type CopyFile struct {
	Parent Parent `json:"parent"`
}

// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
	SharedLink struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
	} `json:"shared_link"`
}

// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
	FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -172,8 +184,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
	PartID string `json:"part_id"`
	Offset int    `json:"offset"`
	Size   int    `json:"size"`
	Offset int64  `json:"offset"`
	Size   int64  `json:"size"`
	Sha1   string `json:"sha1"`
}
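Offset and Size widen from int to int64. Plain int is only 32 bits on 32-bit builds, so part offsets past 2 GiB could not be represented there; this is an inferred rationale rather than one stated in the diff. A tiny illustration of the boundary:

package main

import "fmt"

func main() {
	const offset = int64(3) << 30 // 3 GiB into a multipart upload
	// On a 32-bit platform int(offset) would wrap to a negative number,
	// so the API structs keep offsets as int64 end to end.
	fmt.Println(offset, offset > 1<<31-1) // 3221225472 true
}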

@@ -16,7 +16,6 @@ import (
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"
@@ -24,7 +23,8 @@ import (
	"github.com/ncw/rclone/backend/box/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
@@ -47,6 +47,7 @@ const (
	uploadURL           = "https://upload.box.com/api/2.0"
	listChunks          = 1000     // chunk size to read directory listings
	minUploadCutoff     = 50000000 // upload cutoff can be no lower than this
	defaultUploadCutoff = 50 * 1024 * 1024
)

// Globals
@@ -62,7 +63,6 @@ var (
	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
	RedirectURL:  oauthutil.RedirectURL,
}
	uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
)

// Register with Fs
@@ -71,27 +71,43 @@ func init() {
		Name:        "box",
		Description: "Box",
		NewFs:       NewFs,
		Config: func(name string) {
			err := oauthutil.Config("box", name, oauthConfig)
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("box", name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Box App Client Id - leave blank normally.",
			Help: "Box App Client Id.\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Box App Client Secret - leave blank normally.",
			Help: "Box App Client Secret\nLeave blank normally.",
		}, {
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to multipart upload.",
			Default:  fs.SizeSuffix(defaultUploadCutoff),
			Advanced: true,
		}, {
			Name:     "commit_retries",
			Help:     "Max number of times to try committing a multipart file.",
			Default:  100,
			Advanced: true,
		}},
	})
	flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}

// Options defines the configuration for this backend
type Options struct {
	UploadCutoff  fs.SizeSuffix `config:"upload_cutoff"`
	CommitRetries int           `config:"commit_retries"`
}
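upload_cutoff selects between the two Box upload paths, and Object.Update later in this file branches on `size <= int64(o.fs.opt.UploadCutoff)`: at or below the cutoff a single upload call is made, above it the multipart session API is used. A trivial sketch of that dispatch; the returned strings are placeholders, not real functions:

package main

import "fmt"

const uploadCutoff = 50 * 1024 * 1024 // default: 50 MiB

func upload(size int64) string {
	// Mirrors the branch in Object.Update below.
	if size <= uploadCutoff {
		return "simple upload" // one POST to the upload endpoint
	}
	return "multipart upload" // create session, upload parts, commit
}

func main() {
	fmt.Println(upload(10 << 20)) // simple upload
	fmt.Println(upload(1 << 30))  // multipart upload
}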

// Fs represents a remote box
type Fs struct {
	name     string             // name of this remote
	root     string             // the path we are working on
	opt      Options            // parsed options
	features *fs.Features       // optional features
	srv      *rest.Client       // the connection to the one drive server
	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -110,6 +126,7 @@ type Object struct {
	size       int64     // size of the object
	modTime    time.Time // modification time of the object
	id         string    // ID of the object
	publicLink string    // Public Link for the object
	sha1       string    // SHA-1 of the object content
}

@@ -135,9 +152,6 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// Pattern to match a box path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a box 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
@@ -223,13 +237,20 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadCutoff < minUploadCutoff {
		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if opt.UploadCutoff < minUploadCutoff {
		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
	}

	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure Box: %v", err)
	}
@@ -237,6 +258,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
@@ -278,6 +300,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		}
		return nil, err
	}
	f.features.Fill(&newF)
	// return an error with an fs which points to the parent
	return &newF, fs.ErrorIsFile
}
@@ -653,7 +676,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
		Parameters: fieldsValue(),
	}
	replacedLeaf := replaceReservedChars(leaf)
	copy := api.CopyFile{
	copyFile := api.CopyFile{
		Name: replacedLeaf,
		Parent: api.Parent{
			ID: directoryID,
@@ -662,7 +685,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &copy, &info)
		resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -823,6 +846,46 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	return nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
	id, err := f.dirCache.FindDir(remote, false)
	var opts rest.Opts
	if err == nil {
		fs.Debugf(f, "attempting to share directory '%s'", remote)

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/folders/" + id,
			Parameters: fieldsValue(),
		}
	} else {
		fs.Debugf(f, "attempting to share single file '%s'", remote)
		o, err := f.NewObject(remote)
		if err != nil {
			return "", err
		}

		if o.(*Object).publicLink != "" {
			return o.(*Object).publicLink, nil
		}

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/files/" + o.(*Object).id,
			Parameters: fieldsValue(),
		}
	}

	shareLink := api.CreateSharedLink{}
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
		return shouldRetry(resp, err)
	})
	return info.SharedLink.URL, err
}
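With this method Box implements rclone's optional fs.PublicLinker interface, which the interface-check block further down asserts. Driving it from Go looks roughly like the sketch below; the remote name and path are illustrative and assume a box remote already configured:

package main

import (
	"fmt"
	"log"

	_ "github.com/ncw/rclone/backend/box" // register the backend
	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := fs.NewFs("box:")
	if err != nil {
		log.Fatal(err)
	}
	// PublicLink is optional - check the Fs actually supports it.
	linker, ok := f.(fs.PublicLinker)
	if !ok {
		log.Fatal("remote does not support public links")
	}
	link, err := linker.PublicLink("file.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(link)
}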
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
// optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
@@ -883,10 +946,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.size = int64(info.Size)
|
||||
o.sha1 = info.SHA1
|
||||
o.modTime = info.ModTime()
|
||||
o.id = info.ID
|
||||
o.publicLink = info.SharedLink.URL
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -993,8 +1057,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
|
||||
var resp *http.Response
|
||||
var result api.FolderItems
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Body: in,
|
||||
Method: "POST",
|
||||
Body: in,
|
||||
MultipartMetadataName: "attributes",
|
||||
MultipartContentName: "contents",
|
||||
MultipartFileName: upload.Name,
|
||||
@@ -1039,7 +1103,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Upload with simple or multipart
|
||||
if size <= int64(uploadCutoff) {
|
||||
if size <= int64(o.fs.opt.UploadCutoff) {
|
||||
err = o.upload(in, leaf, directoryID, modTime)
|
||||
} else {
|
||||
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
|
||||
@@ -1052,6 +1116,11 @@ func (o *Object) Remove() error {
|
||||
return o.fs.deleteObject(o.id)
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1061,5 +1130,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Box filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package box_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/box"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*box.Object)(nil))
|
||||
fstests.RemoteName = "TestBox:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestBox:",
|
||||
NilObject: (*box.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -96,7 +96,9 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
maxTries := fs.Config.LowLevelRetries
// For discussion of this value see:
// https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10
var tries int
outer:

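The hunk above replaces the global fs.Config.LowLevelRetries with a per-backend commit_retries option read from o.fs.opt.CommitRetries. The commitUpload body itself is not shown in this diff; the following is only a minimal sketch of the bounded-retry shape that maxTries and defaultDelay suggest, with commit standing in for the real request:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryCommit retries commit up to maxTries times with a fixed delay,
// mirroring the maxTries/defaultDelay pattern in the hunk above.
// commit is a hypothetical stand-in for the request commitUpload issues.
func retryCommit(maxTries int, delay time.Duration, commit func() error) error {
	var err error
	for tries := 1; tries <= maxTries; tries++ {
		if err = commit(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("commit failed after %d tries: %v", maxTries, err)
}

func main() {
	calls := 0
	err := retryCommit(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("not yet")
		}
		return nil
	})
	fmt.Println(err) // <nil>: the third attempt succeeds
}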
763	backend/cache/cache.go (vendored): file diff suppressed because it is too large
740	backend/cache/cache_internal_test.go (vendored)
@@ -1,4 +1,4 @@
// +build !plan9,go1.7
// +build !plan9

package cache_test

@@ -33,13 +33,13 @@ import (
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
flag "github.com/spf13/pflag"
"github.com/stretchr/testify/require"
)

@@ -50,6 +50,7 @@ const (
cryptedTextBase64 = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ==" // one content
cryptedText2Base64 = "UkNMT05FAAATcQkVsgjBh8KafCKcr0wdTa1fMmV0U8hsCLGFoqcvxKVmvv7wx3Hf5EXxFcki2FFV4sdpmSrb9Q==" // updated content
cryptedText3Base64 = "UkNMT05FAAB/f7YtYKbPfmk9+OX/ffN3qG3OEdWT+z74kxCX9V/YZwJ4X2DN3HOnUC3gKQ4Gcoud5UtNvQ==" // test content
letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
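The cryptedText*Base64 constants are pre-encrypted fixtures: when the wrapped root is a crypt remote, the tests decode one of these instead of writing plaintext. A minimal standard-library sketch of that decode step:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Decode a base64 fixture the same way the tests do for crypt roots.
	const cryptedTextBase64 = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ=="
	testData, err := base64.StdEncoding.DecodeString(cryptedTextBase64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of pre-encrypted fixture data\n", len(testData))
}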
var (
@@ -132,13 +133,14 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
require.Len(t, listInner, 1)
}

/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)

vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("test")
@@ -229,6 +231,7 @@ func TestInternalVfsCache(t *testing.T) {
cacheCh <- true
readCh <- true
}
*/

func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
@@ -308,7 +311,7 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
chunkSize := cfs.ChunkSize()

// create some rand test data
testData := runInstance.randomBytes(t, chunkSize*4+chunkSize/2)
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))

// write the object
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
@@ -385,27 +388,19 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

// create some rand test data
testSize := chunkSize*4 + chunkSize/2
testData := runInstance.randomBytes(t, testSize)
testData := randStringBytes(int(testSize))

// write the object
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
time.Sleep(time.Second * 3)

data2, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())

// check sample of data from in-file
sampleStart := chunkSize / 2
sampleEnd := chunkSize
testSample := testData[sampleStart:sampleEnd]
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false)
require.NoError(t, err)
require.Equal(t, len(checkSample), len(testSample))
require.Equal(t, int64(len(checkSample)), o.Size())

for i := 0; i < len(checkSample); i++ {
require.Equal(t, testSample[i], checkSample[i])
require.Equal(t, testData[i], checkSample[i])
}
}

@@ -424,7 +419,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

// create some rand test data
testSize := chunkSize*10 + chunkSize/2
testData := runInstance.randomBytes(t, testSize)
testData := randStringBytes(int(testSize))

// write the object
runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
@@ -447,7 +442,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
chunkSize := cfs.ChunkSize()

// create some rand test data
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

// update in the wrapped fs
@@ -579,6 +574,93 @@ func TestInternalMoveWithNotify(t *testing.T) {
require.NoError(t, err)
}

func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)

srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test")
dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test2")
// create some rand test data
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
err = rootFs.Mkdir("test")
require.NoError(t, err)
err = rootFs.Mkdir("test/one")
require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

// list in mount
_, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
_, err = runInstance.list(t, rootFs, "test/one")
require.NoError(t, err)

found := boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, found)
boltDb.Purge()
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
require.False(t, found)

// move file
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err)

err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return errors.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return errors.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
}
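retryBlock is defined outside this diff; from the call above it appears to take a closure, an attempt count, and a per-attempt interval. A plausible sketch under that assumed signature (only the "time" import is needed):

// Assumed shape of the retryBlock helper, inferred from the call
// `retryBlock(func() error { ... }, 12, time.Second*10)` above.
func retryBlock(fn func() error, attempts int, interval time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}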

func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -589,7 +671,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
chunkSize := cfs.ChunkSize()

// create some rand test data
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

// update in the wrapped fs
@@ -617,19 +699,22 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
rc.Start(&rcflags.Opt)

id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)

if !runInstance.useMount {
t.Skipf("needs mount")
}
if !runInstance.wrappedIsExternal {
t.Skipf("needs drive")
}

cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()

// create some rand test data
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

// update in the wrapped fs
@@ -660,11 +745,36 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
li1, err := runInstance.list(t, rootFs, "")

// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)

// list should have 1 item only
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)

m = make(map[string]string)
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached directory cleared")

// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.Len(t, li2, 2)
}
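The rc check above can be reproduced outside the test suite: POST to the cache/expire endpoint on the default rc port and inspect the JSON reply. A standalone sketch using only the endpoint and response keys that appear in the test:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Expire the cached root directory via the rc API, as the test does.
	res, err := http.Post("http://localhost:5572/cache/expire?remote=/",
		"application/json; charset=utf-8", strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	defer func() {
		_ = res.Body.Close()
	}()

	m := make(map[string]string)
	if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Println(m["status"], m["message"]) // expect "ok" and a "cached directory cleared" message
}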

func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

cfs, err := runInstance.getCacheFs(rootFs)
@@ -673,7 +783,7 @@ func TestInternalCacheWrites(t *testing.T) {

// create some rand test data
earliestTime := time.Now()
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
expectedTs := time.Now()
ts, err := boltDb.GetChunkTs(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "data.bin")), 0)
@@ -683,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {

func TestInternalMaxChunkSizeRespected(t *testing.T) {
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

cfs, err := runInstance.getCacheFs(rootFs)
@@ -692,7 +802,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
totalChunks := 20

// create some rand test data
testData := runInstance.randomBytes(t, (int64(totalChunks-1)*chunkSize + chunkSize/2))
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -758,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {

id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

if runInstance.rootIsCrypt {
@@ -805,465 +915,10 @@ func TestInternalBug2117(t *testing.T) {
require.Len(t, di, 4)
}

func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)

_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}

func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)

if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)

runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))

// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}

func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)

// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")

de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)

time.Sleep(time.Second * 5)
_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.NoError(t, err)

err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)

// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}

func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())

lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)

// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))

if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}

// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)

// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)

// retry until we have no more temp files and fail if they don't go down to 0
tf, err := ioutil.ReadDir(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.NoError(t, err)
require.Len(t, tf, 0)

// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}

func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()

// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}

// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)

// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}

// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}

// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}

// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}

func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()

// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)

// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}

// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}

// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}

// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)

// test SetModTime -- seems to work cause of previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}

// FIXME, enable this when mount is sorted out
//func TestInternalFilesMissingInMount1904(t *testing.T) {
// t.Skip("Not yet")
// if runtime.GOOS == "windows" {
// t.Skip("Not yet")
// }
// id := "tifm1904"
// rootFs, _ := newCacheFs(t, RemoteName, id, false,
// map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
// mntPoint := path.Join("/tmp", "tifm1904-mnt")
// testPoint := path.Join(mntPoint, id)
// checkOutput := "1 10 100 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 27 28 29 3 30 31 32 33 34 35 36 37 38 39 4 40 41 42 43 44 45 46 47 48 49 5 50 51 52 53 54 55 56 57 58 59 6 60 61 62 63 64 65 66 67 68 69 7 70 71 72 73 74 75 76 77 78 79 8 80 81 82 83 84 85 86 87 88 89 9 90 91 92 93 94 95 96 97 98 99 "
//
// _ = os.MkdirAll(mntPoint, os.ModePerm)
//
// list, err := rootFs.List("")
// require.NoError(t, err)
// found := false
// list.ForDir(func(d fs.Directory) {
// if strings.Contains(d.Remote(), id) {
// found = true
// }
// })
//
// if !found {
// t.Skip("Test folder '%v' doesn't exist", id)
// }
//
// mountFs(t, rootFs, mntPoint)
// defer unmountFs(t, mntPoint)
//
// for i := 1; i <= 2; i++ {
// out, err := exec.Command("ls", testPoint).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1))
// t.Logf("root path has all files")
// _ = writeObjectString(t, rootFs, path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"), "one content")
//
// for j := 1; j <= 100; j++ {
// out, err := exec.Command("ls", path.Join(testPoint, strconv.Itoa(j))).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1), "'%v' doesn't match", j)
// }
// obj, err := rootFs.NewObject(path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"))
// require.NoError(t, err)
// err = obj.Remove()
// require.NoError(t, err)
// t.Logf("folders contain all the files")
//
// out, err = exec.Command("date").Output()
// require.NoError(t, err)
// t.Logf("check #%v date: '%v'", i, strings.Replace(string(out), "\n", " ", -1))
//
// if i < 2 {
// time.Sleep(time.Second * 60)
// }
// }
//}

// run holds the remotes for a test run
type run struct {
okDiff time.Duration
allCfgMap map[string]string
allFlagMap map[string]string
runDefaultCfgMap map[string]string
runDefaultFlagMap map[string]string
runDefaultCfgMap configmap.Simple
mntDir string
tmpUploadDir string
useMount bool
@@ -1287,38 +942,16 @@ func newRun() *run {
isMounted: false,
}

r.allCfgMap = map[string]string{
"plex_url": "",
"plex_username": "",
"plex_password": "",
"chunk_size": cache.DefCacheChunkSize,
"info_age": cache.DefCacheInfoAge,
"chunk_total_size": cache.DefCacheTotalChunkSize,
// Read in all the defaults for all the options
fsInfo, err := fs.Find("cache")
if err != nil {
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
}
r.allFlagMap = map[string]string{
"cache-db-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-db-purge": "true",
"cache-chunk-size": cache.DefCacheChunkSize,
"cache-total-chunk-size": cache.DefCacheTotalChunkSize,
"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
"cache-info-age": cache.DefCacheInfoAge,
"cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries),
"cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers),
"cache-chunk-no-memory": "false",
"cache-rps": strconv.Itoa(cache.DefCacheRps),
"cache-writes": "false",
"cache-tmp-upload-path": "",
"cache-tmp-wait-time": cache.DefCacheTmpWaitTime,
}
r.runDefaultCfgMap = make(map[string]string)
for key, value := range r.allCfgMap {
r.runDefaultCfgMap[key] = value
}
r.runDefaultFlagMap = make(map[string]string)
for key, value := range r.allFlagMap {
r.runDefaultFlagMap[key] = value
r.runDefaultCfgMap = configmap.Simple{}
for _, option := range fsInfo.Options {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
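The rewritten newRun no longer hard-codes two parallel default maps; it looks the cache backend up in the fs registry and copies each registered option's default into a configmap. The same pattern in isolation (fs and configmap are the ncw/rclone packages already imported in this file; the final override line is just an illustration of how a test would layer its own value on top):

// Seed a configmap with the cache backend's registered option defaults.
fsInfo, err := fs.Find("cache")
if err != nil {
	panic(fmt.Sprintf("couldn't find cache remote: %v", err))
}
defaults := configmap.Simple{}
for _, option := range fsInfo.Options {
	defaults.Set(option.Name, fmt.Sprint(option.Default))
}
defaults.Set("workers", "1") // a per-test override goes on top like this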

if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
@@ -1395,7 +1028,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
config.FileSet(remote, "type", "cache")
config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
} else {
remoteType := fs.ConfigFileGet(remote, "type", "")
remoteType := config.FileGet(remote, "type", "")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -1406,14 +1039,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
config.FileSet(remote, "password", cryptPassword1)
config.FileSet(remote, "password2", cryptPassword2)
}
remoteRemote := fs.ConfigFileGet(remote, "remote", "")
remoteRemote := config.FileGet(remote, "remote", "")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := fs.ConfigFileGet(remoteWrapping, "type", "")
remoteType := config.FileGet(remoteWrapping, "type", "")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -1428,28 +1061,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)

for k, v := range r.runDefaultCfgMap {
if c, ok := cfg[k]; ok {
config.FileSet(cacheRemote, k, c)
} else {
config.FileSet(cacheRemote, k, v)
}
}
for k, v := range r.runDefaultFlagMap {
if c, ok := flags[k]; ok {
_ = flag.Set(k, c)
} else {
_ = flag.Set(k, v)
}
}
fs.Config.LowLevelRetries = 1

m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}

// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := fs.NewFs(remote + ":" + id)
f, err := cache.NewFs(remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
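newCacheFs now builds one configmap.Simple, defaults first and the per-test flags map second so that later Set calls win, and hands it straight to cache.NewFs instead of writing a config file and mutating pflag state. In outline (runDefaults and flags are placeholder names for the two maps in scope above):

// Merge order matters: entries from flags overwrite the defaults.
m := configmap.Simple{}
for k, v := range runDefaults { // runDefaults: the defaults built in newRun
	m.Set(k, v)
}
for k, v := range flags { // flags: this test's overrides
	m.Set(k, v)
}
f, err := cache.NewFs(remote, id, m) // construct the backend directly from the map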
@@ -1499,18 +1126,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
for k, v := range r.runDefaultFlagMap {
_ = flag.Set(k, v)
}
}

func (r *run) randomBytes(t *testing.T, size int64) []byte {
testData := make([]byte, size)
testSize, err := rand.Read(testData)
require.Equal(t, size, int64(len(testData)))
require.Equal(t, size, int64(testSize))
require.NoError(t, err)
return testData
}

func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
@@ -1521,12 +1136,12 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
require.NoError(t, err)

for i := 0; i < int(cnt); i++ {
data := r.randomBytes(t, chunk)
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
data := r.randomBytes(t, int64(left))
data := randStringBytes(int(left))
_, _ = f.Write(data)
_, _ = f.Seek(int64(0), 0)
_, _ = f.Seek(int64(0), io.SeekStart)
r.tempFiles = append(r.tempFiles, f)

return f
@@ -1535,7 +1150,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := r.randomBytes(t, size)
testData := randStringBytes(int(size))

r.writeRemoteBytes(t, f, remote, testData)
return remote
@@ -1544,7 +1159,7 @@ func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := r.randomBytes(t, size)
testData := randStringBytes(int(size))

return r.writeObjectBytes(t, f, remote, testData)
}
@@ -1653,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
if err != nil {
return checkSample, err
}
_, _ = f.Seek(offset, 0)
_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {
@@ -1662,9 +1277,6 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
if err != nil {
return checkSample, err
}
if !noLengthCheck && size != int64(totalRead) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", totalRead, size)
}
} else {
co, err := f.NewObject(remote)
if err != nil {
@@ -1688,7 +1300,7 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
err = nil
checkSample = checkSample[:totalRead]
}
require.NoError(t, err)
require.NoError(t, err, "with string -%v-", string(checkSample))
_ = reader.Close()
return checkSample
}
@@ -1897,16 +1509,11 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
if err != nil {
return err
}
_, err = f.WriteString(data + append)
if err != nil {
defer func() {
_ = f.Close()
return err
}
err = f.Close()
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
r.vfs.WaitForWriters(10 * time.Second)
}()
_, err = f.WriteString(data + append)
} else {
obj1, err := rootFs.NewObject(src)
if err != nil {
@@ -1916,9 +1523,6 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(r, objInfo1)
if err != nil {
return err
}
}

return err
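The updateData rewrite moves Close and WaitForWriters into a deferred closure, so cleanup now runs on every return path rather than only after a failed write. The same pattern with the standard library alone:

package main

import "os"

// appendToFile shows the defer pattern from the hunk above: the close
// (and any post-write synchronisation) runs on every return path.
func appendToFile(name, text string) error {
	f, err := os.OpenFile(name, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer func() {
		_ = f.Close()
	}()
	_, err = f.WriteString(text)
	return err
}

func main() {
	if err := appendToFile("/tmp/example.txt", "one content updated\n"); err != nil {
		panic(err)
	}
}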
@@ -2055,6 +1659,14 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
return nil, errors.New("didn't found a cache fs")
}

func randStringBytes(n int) []byte {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return b
}
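randStringBytes draws from math/rand, so the output is deterministic for a given seed; the tests that need variation call rand.Seed(time.Now().Unix()) first. A self-contained usage sketch:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// randStringBytes as defined above: n pseudo-random ASCII letters.
func randStringBytes(n int) []byte {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return b
}

func main() {
	rand.Seed(time.Now().Unix()) // seed once, as the tests do
	fmt.Println(string(randStringBytes(16)))
}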

var (
_ fs.Fs = (*cache.Fs)(nil)
_ fs.Fs = (*local.Fs)(nil)

4	backend/cache/cache_mount_unix_test.go (vendored)
@@ -1,4 +1,4 @@
// +build !plan9,!windows,go1.7
// +build !plan9,!windows

package cache_test

@@ -23,7 +23,7 @@ func (r *run) mountFs(t *testing.T, f fs.Fs) {
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
fuse.AllowOther(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)

2	backend/cache/cache_mount_windows_test.go (vendored)
@@ -1,4 +1,4 @@
// +build windows,go1.7
// +build windows

package cache_test

72	backend/cache/cache_test.go (vendored)
@@ -1,9 +1,6 @@
// Test Cache filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build !plan9,go1.7
// +build !plan9

package cache_test

@@ -12,68 +9,13 @@ import (

"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*cache.Object)(nil))
fstests.RemoteName = "TestCache:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

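This file is the heart of the harness change: the generated per-feature wrappers above are collapsed into the single TestIntegration shown at the top, which delegates everything to fstests.Run. Any backend adopting the new harness follows the same shape; for a hypothetical backend mybackend it would look like:

// Hypothetical example of the new-style integration test entry point.
package mybackend_test

import (
	"testing"

	"github.com/ncw/rclone/backend/mybackend" // hypothetical backend
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs the whole generic suite against the remote.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestMyBackend:",
		NilObject:  (*mybackend.Object)(nil),
	})
}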
2	backend/cache/cache_unsupported.go (vendored)
@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 !go1.7
// +build plan9

package cache

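Dropping !go1.7 leaves plan9 as the only constraint on this stub, which exists so the package always has at least one buildable file. A minimal sketch of such an inverse stub, using the old-style // +build syntax, which needs a blank line before the package clause:

// Stub built only on plan9, where the real cache backend is excluded,
// so `go build ./...` still finds a buildable file in the package.

// +build plan9

package cache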
455	backend/cache/cache_upload_test.go (vendored, new file)
@@ -0,0 +1,455 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir("one")
	require.NoError(t, err)
	err = rootFs.Mkdir("one/test")
	require.NoError(t, err)
	err = rootFs.Mkdir("second")
	require.NoError(t, err)

	// create some random test data
	testSize := int64(1048576)
	testReader := runInstance.randomReader(t, testSize)
	testReader2 := runInstance.randomReader(t, testSize)
	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
	runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)

	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
	require.False(t, os.IsNotExist(err))

	runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
	require.True(t, os.IsNotExist(err))

	de1, err := runInstance.list(t, rootFs, "one/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	// check if it can be read
	de1, err = runInstance.list(t, rootFs, "second")
	require.NoError(t, err)
	require.Len(t, de1, 1)
}

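// TestInternalUploadQueueMoreFiles queues several files of random size and
// verifies that the cache lists all of them both while they are queued and
// after the background uploads have drained.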
func TestInternalUploadQueueMoreFiles(t *testing.T) {
	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir("test")
	require.NoError(t, err)
	minSize := 5242880
	maxSize := 10485760
	totalFiles := 10
	rand.Seed(time.Now().Unix())

	lastFile := ""
	for i := 0; i < totalFiles; i++ {
		size := int64(rand.Intn(maxSize-minSize) + minSize)
		testReader := runInstance.randomReader(t, size)
		remote := "test/" + strconv.Itoa(i) + ".bin"
		runInstance.writeRemoteReader(t, rootFs, remote, testReader)

		// validate that it exists in temp fs
		ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
		require.NoError(t, err)
		require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))

		if runInstance.wrappedIsExternal && i < totalFiles-1 {
			time.Sleep(time.Second * 3)
		}
		lastFile = remote
	}

	// check that the cache lists all files; the temp uploads likely haven't finished yet
	de1, err := runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)

	// wait for the background uploader to do its thing
	runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)

	// retry until there are no more temp files; fail if they don't go down to 0
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
	require.True(t, os.IsNotExist(err))

	// check that the cache lists all files
	de1, err = runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)
}

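// TestInternalUploadTempFileOperations verifies which operations (DirMove,
// Rmdir, Move, Copy, Remove, Update, SetModTime) are allowed on a file that
// is queued for upload but not yet uploading.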
func TestInternalUploadTempFileOperations(t *testing.T) {
	id := "tiutfo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some random test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test DirMove - allowed
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject("second/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.NoError(t, err)
		_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
		require.Error(t, err)
		var started bool
		started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
		require.NoError(t, err)
		require.False(t, started)
		runInstance.mkdir(t, rootFs, "test")
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Rmdir - allowed
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	require.Contains(t, err.Error(), "directory not empty")
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
	require.False(t, started)
	require.NoError(t, err)

	// test Move/Rename -- allowed
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.NoError(t, err)
		// try to read from it
		_, err = rootFs.NewObject("test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject("test/second")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.NoError(t, err)
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove -- allowed
	err = runInstance.rm(t, rootFs, "test/one")
	require.NoError(t, err)
	_, err = rootFs.NewObject("test/one")
	require.Error(t, err)
	// validate that it doesn't exist in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.Error(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update -- allowed
	firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
	require.NoError(t, err)
	obj2, err := rootFs.NewObject("test/one")
	require.NoError(t, err)
	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
	require.Equal(t, "one content updated", string(data2))
	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	if runInstance.rootIsCrypt {
		require.Equal(t, int64(67), tmpInfo.Size())
	} else {
		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	}

	// test SetModTime -- allowed
	secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	require.NotEqual(t, secondModTime, firstModTime)
	require.NotEqual(t, time.Time{}, firstModTime)
	require.NotEqual(t, time.Time{}, secondModTime)
}

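// TestInternalUploadUploadingFileOperations runs the same operations against
// a file whose upload is marked as started, where moves and removes are
// expected to fail.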
func TestInternalUploadUploadingFileOperations(t *testing.T) {
	id := "tiuufo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some random test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
	require.NoError(t, err)

	// test DirMove
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.Error(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.Error(t, err)
	}

	// test Rmdir
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it still exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test Move/Rename
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.Error(t, err)
		// try to read from it
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/second")
		require.Error(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.Error(t, err)
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove
	err = runInstance.rm(t, rootFs, "test/one")
	require.Error(t, err)
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it still exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update - this seems to work. Why? FIXME
	//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
	//	data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
	//	require.Equal(t, "one content", string(data2))
	//
	//	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	//	require.NoError(t, err)
	//	if runInstance.rootIsCrypt {
	//		require.Equal(t, int64(67), tmpInfo.Size())
	//	} else {
	//		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	//	}
	//})
	//require.Error(t, err)

	// test SetModTime -- seems to work because of the previous step
	//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//require.Equal(t, secondModTime, firstModTime)
	//require.NotEqual(t, time.Time{}, firstModTime)
	//require.NotEqual(t, time.Time{}, secondModTime)
}

backend/cache/directory.go | 12 (vendored)
@@ -1,4 +1,4 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
@@ -12,7 +12,7 @@ import (
 
 // Directory is a generic dir that stores basic information about it
 type Directory struct {
-	fs.Directory `json:"-"`
+	Directory fs.Directory `json:"-"` // can be nil
 
 	CacheFs *Fs    `json:"-"`    // cache fs
 	Name    string `json:"name"` // name of the directory
@@ -125,6 +125,14 @@ func (d *Directory) Items() int64 {
 	return d.CacheItems
 }
 
+// ID returns the ID of the cached directory if known
+func (d *Directory) ID() string {
+	if d.Directory == nil {
+		return ""
+	}
+	return d.Directory.ID()
+}
+
 var (
 	_ fs.Directory = (*Directory)(nil)
 )
backend/cache/handle.go | 173 (vendored)
@@ -1,11 +1,10 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
 import (
 	"fmt"
 	"io"
-	"os"
 	"sync"
 	"time"
 
@@ -50,12 +49,13 @@ type Handle struct {
 	offset         int64
 	seenOffsets    map[int64]bool
 	mu             sync.Mutex
+	workersWg      sync.WaitGroup
 	confirmReading chan bool
 
-	UseMemory bool
-	workers   []*worker
-	closed    bool
-	reading   bool
+	workers     int
+	maxWorkerID int
+	UseMemory   bool
+	closed      bool
+	reading     bool
 }
 
 // NewObjectHandle returns a new Handle for an existing Object
@@ -66,14 +66,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
 		offset:        0,
 		preloadOffset: -1, // -1 to trigger the first preload
 
-		UseMemory: cfs.chunkMemory,
+		UseMemory: !cfs.opt.ChunkNoMemory,
 		reading:   false,
 	}
 	r.seenOffsets = make(map[int64]bool)
 	r.memory = NewMemory(-1)
 
 	// create a larger buffer to queue up requests
-	r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
+	r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
 	r.confirmReading = make(chan bool)
 	r.startReadWorkers()
 	return r
@@ -96,10 +96,10 @@ func (r *Handle) String() string {
 
 // startReadWorkers will start the worker pool
 func (r *Handle) startReadWorkers() {
-	if r.hasAtLeastOneWorker() {
+	if r.workers > 0 {
 		return
 	}
-	totalWorkers := r.cacheFs().totalWorkers
+	totalWorkers := r.cacheFs().opt.TotalWorkers
 
 	if r.cacheFs().plexConnector.isConfigured() {
 		if !r.cacheFs().plexConnector.isConnected() {
@@ -118,26 +118,27 @@ func (r *Handle) startReadWorkers() {
 
 // scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
-	current := len(r.workers)
+	current := r.workers
 	if current == desired {
 		return
 	}
 	if current > desired {
 		// scale in gracefully
-		for i := 0; i < current-desired; i++ {
+		for r.workers > desired {
 			r.preloadQueue <- -1
+			r.workers--
 		}
 	} else {
 		// scale out
-		for i := 0; i < desired-current; i++ {
+		for r.workers < desired {
 			w := &worker{
 				r:  r,
-				ch: r.preloadQueue,
-				id: current + i,
+				id: r.maxWorkerID,
 			}
+			r.workersWg.Add(1)
+			r.workers++
+			r.maxWorkerID++
 			go w.run()
-
-			r.workers = append(r.workers, w)
 		}
 	}
 	// ignore first scale out from 0
@@ -146,36 +147,18 @@ func (r *Handle) scaleWorkers(desired int) {
 	}
 }
 
-func (r *Handle) requestExternalConfirmation() {
-	// if there's no external confirmation available
-	// then we skip this step
-	if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
-		!r.cacheFs().plexConnector.isConnected() {
-		return
-	}
-	go r.cacheFs().plexConnector.isPlayingAsync(r.cachedObject, r.confirmReading)
-}
-
 func (r *Handle) confirmExternalReading() {
 	// if we have a max value of workers
 	// or there's no external confirmation available
 	// then we skip this step
-	if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
-		!r.cacheFs().plexConnector.isConnected() {
+	if r.workers > 1 ||
+		!r.cacheFs().plexConnector.isConfigured() {
 		return
 	}
-
-	select {
-	case confirmed := <-r.confirmReading:
-		if !confirmed {
-			return
-		}
-	default:
+	if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
 		return
 	}
-
 	fs.Infof(r, "confirmed reading by external reader")
-	r.scaleWorkers(r.cacheFs().totalMaxWorkers)
+	r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
 }
 
 // queueOffset will send an offset to the workers if it's different from the last one
@@ -197,8 +180,8 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}
 
-	for i := 0; i < len(r.workers); i++ {
-		o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
+	for i := 0; i < r.workers; i++ {
+		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
 		}
@@ -209,21 +192,9 @@ func (r *Handle) queueOffset(offset int64) {
 			r.seenOffsets[o] = true
 			r.preloadQueue <- o
 		}
-
-		r.requestExternalConfirmation()
 	}
 }
 
-func (r *Handle) hasAtLeastOneWorker() bool {
-	oneWorker := false
-	for i := 0; i < len(r.workers); i++ {
-		if r.workers[i].isRunning() {
-			oneWorker = true
-		}
-	}
-	return oneWorker
-}
-
 // getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
 // it can be from transient or persistent cache
 // it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -232,7 +203,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	var err error
 
 	// we calculate the modulus of the requested offset with the size of a chunk
-	offset := chunkStart % r.cacheFs().chunkSize
+	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
 
 	// we align the start offset of the first chunk to a likely chunk in the storage
 	chunkStart = chunkStart - offset
@@ -249,7 +220,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := 0; i < r.cacheFs().readRetries*8; i++ {
+		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
@@ -264,7 +235,7 @@
 	// not found in ram or
 	// the worker didn't managed to download the chunk in time so we abort and close the stream
 	if err != nil || len(data) == 0 || !found {
-		if !r.hasAtLeastOneWorker() {
+		if r.workers == 0 {
 			fs.Errorf(r, "out of workers")
 			return nil, io.ErrUnexpectedEOF
 		}
@@ -274,9 +245,9 @@
 
 	// first chunk will be aligned with the start
 	if offset > 0 {
-		if offset >= int64(len(data)) {
+		if offset > int64(len(data)) {
 			fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
-				r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
+				r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
 			return nil, io.ErrUnexpectedEOF
 		}
 		data = data[int(offset):]
@@ -294,7 +265,6 @@ func (r *Handle) Read(p []byte) (n int, err error) {
 	// first reading
 	if !r.reading {
 		r.reading = true
-		r.requestExternalConfirmation()
 	}
 	// reached EOF
 	if r.offset >= r.cachedObject.Size() {
@@ -302,8 +272,10 @@
 	}
 	currentOffset := r.offset
 	buf, err = r.getChunk(currentOffset)
-	if err != nil && len(buf) == 0 {
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
+	}
+	if len(buf) == 0 && err != io.ErrUnexpectedEOF {
 		return 0, io.EOF
 	}
 	readSize := copy(p, buf)
@@ -324,14 +296,8 @@ func (r *Handle) Close() error {
 	close(r.preloadQueue)
 	r.closed = true
 	// wait for workers to complete their jobs before returning
-	waitCount := 3
-	for i := 0; i < len(r.workers); i++ {
-		waitIdx := 0
-		for r.workers[i].isRunning() && waitIdx < waitCount {
-			time.Sleep(time.Second)
-			waitIdx++
-		}
-	}
+	r.workersWg.Wait()
+	r.memory.db.Flush()
 
 	fs.Debugf(r, "cache reader closed %v", r.offset)
 	return nil
@@ -344,22 +310,22 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 
 	var err error
 	switch whence {
-	case os.SEEK_SET:
+	case io.SeekStart:
 		fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
 		r.offset = offset
-	case os.SEEK_CUR:
+	case io.SeekCurrent:
 		fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
 		r.offset += offset
-	case os.SEEK_END:
+	case io.SeekEnd:
 		fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
 		r.offset = r.cachedObject.Size() + offset
 	default:
 		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
 	}
 
-	chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
-	if chunkStart >= r.cacheFs().chunkSize {
-		chunkStart = chunkStart - r.cacheFs().chunkSize
+	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
+	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
	}
 	r.queueOffset(chunkStart)
 
@@ -367,12 +333,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 }
 
 type worker struct {
-	r       *Handle
-	ch      <-chan int64
-	rc      io.ReadCloser
-	id      int
-	running bool
-	mu      sync.Mutex
+	r  *Handle
+	rc io.ReadCloser
+	id int
 }
 
 // String is a representation of this worker
@@ -399,10 +362,10 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 
 	if !closeOpen {
 		if do, ok := r.(fs.RangeSeeker); ok {
-			_, err = do.RangeSeek(offset, os.SEEK_SET, end-offset)
+			_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
 			return r, err
 		} else if do, ok := r.(io.Seeker); ok {
-			_, err = do.Seek(offset, os.SEEK_SET)
+			_, err = do.Seek(offset, io.SeekStart)
 			return r, err
 		}
 	}
@@ -417,33 +380,19 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	})
 }
 
-func (w *worker) isRunning() bool {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	return w.running
-}
-
-func (w *worker) setRunning(f bool) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	w.running = f
-}
-
 // run is the main loop for the worker which receives offsets to preload
 func (w *worker) run() {
 	var err error
 	var data []byte
-	defer w.setRunning(false)
 	defer func() {
 		if w.rc != nil {
 			_ = w.rc.Close()
-			w.setRunning(false)
 		}
+		w.r.workersWg.Done()
 	}()
 
 	for {
-		chunkStart, open := <-w.ch
-		w.setRunning(true)
+		chunkStart, open := <-w.r.preloadQueue
 		if chunkStart < 0 || !open {
 			break
 		}
@@ -464,14 +413,13 @@ func (w *worker) run() {
 					continue
 				}
 			}
-			err = nil
 		} else {
 			if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
 				continue
 			}
 		}
 
-		chunkEnd := chunkStart + w.r.cacheFs().chunkSize
+		chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
 		// TODO: Remove this comment if it proves to be reliable for #1896
 		//if chunkEnd > w.r.cachedObject.Size() {
 		//	chunkEnd = w.r.cachedObject.Size()
@@ -486,7 +434,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	var data []byte
 
 	// stop retries
-	if retry >= w.r.cacheFs().readRetries {
+	if retry >= w.r.cacheFs().opt.ReadRetries {
 		return
 	}
 	// back-off between retries
@@ -511,7 +459,7 @@
 	}
 
 	data = make([]byte, chunkEnd-chunkStart)
-	sourceRead := 0
+	var sourceRead int
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
@@ -632,7 +580,7 @@ func (b *backgroundWriter) run() {
 			return
 		}
 
-		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
+		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
 		if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
 			time.Sleep(time.Second)
 			continue
@@ -648,6 +596,23 @@ func (b *backgroundWriter) run() {
 			fs.Errorf(remote, "background upload: %v", err)
 			continue
 		}
+		// clean empty dirs up to root
+		thisDir := cleanPath(path.Dir(remote))
+		for thisDir != "" {
+			thisList, err := b.fs.tempFs.List(thisDir)
+			if err != nil {
+				break
+			}
+			if len(thisList) > 0 {
+				break
+			}
+			err = b.fs.tempFs.Rmdir(thisDir)
+			fs.Debugf(thisDir, "cleaned from temp path")
+			if err != nil {
+				break
+			}
+			thisDir = cleanPath(path.Dir(thisDir))
+		}
 		fs.Infof(remote, "background upload: uploaded entry")
 		err = b.fs.cache.removePendingUpload(absPath)
 		if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
backend/cache/object.go | 36 (vendored)
@@ -1,10 +1,9 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
 import (
 	"io"
-	"os"
 	"path"
 	"sync"
 	"time"
@@ -45,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
 
 	cacheType := objectInCache
 	parentFs := f.UnWrap()
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		_, err := f.cache.SearchPendingUpload(fullRemote)
 		if err == nil { // queued for upload
 			cacheType = objectPendingUpload
@@ -76,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 
 	cacheType := objectInCache
 	parentFs := f.UnWrap()
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		_, err := f.cache.SearchPendingUpload(fullRemote)
 		if err == nil { // queued for upload
 			cacheType = objectPendingUpload
@@ -154,7 +153,7 @@ func (o *Object) Storable() bool {
 // 2. is not pending a notification from the wrapped fs
 func (o *Object) refresh() error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
-	isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
+	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
@@ -209,11 +208,17 @@ func (o *Object) SetModTime(t time.Time) error {
 
 // Open is used to request a specific part of the file using fs.RangeOption
 func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
-	if err := o.refreshFromSource(true); err != nil {
+	var err error
+
+	if o.Object == nil {
+		err = o.refreshFromSource(true)
+	} else {
+		err = o.refresh()
+	}
+	if err != nil {
 		return nil, err
 	}
 
-	var err error
 	cacheReader := NewObjectHandle(o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
@@ -223,7 +228,7 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 		case *fs.RangeOption:
 			offset, limit = x.Decode(o.Size())
 		}
-		_, err = cacheReader.Seek(offset, os.SEEK_SET)
+		_, err = cacheReader.Seek(offset, io.SeekStart)
 		if err != nil {
 			return nil, err
 		}
@@ -238,7 +243,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		return err
 	}
 	// pause background uploads if active
-	if o.CacheFs.tempWritePath != "" {
+	if o.CacheFs.opt.TempWritePath != "" {
 		o.CacheFs.backgroundRunner.pause()
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
@@ -257,6 +262,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 	// deleting cached chunks and info to be replaced with new ones
 	_ = o.CacheFs.cache.RemoveObject(o.abs())
+	// advertise to ChangeNotify if wrapped doesn't do that
+	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
 
 	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
@@ -273,7 +280,7 @@ func (o *Object) Remove() error {
 		return err
 	}
 	// pause background uploads if active
-	if o.CacheFs.tempWritePath != "" {
+	if o.CacheFs.opt.TempWritePath != "" {
 		o.CacheFs.backgroundRunner.pause()
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
@@ -352,6 +359,13 @@ func (o *Object) tempFileStartedUpload() bool {
 	return started
 }
 
+// UnWrap returns the Object that this Object is wrapping or
+// nil if it isn't wrapping anything
+func (o *Object) UnWrap() fs.Object {
+	return o.Object
+}
+
 var (
-	_ fs.Object = (*Object)(nil)
+	_ fs.Object          = (*Object)(nil)
+	_ fs.ObjectUnWrapper = (*Object)(nil)
 )
backend/cache/plex.go | 232 (vendored)
@@ -1,8 +1,9 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
 import (
+	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"net/http"
@@ -16,58 +17,172 @@ import (
 	"io/ioutil"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/patrickmn/go-cache"
+	"golang.org/x/net/websocket"
 )
 
 const (
 	// defPlexLoginURL is the default URL for Plex login
-	defPlexLoginURL = "https://plex.tv/users/sign_in.json"
+	defPlexLoginURL        = "https://plex.tv/users/sign_in.json"
+	defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
 )
 
+// PlaySessionStateNotification is part of the API response of Plex
+type PlaySessionStateNotification struct {
+	SessionKey       string `json:"sessionKey"`
+	GUID             string `json:"guid"`
+	Key              string `json:"key"`
+	ViewOffset       int64  `json:"viewOffset"`
+	State            string `json:"state"`
+	TranscodeSession string `json:"transcodeSession"`
+}
+
+// NotificationContainer is part of the API response of Plex
+type NotificationContainer struct {
+	Type             string                         `json:"type"`
+	Size             int                            `json:"size"`
+	PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
+}
+
+// PlexNotification is part of the API response of Plex
+type PlexNotification struct {
+	Container NotificationContainer `json:"NotificationContainer"`
+}
+
 // plexConnector is managing the cache integration with Plex
 type plexConnector struct {
-	url      *url.URL
-	username string
-	password string
-	token    string
-	f        *Fs
-	mu       sync.Mutex
+	url        *url.URL
+	username   string
+	password   string
+	token      string
+	insecure   bool
+	f          *Fs
+	mu         sync.Mutex
+	running    bool
+	runningMu  sync.Mutex
+	stateCache *cache.Cache
+	saveToken  func(string)
 }
 
 // newPlexConnector connects to a Plex server and generates a token
-func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
+func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
 	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
 	if err != nil {
 		return nil, err
 	}
 
 	pc := &plexConnector{
-		f:        f,
-		url:      u,
-		username: username,
-		password: password,
-		token:    "",
+		f:          f,
+		url:        u,
+		username:   username,
+		password:   password,
+		token:      "",
+		insecure:   insecure,
+		stateCache: cache.New(time.Hour, time.Minute),
+		saveToken:  saveToken,
 	}
 
 	return pc, nil
 }
 
 // newPlexConnector connects to a Plex server and generates a token
-func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
+func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
 	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
 	if err != nil {
 		return nil, err
 	}
 
 	pc := &plexConnector{
-		f:     f,
-		url:   u,
-		token: token,
+		f:          f,
+		url:        u,
+		token:      token,
+		insecure:   insecure,
+		stateCache: cache.New(time.Hour, time.Minute),
 	}
+	pc.listenWebsocket()
 
 	return pc, nil
 }
 
+func (p *plexConnector) closeWebsocket() {
+	p.runningMu.Lock()
+	defer p.runningMu.Unlock()
+	fs.Infof("plex", "stopped Plex watcher")
+	p.running = false
+}
+
+func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
+	u := strings.TrimRight(strings.Replace(strings.Replace(
+		p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
+	url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
+
+	config, err := websocket.NewConfig(url, "http://localhost")
+	if err != nil {
+		return nil, err
+	}
+	if p.insecure {
+		config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
+	}
+	return websocket.DialConfig(config)
+}
+
+func (p *plexConnector) listenWebsocket() {
+	p.runningMu.Lock()
+	defer p.runningMu.Unlock()
+
+	conn, err := p.websocketDial()
+	if err != nil {
+		fs.Errorf("plex", "%v", err)
+		return
+	}
+
+	p.running = true
+	go func() {
+		for {
+			if !p.isConnected() {
+				break
+			}
+
+			notif := &PlexNotification{}
+			err := websocket.JSON.Receive(conn, notif)
+			if err != nil {
+				fs.Debugf("plex", "%v", err)
+				p.closeWebsocket()
+				break
+			}
+			// we're only interested in play events
+			if notif.Container.Type == "playing" {
+				// we loop through each of them
+				for _, v := range notif.Container.PlaySessionState {
+					// event type of playing
+					if v.State == "playing" {
+						// if it's not cached get the details and cache them
+						if _, found := p.stateCache.Get(v.Key); !found {
+							req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
+							if err != nil {
+								continue
+							}
+							p.fillDefaultHeaders(req)
+							resp, err := http.DefaultClient.Do(req)
+							if err != nil {
+								continue
+							}
+							var data []byte
+							data, err = ioutil.ReadAll(resp.Body)
+							if err != nil {
+								continue
+							}
+							p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
+						}
+					} else if v.State == "stopped" {
+						p.stateCache.Delete(v.Key)
+					}
+				}
+			}
+		}
+	}()
+}
+
 // fillDefaultHeaders will add common headers to requests
 func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
 	req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
@@ -111,17 +226,21 @@ func (p *plexConnector) authenticate() error {
 	}
 	p.token = token
 	if p.token != "" {
-		config.FileSet(p.f.Name(), "plex_token", p.token)
-		config.SaveConfig()
+		if p.saveToken != nil {
+			p.saveToken(p.token)
+		}
 		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
 	}
+	p.listenWebsocket()
 
 	return nil
 }
 
 // isConnected checks if this rclone is authenticated to Plex
 func (p *plexConnector) isConnected() bool {
-	return p.token != ""
+	p.runningMu.Lock()
+	defer p.runningMu.Unlock()
+	return p.running
 }
 
 // isConfigured checks if this rclone is configured to use a Plex server
@@ -131,6 +250,9 @@ func (p *plexConnector) isConfigured() bool {
 
 func (p *plexConnector) isPlaying(co *Object) bool {
 	var err error
+	if !p.isConnected() {
+		p.listenWebsocket()
+	}
 
 	remote := co.Remote()
 	if cr, yes := p.f.isWrappedByCrypt(); yes {
@@ -142,64 +264,8 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 	}
 
 	isPlaying := false
-	req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
-	if err != nil {
-		return false
-	}
-	p.fillDefaultHeaders(req)
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return false
-	}
-	var data map[string]interface{}
-	err = json.NewDecoder(resp.Body).Decode(&data)
-	if err != nil {
-		return false
-	}
-	sizeGen, ok := get(data, "MediaContainer", "size")
-	if !ok {
-		return false
-	}
-	size, ok := sizeGen.(float64)
-	if !ok || size < float64(1) {
-		return false
-	}
-	videosGen, ok := get(data, "MediaContainer", "Video")
-	if !ok {
-		fs.Debugf("plex", "empty videos: %v", data)
-		return false
-	}
-	videos, ok := videosGen.([]interface{})
-	if !ok || len(videos) < 1 {
-		fs.Debugf("plex", "empty videos: %v", data)
-		return false
-	}
-	for _, v := range videos {
-		keyGen, ok := get(v, "key")
-		if !ok {
-			fs.Debugf("plex", "failed to find: key")
-			continue
-		}
-		key, ok := keyGen.(string)
-		if !ok {
-			fs.Debugf("plex", "failed to understand: key")
-			continue
-		}
-		req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), key), nil)
-		if err != nil {
-			return false
-		}
-		p.fillDefaultHeaders(req)
-		resp, err := http.DefaultClient.Do(req)
-		if err != nil {
-			return false
-		}
-		var data []byte
-		data, err = ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return false
-		}
-		if bytes.Contains(data, []byte(remote)) {
+	for _, v := range p.stateCache.Items() {
+		if bytes.Contains(v.Object.([]byte), []byte(remote)) {
 			isPlaying = true
 			break
 		}
@@ -208,12 +274,6 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 	return isPlaying
 }
 
-func (p *plexConnector) isPlayingAsync(co *Object, response chan bool) {
-	time.Sleep(time.Second) // FIXME random guess here
-	res := p.isPlaying(co)
-	response <- res
-}
-
 // adapted from: https://stackoverflow.com/a/28878037 (credit)
 func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
backend/cache/storage_memory.go | 2 (vendored)
@@ -1,4 +1,4 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
backend/cache/storage_persistent.go | 71 (vendored)
@@ -1,4 +1,4 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
@@ -34,7 +34,8 @@ const (
 
 // Features flags for this storage type
 type Features struct {
-	PurgeDb bool // purge the db before starting
+	PurgeDb    bool          // purge the db before starting
+	DbWaitTime time.Duration // time to wait for DB to be available
 }
 
 var boltMap = make(map[string]*Persistent)
@@ -122,7 +123,7 @@ func (b *Persistent) connect() error {
 	if err != nil {
 		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
 	}
-	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
+	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
 	if err != nil {
 		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
 	}
@@ -192,19 +193,46 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
 
 // AddDir will update a CachedDirectory metadata and all its entries
 func (b *Persistent) AddDir(cachedDir *Directory) error {
+	return b.AddBatchDir([]*Directory{cachedDir})
+}
+
+// AddBatchDir will update a list of CachedDirectory metadata and all their entries
+func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
+	if len(cachedDirs) == 0 {
+		return nil
+	}
+
 	return b.db.Update(func(tx *bolt.Tx) error {
-		bucket := b.getBucket(cachedDir.abs(), true, tx)
+		var bucket *bolt.Bucket
+		if cachedDirs[0].Dir == "" {
+			bucket = tx.Bucket([]byte(RootBucket))
+		} else {
+			bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
+		}
 		if bucket == nil {
-			return errors.Errorf("couldn't open bucket (%v)", cachedDir)
+			return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
 		}
 
-		encoded, err := json.Marshal(cachedDir)
-		if err != nil {
-			return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
-		}
-		err = bucket.Put([]byte("."), encoded)
-		if err != nil {
-			return err
+		for _, cachedDir := range cachedDirs {
+			var b *bolt.Bucket
+			var err error
+			if cachedDir.Name == "" {
+				b = bucket
+			} else {
+				b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name))
+			}
+			if err != nil {
+				return err
+			}
+
+			encoded, err := json.Marshal(cachedDir)
+			if err != nil {
+				return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
+			}
+			err = b.Put([]byte("."), encoded)
+			if err != nil {
+				return err
+			}
 		}
 		return nil
 	})
@@ -315,7 +343,7 @@ func (b *Persistent) RemoveDir(fp string) error {
 // ExpireDir will flush a CachedDirectory and all its objects from the objects
 // chunks will remain as they are
 func (b *Persistent) ExpireDir(cd *Directory) error {
-	t := time.Now().Add(cd.CacheFs.fileAge * -1)
+	t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
 	cd.CacheTs = &t
 
 	// expire all parents
@@ -400,6 +428,16 @@ func (b *Persistent) RemoveObject(fp string) error {
 	})
 }
 
+// ExpireObject will flush an Object and all its data if desired
+func (b *Persistent) ExpireObject(co *Object, withData bool) error {
+	co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
+	err := b.AddObject(co)
+	if withData {
+		_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
+	}
+	return err
+}
+
 // HasEntry confirms the existence of a single entry (dir or object)
 func (b *Persistent) HasEntry(remote string) bool {
 	dir, name := path.Split(remote)
@@ -1060,10 +1098,3 @@ func itob(v int64) []byte {
 func btoi(d []byte) int64 {
 	return int64(binary.BigEndian.Uint64(d))
 }
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
@@ -781,7 +781,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
 	}
 	fh.open = open // will be called by fh.RangeSeek
 	if doRangeSeek {
-		_, err = fh.RangeSeek(offset, 0, limit)
+		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
@@ -908,7 +908,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 	if fh.open == nil {
 		return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek"))
 	}
-	if whence != 0 {
+	if whence != io.SeekStart {
 		return 0, fh.finish(errors.New("can only seek from the start"))
 	}
 
@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
 		{"off", NameEncryptionOff, ""},
 		{"standard", NameEncryptionStandard, ""},
 		{"obfuscate", NameEncryptionObfuscated, ""},
-		{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
+		{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
 	} {
 		actual, actualErr := NewNameEncryptionMode(test.in)
 		assert.Equal(t, actual, test.expected)
@@ -1016,7 +1016,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			_, err := fh.RangeSeek(int64(offset), 0, int64(limit))
+			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
 			assert.NoError(t, err)
 
 			check(fh, offset, limit)
@@ -1083,7 +1083,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 		}
 		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
 		assert.NoError(t, err)
-		gotOffset, err := fh.RangeSeek(test.offset, 0, test.limit)
+		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
 		assert.NoError(t, err)
 		assert.Equal(t, gotOffset, test.offset)
 	}
@@ -5,24 +5,19 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -30,11 +25,13 @@ func init() {
|
||||
Description: "Encrypt/Decrypt a remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "filename_encryption",
|
||||
Help: "How to encrypt the filenames.",
|
||||
Name: "filename_encryption",
|
||||
Help: "How to encrypt the filenames.",
|
||||
Default: "standard",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "off",
|
||||
@@ -48,8 +45,9 @@ func init() {
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "directory_name_encryption",
|
||||
Help: "Option to either encrypt directory names or leave them intact.",
|
||||
Name: "directory_name_encryption",
|
||||
Help: "Option to either encrypt directory names or leave them intact.",
|
||||
Default: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "true",
|
||||
@@ -68,68 +66,90 @@ func init() {
|
||||
Name: "password2",
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "show_mapping",
|
||||
Help: "For all files listed show how the names encrypt.",
|
||||
Default: false,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// NewCipher constructs a Cipher for the given config name
|
||||
func NewCipher(name string) (Cipher, error) {
|
||||
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
|
||||
// newCipherForConfig constructs a Cipher for the given config name
|
||||
func newCipherForConfig(opt *Options) (Cipher, error) {
|
||||
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password := config.FileGet(name, "password", "")
|
||||
if password == "" {
|
||||
if opt.Password == "" {
|
||||
return nil, errors.New("password not set in config file")
|
||||
}
|
||||
password, err = obscure.Reveal(password)
|
||||
password, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||
}
|
||||
salt := config.FileGet(name, "password2", "")
|
||||
if salt != "" {
|
||||
salt, err = obscure.Reveal(salt)
|
||||
var salt string
|
||||
if opt.Password2 != "" {
|
||||
salt, err = obscure.Reveal(opt.Password2)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||
}
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make cipher")
|
||||
}
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
cipher, err := NewCipher(name)
|
||||
// NewCipher constructs a Cipher for the given config
|
||||
func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := config.FileGet(name, "remote")
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cipher, err := newCipherForConfig(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := opt.Remote
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||
}
|
||||
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := fs.NewFs(remotePath)
|
||||
remotePath := path.Join(wPath, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
|
||||
// if that didn't produce a file, look for a directory
|
||||
if err != fs.ErrorIsFile {
|
||||
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = fs.NewFs(remotePath)
|
||||
remotePath = path.Join(wPath, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
|
||||
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
cipher: cipher,
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
@@ -161,14 +181,24 @@ func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
    Remote                  string `config:"remote"`
    FilenameEncryption      string `config:"filename_encryption"`
    DirectoryNameEncryption bool   `config:"directory_name_encryption"`
    Password                string `config:"password"`
    Password2               string `config:"password2"`
    ShowMapping             bool   `config:"show_mapping"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
    fs.Fs
    name     string
    root     string
    opt      Options
    features *fs.Features // optional features
    cipher   Cipher
    mode     NameEncryptionMode
}

// Name of the remote (as passed into NewFs)
@@ -199,7 +229,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
        fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
        return
    }
    if *cryptShowMapping {
    if f.opt.ShowMapping {
        fs.Logf(decryptedRemote, "Encrypts to %q", remote)
    }
    *entries = append(*entries, f.newObject(obj))
@@ -213,7 +243,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
        fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
        return
    }
    if *cryptShowMapping {
    if f.opt.ShowMapping {
        fs.Logf(decryptedRemote, "Encrypts to %q", remote)
    }
    *entries = append(*entries, f.newDir(dir))
@@ -291,14 +321,54 @@ type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.O

// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
    // Encrypt the data into wrappedIn
    wrappedIn, err := f.cipher.EncryptData(in)
    if err != nil {
        return nil, err
    }

    // Find a hash the destination supports to compute a hash of
    // the encrypted data
    ht := f.Fs.Hashes().GetOne()
    var hasher *hash.MultiHasher
    if ht != hash.None {
        hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
        if err != nil {
            return nil, err
        }
        // unwrap the accounting
        var wrap accounting.WrapFn
        wrappedIn, wrap = accounting.UnWrap(wrappedIn)
        // add the hasher
        wrappedIn = io.TeeReader(wrappedIn, hasher)
        // wrap the accounting back on
        wrappedIn = wrap(wrappedIn)
    }

    // Transfer the data
    o, err := put(wrappedIn, f.newObjectInfo(src), options...)
    if err != nil {
        return nil, err
    }

    // Check the hashes of the encrypted data if we were comparing them
    if ht != hash.None && hasher != nil {
        srcHash := hasher.Sums()[ht]
        var dstHash string
        dstHash, err = o.Hash(ht)
        if err != nil {
            return nil, errors.Wrap(err, "failed to read destination hash")
        }
        if srcHash != "" && dstHash != "" && srcHash != dstHash {
            // remove object
            err = o.Remove()
            if err != nil {
                fs.Errorf(o, "Failed to remove corrupted object: %v", err)
            }
            return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
        }
    }

    return f.newObject(o), nil
}

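put now hashes the encrypted stream on the fly and compares the result with the destination's hash. A minimal sketch of that io.TeeReader technique, assuming a local string reader and MD5, with rclone's accounting unwrap/rewrap step omitted:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    "github.com/ncw/rclone/fs/hash"
)

func main() {
    in := strings.NewReader("some data to upload")
    hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5))
    if err != nil {
        panic(err)
    }
    // Every byte read through tee is also written into hasher.
    tee := io.TeeReader(in, hasher)
    if _, err := ioutil.ReadAll(tee); err != nil { // stands in for the upload
        panic(err)
    }
    fmt.Println("md5 =", hasher.Sums()[hash.MD5])
}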
@@ -452,6 +522,15 @@ func (f *Fs) CleanUp() error {
    return do()
}

// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
    do := f.Fs.Features().About
    if do == nil {
        return nil, errors.New("About not supported")
    }
    return do()
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.Fs
@@ -627,24 +706,24 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {

// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    wrappedIn, err := o.f.cipher.EncryptData(in)
    if err != nil {
        return err
    update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
        return o.Object, o.Object.Update(in, src, options...)
    }
    return o.Object.Update(wrappedIn, o.f.newObjectInfo(src))
    _, err := o.f.put(in, src, options, update)
    return err
}

// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
    new := fs.NewDirCopy(dir)
    newDir := fs.NewDirCopy(dir)
    remote := dir.Remote()
    decryptedRemote, err := f.cipher.DecryptDirName(remote)
    if err != nil {
        fs.Debugf(remote, "Undecryptable dir name: %v", err)
    } else {
        new.SetRemote(decryptedRemote)
        newDir.SetRemote(decryptedRemote)
    }
    return new
    return newDir
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
@@ -699,6 +778,7 @@ var (
    _ fs.CleanUpper      = (*Fs)(nil)
    _ fs.UnWrapper       = (*Fs)(nil)
    _ fs.ListRer         = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.ObjectInfo      = (*ObjectInfo)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)

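The var block above is Go's compile-time interface assertion idiom, which the new fs.Abouter entry extends: assigning a typed nil to a blank interface-typed variable fails to compile unless the type satisfies the interface. A standalone sketch with illustrative names:

package main

import "fmt"

type Abouter interface {
    About() (string, error)
}

type Fs struct{}

func (f *Fs) About() (string, error) { return "ok", nil }

// Fails to compile if *Fs stops implementing Abouter.
var _ Abouter = (*Fs)(nil)

func main() { fmt.Println("interfaces satisfied") }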
@@ -1,76 +0,0 @@
// Test Crypt filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package crypt_test

import (
    "testing"

    "github.com/ncw/rclone/backend/crypt"
    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup2(t *testing.T) {
    fstests.NilObject = fs.Object((*crypt.Object)(nil))
    fstests.RemoteName = "TestCrypt2:"
}

// Generic tests for the Fs
func TestInit2(t *testing.T) { fstests.TestInit(t) }
func TestFsString2(t *testing.T) { fstests.TestFsString(t) }
func TestFsName2(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot2(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty2(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound2(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir2(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir2(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty2(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty2(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty2(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound2(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile12(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError2(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile22(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile12(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile22(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile22(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot2(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot2(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir2(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir2(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel22(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel22(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile12(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject2(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and22(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir2(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy2(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove2(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove2(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull2(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision2(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify2(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString2(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs2(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote2(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes2(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime2(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType2(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime2(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize2(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen2(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek2(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange2(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead2(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate2(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable2(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile2(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound2(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove2(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream2(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge2(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal2(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise2(t *testing.T) { fstests.TestFinalise(t) }
@@ -1,76 +0,0 @@
// Test Crypt filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package crypt_test

import (
    "testing"

    "github.com/ncw/rclone/backend/crypt"
    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup3(t *testing.T) {
    fstests.NilObject = fs.Object((*crypt.Object)(nil))
    fstests.RemoteName = "TestCrypt3:"
}

// Generic tests for the Fs
func TestInit3(t *testing.T) { fstests.TestInit(t) }
func TestFsString3(t *testing.T) { fstests.TestFsString(t) }
func TestFsName3(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot3(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty3(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound3(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir3(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir3(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty3(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty3(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty3(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound3(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile13(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError3(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile23(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile13(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile23(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile23(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot3(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot3(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir3(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir3(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel23(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel23(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile13(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject3(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and23(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir3(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy3(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove3(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove3(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull3(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision3(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify3(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString3(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs3(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote3(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes3(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime3(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType3(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime3(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize3(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen3(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek3(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange3(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead3(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate3(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable3(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile3(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound3(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove3(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream3(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge3(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal3(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise3(t *testing.T) { fstests.TestFinalise(t) }
@@ -1,34 +0,0 @@
package crypt_test

import (
    "os"
    "path/filepath"

    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fstest/fstests"
)

// Create the TestCrypt: remote
func init() {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
    name := "TestCrypt"
    tempdir2 := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
    name2 := name + "2"
    tempdir3 := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name3 := name + "3"
    fstests.ExtraConfig = []fstests.ExtraConfigItem{
        {Name: name, Key: "type", Value: "crypt"},
        {Name: name, Key: "remote", Value: tempdir},
        {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
        {Name: name, Key: "filename_encryption", Value: "standard"},
        {Name: name2, Key: "type", Value: "crypt"},
        {Name: name2, Key: "remote", Value: tempdir2},
        {Name: name2, Key: "password", Value: obscure.MustObscure("potato2")},
        {Name: name2, Key: "filename_encryption", Value: "off"},
        {Name: name3, Key: "type", Value: "crypt"},
        {Name: name3, Key: "remote", Value: tempdir3},
        {Name: name3, Key: "password", Value: obscure.MustObscure("potato2")},
        {Name: name3, Key: "filename_encryption", Value: "obfuscate"},
    }
    fstests.SkipBadWindowsCharacters[name3+":"] = true
}
@@ -1,76 +1,62 @@
// Test Crypt filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package crypt_test

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/ncw/rclone/backend/crypt"
    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
    fstests.NilObject = fs.Object((*crypt.Object)(nil))
    fstests.RemoteName = "TestCrypt:"
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
    name := "TestCrypt"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
            {Name: name, Key: "filename_encryption", Value: "standard"},
        },
    })
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
    name := "TestCrypt2"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "off"},
        },
    })
}

// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name := "TestCrypt3"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "obfuscate"},
        },
        SkipBadWindowsCharacters: true,
    })
}

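The test configs above store passwords with obscure.MustObscure, matching the obscure.Reveal calls in the backends. A minimal round-trip sketch of that pairing:

package main

import (
    "fmt"

    "github.com/ncw/rclone/fs/config/obscure"
)

func main() {
    // MustObscure produces the reversible form stored in rclone.conf.
    enc := obscure.MustObscure("potato")
    // Reveal recovers the plaintext at backend startup.
    dec, err := obscure.Reveal(enc)
    if err != nil {
        panic(err)
    }
    fmt.Println(enc, dec) // dec == "potato"
}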
File diff suppressed because it is too large
@@ -53,11 +53,10 @@ const exampleExportFormats = `{
    ]
}`

var exportFormats map[string][]string

// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
    assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
    exportFormatsOnce.Do(func() {})
    assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
}

func TestInternalParseExtensions(t *testing.T) {
@@ -90,8 +89,10 @@ func TestInternalParseExtensions(t *testing.T) {
}

func TestInternalFindExportFormat(t *testing.T) {
    item := new(drive.File)
    item.MimeType = "application/vnd.google-apps.document"
    item := &drive.File{
        Name:     "file",
        MimeType: "application/vnd.google-apps.document",
    }
    for _, test := range []struct {
        extensions    []string
        wantExtension string
@@ -105,8 +106,14 @@ func TestInternalFindExportFormat(t *testing.T) {
    } {
        f := new(Fs)
        f.extensions = test.extensions
        gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
        gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
        assert.Equal(t, test.wantExtension, gotExtension)
        if test.wantExtension != "" {
            assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
        } else {
            assert.Equal(t, "", gotFilename)
        }
        assert.Equal(t, test.wantMimeType, gotMimeType)
        assert.Equal(t, true, gotIsDocument)
    }
}

@@ -1,75 +1,17 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package drive_test

import (
    "testing"

    "github.com/ncw/rclone/backend/drive"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
    fstests.NilObject = fs.Object((*drive.Object)(nil))
    fstests.RemoteName = "TestDrive:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDrive:",
        NilObject:  (*drive.Object)(nil),
    })
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -30,9 +30,6 @@ import (
const (
    // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
    statusResumeIncomplete = 308

    // Number of times to try each chunk
    maxTries = 10
)

// resumableUpload is used by the generated APIs to provide resumable uploads.
@@ -61,6 +58,9 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string,
    if f.isTeamDrive {
        params.Set("supportsTeamDrives", "true")
    }
    if f.opt.KeepRevisionForever {
        params.Set("keepRevisionForever", "true")
    }
    urls := "https://www.googleapis.com/upload/drive/v3/files"
    method := "POST"
    if fileID != "" {
@@ -159,7 +159,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
    _, _ = chunk.Seek(0, 0)
    _, _ = chunk.Seek(0, io.SeekStart)
    req := rx.makeRequest(start, chunk, chunkSize)
    res, err := rx.f.client.Do(req)
    if err != nil {
@@ -192,16 +192,16 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
}

// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
    start := int64(0)
    var StatusCode int
    var err error
    buf := make([]byte, int(chunkSize))
    buf := make([]byte, int(rx.f.opt.ChunkSize))
    for start < rx.ContentLength {
        reqSize := rx.ContentLength - start
        if reqSize >= int64(chunkSize) {
            reqSize = int64(chunkSize)
        if reqSize >= int64(rx.f.opt.ChunkSize) {
            reqSize = int64(rx.f.opt.ChunkSize)
        }
        chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)

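The Upload loop above sends min(remaining, ChunkSize) bytes per request. A minimal sketch of that arithmetic with made-up sizes; the Content-Range style output is illustrative of what a resumable upload sends per chunk:

package main

import "fmt"

func main() {
    const contentLength, chunkSize = int64(10500000), int64(4194304)
    for start := int64(0); start < contentLength; {
        // Each iteration sends min(remaining, chunkSize) bytes.
        reqSize := contentLength - start
        if reqSize >= chunkSize {
            reqSize = chunkSize
        }
        fmt.Printf("PUT bytes %d-%d/%d\n", start, start+reqSize-1, contentLength)
        start += reqSize
    }
}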
@@ -1,7 +1,4 @@
// Package dropbox provides an interface to Dropbox object storage

// +build go1.7

package dropbox

// FIXME dropbox for business would be quite easy to add
@@ -25,7 +22,6 @@ of path_display and all will be well.
*/

import (
    "crypto/md5"
    "fmt"
    "io"
    "log"
@@ -35,10 +31,14 @@ import (
    "time"

    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/hash"
@@ -56,24 +56,6 @@ const (
    minSleep      = 10 * time.Millisecond
    maxSleep      = 2 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential
)

var (
    // Description of how to auth for this app
    dropboxConfig = &oauth2.Config{
        Scopes: []string{},
        // Endpoint: oauth2.Endpoint{
        //  AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
        //  TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
        // },
        Endpoint:     dropbox.OAuthEndpoint(""),
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
    // A regexp matching path names for files Dropbox ignores
    // See https://www.dropbox.com/en/help/145 - Ignored files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
    // Upload chunk size - setting too small makes uploads slow.
    // Chunks are buffered into memory for retries.
    //
@@ -97,8 +79,26 @@ var (
    // Choose 48MB which is 91% of Maximum speed. rclone by
    // default does 4 transfers so this should use 4*48MB = 192MB
    // by default.
    uploadChunkSize    = fs.SizeSuffix(48 * 1024 * 1024)
    maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
    defaultChunkSize = 48 * 1024 * 1024
    maxChunkSize     = 150 * 1024 * 1024
)

var (
    // Description of how to auth for this app
    dropboxConfig = &oauth2.Config{
        Scopes: []string{},
        // Endpoint: oauth2.Endpoint{
        //  AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
        //  TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
        // },
        Endpoint:     dropbox.OAuthEndpoint(""),
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
    // A regexp matching path names for files Dropbox ignores
    // See https://www.dropbox.com/en/help/145 - Ignored files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
)

// Register with Fs
@@ -107,32 +107,45 @@ func init() {
        Name:        "dropbox",
        Description: "Dropbox",
        NewFs:       NewFs,
        Config: func(name string) {
            err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        Options: []fs.Option{{
            Name: config.ConfigClientID,
            Help: "Dropbox App Client Id - leave blank normally.",
            Help: "Dropbox App Client Id\nLeave blank normally.",
        }, {
            Name: config.ConfigClientSecret,
            Help: "Dropbox App Client Secret - leave blank normally.",
            Help: "Dropbox App Client Secret\nLeave blank normally.",
        }, {
            Name:     "chunk_size",
            Help:     fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
            Default:  fs.SizeSuffix(defaultChunkSize),
            Advanced: true,
        }},
    })
    flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Options defines the configuration for this backend
type Options struct {
    ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

// Fs represents a remote dropbox server
type Fs struct {
    name           string       // name of this remote
    root           string       // the path we are working on
    features       *fs.Features // optional features
    srv            files.Client // the connection to the dropbox server
    slashRoot      string       // root with "/" prefix, lowercase
    slashRootSlash string       // root with "/" prefix and postfix, lowercase
    pacer          *pacer.Pacer // To pace the API calls
    name           string         // name of this remote
    root           string         // the path we are working on
    opt            Options        // parsed options
    features       *fs.Features   // optional features
    srv            files.Client   // the connection to the dropbox server
    sharing        sharing.Client // as above, but for generating sharing links
    users          users.Client   // as above, but for accessing user information
    slashRoot      string         // root with "/" prefix, lowercase
    slashRootSlash string         // root with "/" prefix and postfix, lowercase
    pacer          *pacer.Pacer   // To pace the API calls
    ns             string         // The namespace we are using or "" for none
}

// Object describes a dropbox object
@@ -183,15 +196,22 @@ func shouldRetry(err error) (bool, error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
    if uploadChunkSize > maxUploadChunkSize {
        return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    if opt.ChunkSize > maxChunkSize {
        return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
    }

    // Convert the old token if it exists. The old token was just
    // a string, the new one is a JSON blob
    oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
    if oldToken != "" && oldToken[0] != '{' {
    oldToken, ok := m.Get(config.ConfigToken)
    oldToken = strings.TrimSpace(oldToken)
    if ok && oldToken != "" && oldToken[0] != '{' {
        fs.Infof(name, "Converting token to new format")
        newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
        err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@@ -200,22 +220,24 @@ func NewFs(name, root string) (fs.Fs, error) {
        }
    }

    oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
    oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
    if err != nil {
        log.Fatalf("Failed to configure dropbox: %v", err)
        return nil, errors.Wrap(err, "failed to configure dropbox")
    }

    config := dropbox.Config{
        LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
        Client:   oAuthClient,    // maybe???
    }
    srv := files.New(config)

    f := &Fs{
        name:  name,
        srv:   srv,
        opt:   *opt,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }
    config := dropbox.Config{
        LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
        Client:          oAuthClient,    // maybe???
        HeaderGenerator: f.headerGenerator,
    }
    f.srv = files.New(config)
    f.sharing = sharing.New(config)
    f.users = users.New(config)
    f.features = (&fs.Features{
        CaseInsensitive: true,
        ReadMimeType:    true,
@@ -223,6 +245,27 @@ func NewFs(name, root string) (fs.Fs, error) {
    }).Fill(f)
    f.setRoot(root)

    // If root starts with / then use the actual root
    if strings.HasPrefix(root, "/") {
        var acc *users.FullAccount
        err = f.pacer.Call(func() (bool, error) {
            acc, err = f.users.GetCurrentAccount()
            return shouldRetry(err)
        })
        if err != nil {
            return nil, errors.Wrap(err, "get current account failed")
        }
        switch x := acc.RootInfo.(type) {
        case *common.TeamRootInfo:
            f.ns = x.RootNamespaceId
        case *common.UserRootInfo:
            f.ns = x.RootNamespaceId
        default:
            return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
        }
        fs.Debugf(f, "Using root namespace %q", f.ns)
    }

    // See if the root is actually an object
    _, err = f.getFileMetadata(f.slashRoot)
    if err == nil {
@@ -237,14 +280,22 @@ func NewFs(name, root string) (fs.Fs, error) {
    return f, nil
}

// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
    if f.ns == "" {
        return map[string]string{}
    }
    return map[string]string{
        "Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
    }
}

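headerGenerator scopes every SDK call to a namespace via a JSON-valued Dropbox-API-Path-Root header. A sketch of the header it produces, built with encoding/json rather than string concatenation; the namespace id is illustrative:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // The same payload the generator above concatenates by hand.
    hdr, err := json.Marshal(map[string]string{
        ".tag":         "namespace_id",
        "namespace_id": "1234567890",
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("Dropbox-API-Path-Root:", string(hdr))
}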
// Sets root in f
func (f *Fs) setRoot(root string) {
    f.root = strings.Trim(root, "/")
    lowerCaseRoot := strings.ToLower(f.root)

    f.slashRoot = "/" + lowerCaseRoot
    f.slashRoot = "/" + f.root
    f.slashRootSlash = f.slashRoot
    if lowerCaseRoot != "" {
    if f.root != "" {
        f.slashRootSlash += "/"
    }
}
@@ -417,21 +468,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    return entries, nil
}

// A read closer which doesn't close the input
type readCloser struct {
    in io.Reader
}

// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
    return rc.in.Read(p)
}

// Dummy close function
func (rc *readCloser) Close() error {
    return nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
@@ -640,6 +676,52 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    return dstObj, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (link string, err error) {
    absPath := "/" + path.Join(f.Root(), remote)
    fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
    createArg := sharing.CreateSharedLinkWithSettingsArg{
        Path: absPath,
    }
    var linkRes sharing.IsSharedLinkMetadata
    err = f.pacer.Call(func() (bool, error) {
        linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
        return shouldRetry(err)
    })

    if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
        fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
        listArg := sharing.ListSharedLinksArg{
            Path:       absPath,
            DirectOnly: true,
        }
        var listRes *sharing.ListSharedLinksResult
        err = f.pacer.Call(func() (bool, error) {
            listRes, err = f.sharing.ListSharedLinks(&listArg)
            return shouldRetry(err)
        })
        if err != nil {
            return
        }
        if len(listRes.Links) == 0 {
            err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
            return
        }
        linkRes = listRes.Links[0]
    }
    if err == nil {
        switch res := linkRes.(type) {
        case *sharing.FileLinkMetadata:
            link = res.Url
        case *sharing.FolderLinkMetadata:
            link = res.Url
        default:
            err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
        }
    }
    return
}

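PublicLink, like the other new methods, wraps each SDK call in f.pacer.Call with a closure returning (retry, error). A minimal sketch of that shape; callWithRetry below is an illustrative stand-in for rclone's pacer, not its actual implementation:

package main

import (
    "errors"
    "fmt"
    "time"
)

// callWithRetry re-invokes fn while it asks for a retry, pausing with
// a crude exponential backoff between attempts.
func callWithRetry(attempts int, sleep time.Duration, fn func() (bool, error)) error {
    var err error
    for i := 0; i < attempts; i++ {
        var retry bool
        retry, err = fn()
        if !retry {
            return err
        }
        time.Sleep(sleep)
        sleep *= 2
    }
    return err
}

func main() {
    calls := 0
    err := callWithRetry(5, 10*time.Millisecond, func() (bool, error) {
        calls++
        if calls < 3 {
            return true, errors.New("transient") // ask for a retry
        }
        return false, nil // done
    })
    fmt.Println(calls, err) // 3 <nil>
}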
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
@@ -683,6 +765,33 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    return nil
}

// About gets quota information
func (f *Fs) About() (usage *fs.Usage, err error) {
    var q *users.SpaceUsage
    err = f.pacer.Call(func() (bool, error) {
        q, err = f.users.GetSpaceUsage()
        return shouldRetry(err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "about failed")
    }
    var total uint64
    if q.Allocation != nil {
        if q.Allocation.Individual != nil {
            total += q.Allocation.Individual.Allocated
        }
        if q.Allocation.Team != nil {
            total += q.Allocation.Team.Allocated
        }
    }
    usage = &fs.Usage{
        Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
        Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
        Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
    }
    return usage, nil
}

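About sums the individual and team allocations into Total and derives Free as Total minus Used. The arithmetic on its own, with made-up numbers:

package main

import "fmt"

func main() {
    // 2 GiB individual allocation, no team allocation, 512 MiB used.
    var individual, team, used uint64 = 2 << 30, 0, 512 << 20
    total := individual + team
    fmt.Printf("total=%d used=%d free=%d\n", total, used, total-used)
}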
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.Dropbox)
@@ -758,20 +867,6 @@ func (o *Object) remotePath() string {
    return o.fs.slashRootSlash + o.remote
}

// Returns the key for the metadata database for a given path
func metadataKey(path string) string {
    // NB File system is case insensitive
    path = strings.ToLower(path)
    hash := md5.New()
    _, _ = hash.Write([]byte(path))
    return fmt.Sprintf("%x", hash.Sum(nil))
}

// Returns the key for the metadata database
func (o *Object) metadataKey() string {
    return metadataKey(o.remotePath())
}

// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData() (err error) {
    if !o.modTime.IsZero() {
@@ -801,7 +896,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
    // Dropbox doesn't have a way of doing this so returning this
    // error will cause the file to be deleted first then
    // re-uploaded to set the time.
    return fs.ErrorCantSetModTime
    return fs.ErrorCantSetModTimeWithoutDelete
}

// Storable returns whether this object is storable
@@ -835,7 +930,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
    chunkSize := int64(uploadChunkSize)
    chunkSize := int64(o.fs.opt.ChunkSize)
    chunks := 0
    if size != -1 {
        chunks = int(size/chunkSize) + 1
@@ -859,7 +954,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
    chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, 0); err != nil {
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
@@ -895,7 +990,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
    chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, 0); err != nil {
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
@@ -918,7 +1013,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
    chunk = readers.NewRepeatableReaderBuffer(in, buf)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, 0); err != nil {
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
@@ -950,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    size := src.Size()
    var err error
    var entry *files.FileMetadata
    if size > int64(uploadChunkSize) || size == -1 {
    if size > int64(o.fs.opt.ChunkSize) || size == -1 {
        entry, err = o.uploadChunked(in, commitInfo, size)
    } else {
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
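The chunked-upload retries above depend on the chunk being rewindable: every attempt seeks back to the start before re-sending. A minimal sketch of that property using bytes.Reader in place of rclone's repeatable readers:

package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    chunk := bytes.NewReader([]byte("one chunk of upload data"))
    for attempt := 1; attempt <= 2; attempt++ {
        // seek to the start in case this is a retry
        if _, err := chunk.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }
        data, _ := ioutil.ReadAll(chunk) // stands in for the upload call
        fmt.Println(attempt, len(data))  // both attempts see the full chunk
    }
}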
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,78 +1,17 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build go1.7

package dropbox_test

import (
    "testing"

    "github.com/ncw/rclone/backend/dropbox"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
    fstests.NilObject = fs.Object((*dropbox.Object)(nil))
    fstests.RemoteName = "TestDropbox:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDropbox:",
        NilObject:  (*dropbox.Object)(nil),
    })
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining about "no
// buildable Go source files "

// +build !go1.7

package dropbox
@@ -4,16 +4,15 @@ package ftp
import (
	"io"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/jlaffaye/ftp"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
@@ -30,33 +29,40 @@ func init() {
		{
			Name: "host",
			Help: "FTP host to connect to",
			Optional: false,
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "ftp.example.com",
				Help: "Connect to ftp.example.com",
			}},
		}, {
			Name: "user",
			Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
			Optional: true,
			Name: "user",
			Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
		}, {
			Name: "port",
			Help: "FTP port, leave blank to use default (21) ",
			Optional: true,
			Name: "port",
			Help: "FTP port, leave blank to use default (21)",
		}, {
			Name: "pass",
			Help: "FTP password",
			IsPassword: true,
			Optional: false,
			Required: true,
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Host string `config:"host"`
	User string `config:"user"`
	Pass string `config:"pass"`
	Port string `config:"port"`
}
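This hunk shows the pattern repeated by every backend in this changeset: options are declared once as a tagged struct and decoded from a configmap.Mapper with configstruct.Set. A minimal standalone sketch of that flow, using only the configmap.Simple and configstruct.Set APIs that appear in this diff (the main function and the values in it are illustrative, not part of the change):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the FTP backend's struct above: each field is
// bound to a config key through its `config:"..."` tag.
type Options struct {
	Host string `config:"host"`
	User string `config:"user"`
	Pass string `config:"pass"`
	Port string `config:"port"`
}

func main() {
	// configmap.Simple is the plain map-backed Mapper the new tests use.
	m := configmap.Simple{
		"host": "ftp.example.com",
		"user": "alice",
		"port": "2121",
	}
	opt := new(Options)
	// configstruct.Set copies each config key into the tagged field.
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opt) // &{Host:ftp.example.com User:alice Pass: Port:2121}
}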

// Fs represents a remote FTP server
type Fs struct {
	name string // name of this remote
	root string // the path we are working on if any
	opt Options // parsed options
	features *fs.Features // optional features
	url string
	user string
@@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (ff fs.Fs, err error) {
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
	// FIXME Convert the old scheme used for the first beta - remove after release
	if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
		fs.Infof(name, "Converting old configuration")
		u, err := url.Parse(ftpURL)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
		}
		parts := strings.Split(u.Host, ":")
		config.FileSet(name, "host", parts[0])
		if len(parts) > 1 {
			config.FileSet(name, "port", parts[1])
		}
		config.FileSet(name, "host", u.Host)
		config.FileSet(name, "user", config.FileGet(name, "username"))
		config.FileSet(name, "pass", config.FileGet(name, "password"))
		config.FileDeleteKey(name, "username")
		config.FileDeleteKey(name, "password")
		config.FileDeleteKey(name, "url")
		config.SaveConfig()
		if u.Path != "" && u.Path != "/" {
			fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
		}
	// Parse config into Options struct
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	host := config.FileGet(name, "host")
	user := config.FileGet(name, "user")
	pass := config.FileGet(name, "pass")
	port := config.FileGet(name, "port")
	pass, err = obscure.Reveal(pass)
	pass, err := obscure.Reveal(opt.Pass)
	if err != nil {
		return nil, errors.Wrap(err, "NewFS decrypt password")
	}
	user := opt.User
	if user == "" {
		user = os.Getenv("USER")
	}
	port := opt.Port
	if port == "" {
		port = "21"
	}

	dialAddr := host + ":" + port
	dialAddr := opt.Host + ":" + port
	u := "ftp://" + path.Join(dialAddr+"/", root)
	f := &Fs{
		name: name,
		root: root,
		opt: *opt,
		url: u,
		user: user,
		pass: pass,
@@ -247,7 +235,7 @@ func translateErrorFile(err error) error {
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable:
		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
			err = fs.ErrorObjectNotFound
		}
	}
@@ -259,16 +247,15 @@ func translateErrorDir(err error) error {
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable:
		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
			err = fs.ErrorDirNotFound
		}
	}
	return err
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
	fullPath := path.Join(f.root, remote)
	dir := path.Dir(fullPath)
@@ -276,32 +263,58 @@ func (f *Fs) NewObject(remote string) (o fs.Object, err error) {

	c, err := f.getFtpConnection()
	if err != nil {
		return nil, errors.Wrap(err, "NewObject")
		return nil, errors.Wrap(err, "findItem")
	}
	files, err := c.List(dir)
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, translateErrorFile(err)
	}
	for i, file := range files {
		if file.Type != ftp.EntryTypeFolder && file.Name == base {
			o := &Object{
				fs: f,
				remote: remote,
			}
			info := &FileInfo{
				Name: remote,
				Size: files[i].Size,
				ModTime: files[i].Time,
			}
			o.info = info

			return o, nil
	for _, file := range files {
		if file.Name == base {
			return file, nil
		}
	}
	return nil, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
	entry, err := f.findItem(remote)
	if err != nil {
		return nil, err
	}
	if entry != nil && entry.Type != ftp.EntryTypeFolder {
		o := &Object{
			fs: f,
			remote: remote,
		}
		info := &FileInfo{
			Name: remote,
			Size: entry.Size,
			ModTime: entry.Time,
		}
		o.info = info

		return o, nil
	}
	return nil, fs.ErrorObjectNotFound
}

// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(remote string) (exists bool, err error) {
	entry, err := f.findItem(remote)
	if err != nil {
		return false, errors.Wrap(err, "dirExists")
	}
	if entry != nil && entry.Type == ftp.EntryTypeFolder {
		return true, nil
	}
	return false, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -322,6 +335,18 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	if err != nil {
		return nil, translateErrorDir(err)
	}
	// Annoyingly FTP returns success for a directory which
	// doesn't exist, so check it really doesn't exist if no
	// entries found.
	if len(files) == 0 {
		exists, err := f.dirExists(dir)
		if err != nil {
			return nil, errors.Wrap(err, "list")
		}
		if !exists {
			return nil, fs.ErrorDirNotFound
		}
	}
	for i := range files {
		object := files[i]
		newremote := path.Join(dir, object.Name)
@@ -438,6 +463,15 @@ func (f *Fs) mkdir(abspath string) error {
	}
	err = c.MakeDir(abspath)
	f.putFtpConnection(&c, err)
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
			err = nil
		case 521: // dir already exists: error number according to RFC 959: issue #2363
			err = nil
		}
	}
	return err
}

@@ -1,75 +1,17 @@
// Test FTP filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package ftp_test

import (
	"testing"

	"github.com/ncw/rclone/backend/ftp"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*ftp.Object)(nil))
	fstests.RemoteName = "TestFTP:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTP:",
		NilObject: (*ftp.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -29,12 +29,15 @@ import (

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
@@ -49,11 +52,10 @@ const (
	timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime = "mtime" // key to store mtime under in metadata
	listChunks = 1000 // chunk size to read directory listings
	minSleep = 10 * time.Millisecond
)

var (
	gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
	gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes: []string{storage.DevstorageFullControlScope},
@@ -68,29 +70,36 @@ var (
func init() {
	fs.Register(&fs.RegInfo{
		Name: "google cloud storage",
		Prefix: "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs: NewFs,
		Config: func(name string) {
			if config.FileGet(name, "service_account_file") != "" {
		Config: func(name string, m configmap.Mapper) {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			if saFile != "" || saCreds != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, storageConfig)
			err := oauthutil.Config("google cloud storage", name, m, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id - leave blank normally.",
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret - leave blank normally.",
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
@@ -204,21 +213,29 @@ func init() {
	})
}

// Options defines the configuration for this backend
type Options struct {
	ProjectNumber string `config:"project_number"`
	ServiceAccountFile string `config:"service_account_file"`
	ServiceAccountCredentials string `config:"service_account_credentials"`
	ObjectACL string `config:"object_acl"`
	BucketACL string `config:"bucket_acl"`
	Location string `config:"location"`
	StorageClass string `config:"storage_class"`
}

// Fs represents a remote storage server
type Fs struct {
	name string // name of this remote
	root string // the path we are working on if any
	features *fs.Features // optional features
	svc *storage.Service // the connection to the storage server
	client *http.Client // authorized client
	bucket string // the bucket we are working on
	bucketOKMu sync.Mutex // mutex to protect bucket OK
	bucketOK bool // true if we have created the bucket
	projectNumber string // used for finding buckets
	objectACL string // used when creating new objects
	bucketACL string // used when creating new buckets
	location string // location of new buckets
	storageClass string // storage class of new buckets
	name string // name of this remote
	root string // the path we are working on if any
	opt Options // parsed options
	features *fs.Features // optional features
	svc *storage.Service // the connection to the storage server
	client *http.Client // authorized client
	bucket string // the bucket we are working on
	bucketOKMu sync.Mutex // mutex to protect bucket OK
	bucketOK bool // true if we have created the bucket
	pacer *pacer.Pacer // To pace the API calls
}

// Object describes a storage object
@@ -262,6 +279,30 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}
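shouldRetry is the callback contract used by pacer.Call throughout this file: return (true, err) to have the pacer sleep and call the function again, or (false, err) to stop and surface err to the caller. A minimal sketch of that contract, assuming only the pacer constructors visible in this diff plus the "time" and "github.com/pkg/errors" imports; the counter and error text are illustrative:

p := pacer.New().SetMinSleep(10 * time.Millisecond).SetPacer(pacer.GoogleDrivePacer)
tries := 0
err := p.Call(func() (bool, error) {
	tries++
	if tries < 3 {
		return true, errors.New("transient") // true: sleep, then call again
	}
	return false, nil // false: stop; this error (nil here) is returned
})
// err is nil at this point, after three attempts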

// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

@@ -277,12 +318,8 @@ func parsePath(path string) (bucket, directory string, err error) {
	return
}

func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
	data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
	if err != nil {
		return nil, errors.Wrap(err, "error opening credentials file")
	}
	conf, err := google.JWTConfigFromJSON(data, storageConfig.Scopes...)
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
	}
@@ -291,20 +328,39 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client
	var err error

	serviceAccountPath := config.FileGet(name, "service_account_file")
	if serviceAccountPath != "" {
		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			log.Fatalf("Failed configuring Google Cloud Storage Service Account: %v", err)
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
			return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
		}
	}

@@ -314,32 +370,17 @@ func NewFs(name, root string) (fs.Fs, error) {
	}

	f := &Fs{
		name: name,
		bucket: bucket,
		root: directory,
		projectNumber: config.FileGet(name, "project_number"),
		objectACL: config.FileGet(name, "object_acl"),
		bucketACL: config.FileGet(name, "bucket_acl"),
		location: config.FileGet(name, "location"),
		storageClass: config.FileGet(name, "storage_class"),
		name: name,
		bucket: bucket,
		root: directory,
		opt: *opt,
		pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,
		WriteMimeType: true,
		BucketBased: true,
	}).Fill(f)
	if f.objectACL == "" {
		f.objectACL = "private"
	}
	if f.bucketACL == "" {
		f.bucketACL = "private"
	}
	if *gcsLocation != "" {
		f.location = *gcsLocation
	}
	if *gcsStorageClass != "" {
		f.storageClass = *gcsStorageClass
	}

	// Create a new authorized Drive client.
	f.client = oAuthClient
@@ -351,7 +392,10 @@ func NewFs(name, root string) (fs.Fs, error) {
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, err = f.svc.Objects.Get(bucket, directory).Do()
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(bucket, directory).Do()
			return shouldRetry(err)
		})
		if err == nil {
			f.root = path.Dir(directory)
			if f.root == "." {
@@ -399,7 +443,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {
@@ -410,7 +454,11 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
		list = list.Delimiter("/")
	}
	for {
		objects, err := list.Do()
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Do()
			return shouldRetry(err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
@@ -437,6 +485,17 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
				continue
			}
			remote := object.Name[rootLength:]
			// is this a directory marker?
			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
				if recurse && remote != "" {
					// add a directory in if --fast-list since will have no prefixes
					err = fn(remote[:len(remote)-1], object, true)
					if err != nil {
						return err
					}
				}
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
@@ -498,12 +557,16 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	if f.projectNumber == "" {
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
	for {
		buckets, err := listBuckets.Do()
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
@@ -591,13 +654,19 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	_, err := f.svc.Buckets.Get(f.bucket).Do()
	// List something from the bucket to see if it exists. Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role. See #2193 for details.

	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
		return shouldRetry(err)
	})
	if err == nil {
		// Bucket already exists
		f.bucketOK = true
@@ -610,16 +679,19 @@ func (f *Fs) Mkdir(dir string) error {
		return errors.Wrap(err, "failed to get bucket")
	}

	if f.projectNumber == "" {
	if f.opt.ProjectNumber == "" {
		return errors.New("can't make bucket without project number")
	}

	bucket := storage.Bucket{
		Name: f.bucket,
		Location: f.location,
		StorageClass: f.storageClass,
		Location: f.opt.Location,
		StorageClass: f.opt.StorageClass,
	}
	_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = true
	}
@@ -630,13 +702,16 @@ func (f *Fs) Mkdir(dir string) error {
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	err := f.svc.Buckets.Delete(f.bucket).Do()
	err = f.pacer.Call(func() (bool, error) {
		err = f.svc.Buckets.Delete(f.bucket).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = false
	}
@@ -678,7 +753,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
@@ -766,7 +845,11 @@ func (o *Object) readMetaData() (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
	var object *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
@@ -800,14 +883,18 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) (err error) {
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket: o.fs.bucket,
		Name: o.fs.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
@@ -827,7 +914,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
		return nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	res, err := o.fs.client.Do(req)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
@@ -856,7 +953,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
		Updated: modTime.Format(timeFormatOut), // Doesn't get set
		Metadata: metadataFromModTime(modTime),
	}
	newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
@@ -866,8 +967,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() error {
	return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
func (o *Object) Remove() (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise

@@ -1,75 +1,17 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package googlecloudstorage_test

import (
	"testing"

	"github.com/ncw/rclone/backend/googlecloudstorage"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil))
	fstests.RemoteName = "TestGoogleCloudStorage:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoogleCloudStorage:",
		NilObject: (*googlecloudstorage.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -2,9 +2,6 @@
//
// It treats HTML pages served from the endpoint as directory
// listings, and includes any links found as files.

// +build go1.7

package http

import (
@@ -17,7 +14,8 @@ import (
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/rest"
@@ -38,7 +36,7 @@ func init() {
		Options: []fs.Option{{
			Name: "url",
			Help: "URL of http host to connect to",
			Optional: false,
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "https://example.com",
				Help: "Connect to example.com",
@@ -48,11 +46,17 @@ func init() {
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	Endpoint string `config:"url"`
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name string
	root string
	features *fs.Features // optional features
	opt Options // options for this backend
	endpoint *url.URL
	endpointURL string // endpoint as a string
	httpClient *http.Client
@@ -81,14 +85,20 @@ func statusError(res *http.Response, err error) error {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
	endpoint := config.FileGet(name, "url")
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if !strings.HasSuffix(opt.Endpoint, "/") {
		opt.Endpoint += "/"
	}

	// Parse the endpoint and stick the root onto it
	base, err := url.Parse(endpoint)
	base, err := url.Parse(opt.Endpoint)
	if err != nil {
		return nil, err
	}
@@ -133,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	f := &Fs{
		name: name,
		root: root,
		opt: *opt,
		httpClient: client,
		endpoint: u,
		endpointURL: u.String(),
@@ -192,10 +203,11 @@ func (f *Fs) url(remote string) string {
	return f.endpointURL + rest.URLPathEscape(remote)
}

func parseInt64(s string) int64 {
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
	n, e := strconv.ParseInt(s, 10, 64)
	if e != nil {
		return 0
		return def
	}
	return n
}
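With the added default, callers can distinguish a missing or unparseable Content-Length header from a genuine zero-length file; stat below passes -1, presumably signalling an unknown size to the rest of rclone. A one-line sketch of the intended call pattern (the same call appears in the stat hunk that follows):

// -1 (unknown size) rather than 0 (empty file) when Content-Length is absent
size := parseInt64(res.Header.Get("Content-Length"), -1)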
@@ -412,7 +424,7 @@ func (o *Object) stat() error {
	if err != nil {
		t = timeUnset
	}
	o.size = parseInt64(res.Header.Get("Content-Length"))
	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
	o.modTime = t
	o.contentType = res.Header.Get("Content-Type")
	return nil

@@ -16,6 +16,7 @@ import (

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/lib/rest"
	"github.com/stretchr/testify/assert"
@@ -29,7 +30,7 @@ var (
)

// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) func() {
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

@@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() {
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true
	config.FileSet(remoteName, "type", "http")
	config.FileSet(remoteName, "url", ts.URL)
	// config.FileSet(remoteName, "type", "http")
	// config.FileSet(remoteName, "url", ts.URL)

	m := configmap.Simple{
		"type": "http",
		"url": ts.URL,
	}

	// return a function to tidy up
	return ts.Close
	return m, ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)

	// Instantiate it
	f, err := NewFs(remoteName, "")
	f, err := NewFs(remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
@@ -151,6 +157,7 @@ func TestOpen(t *testing.T) {
	fd, err := o.Open()
	require.NoError(t, err)
	data, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "beetroot\n", string(data))

@@ -158,6 +165,7 @@ func TestOpen(t *testing.T) {
	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
	require.NoError(t, err)
	data, err = ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "eetro", string(data))
}
@@ -175,20 +183,20 @@ func TestMimeType(t *testing.T) {
}

func TestIsAFileRoot(t *testing.T) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "one%.txt")
	f, err := NewFs(remoteName, "one%.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	testListRoot(t, f)
}

func TestIsAFileSubDir(t *testing.T) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "three/underthree.txt")
	f, err := NewFs(remoteName, "three/underthree.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	entries, err := f.List("")

@@ -1,6 +0,0 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.7

package http
@@ -2,7 +2,9 @@ package hubic

import (
	"net/http"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
)

@@ -21,12 +23,17 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
	err := a.f.getCredentials()
	if err != nil {
		return nil, err
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
	const retries = 10
	for try := 1; try <= retries; try++ {
		err = a.f.getCredentials()
		if err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
		fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
	}
	return nil, nil
	return nil, err
}
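The rewritten Request retries getCredentials up to ten times at 100 ms intervals rather than failing on the first error, and still returns a nil request in every case (per the comment above, nil means no separate auth request is needed). The same bounded-retry shape as a generic helper; the function name and its parameters are illustrative, not part of the diff:

// retryN calls fn up to n times, sleeping d after each failure,
// and returns the last error seen (nil on success).
func retryN(n int, d time.Duration, fn func() error) (err error) {
	for i := 0; i < n; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(d)
	}
	return err
}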

// Response parses the result of an http request
@@ -36,7 +43,7 @@ func (a *auth) Response(resp *http.Response) error {

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string {
func (a *auth) StorageUrl(Internal bool) string { // nolint
	return a.f.credentials.Endpoint
}

@@ -46,7 +53,7 @@ func (a *auth) Token() string {
}

// The CDN url if available
func (a *auth) CdnUrl() string {
func (a *auth) CdnUrl() string { // nolint
	return ""
}

@@ -16,6 +16,8 @@ import (
	"github.com/ncw/rclone/backend/swift"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/lib/oauthutil"
@@ -52,19 +54,19 @@ func init() {
		Name: "hubic",
		Description: "Hubic",
		NewFs: NewFs,
		Config: func(name string) {
			err := oauthutil.Config("hubic", name, oauthConfig)
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("hubic", name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
		Options: append([]fs.Option{{
			Name: config.ConfigClientID,
			Help: "Hubic Client Id - leave blank normally.",
			Help: "Hubic Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Hubic Client Secret - leave blank normally.",
		}},
			Help: "Hubic Client Secret\nLeave blank normally.",
		}}, swift.SharedOptions...),
	})
}

@@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(name, oauthConfig)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Hubic")
	}
@@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) {
		return nil, errors.Wrap(err, "error authenticating swift connection")
	}

	// Parse config into swift.Options struct
	opt := new(swift.Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Make inner swift Fs from the connection
	swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
	swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}

@@ -1,75 +1,17 @@
// Test Hubic filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package hubic_test

import (
	"testing"

	"github.com/ncw/rclone/backend/hubic"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*hubic.Object)(nil))
	fstests.RemoteName = "TestHubic:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestHubic:",
		NilObject: (*hubic.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

backend/jottacloud/api/types.go (new file)
@@ -0,0 +1,267 @@
package api

import (
	"encoding/xml"
	"fmt"
	"time"

	"github.com/pkg/errors"
)

const (
	timeFormat = "2006-01-02-T15:04:05Z0700"
)

// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
type Time time.Time

// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var v string
	if err := d.DecodeElement(&v, &start); err != nil {
		return err
	}
	if v == "" {
		*t = Time(time.Time{})
		return nil
	}
	newTime, err := time.Parse(timeFormat, v)
	if err == nil {
		*t = Time(newTime)
	}
	return err
}

// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(t.String(), start)
}

// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
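Because Time implements xml.Unmarshaler and xml.Marshaler, encoding/xml applies the custom Jottacloud format automatically wherever the type appears in a struct. A small sketch (not part of the new file) of decoding one of the timestamps shown in the samples below; the wrapper struct is illustrative and the imports assumed are encoding/xml, fmt and time:

// Decode a sample Jottacloud timestamp through the Time type.
var v struct {
	Modified Time `xml:"modified"`
}
data := []byte("<file><modified>2018-07-05-T15:08:02Z</modified></file>")
if err := xml.Unmarshal(data, &v); err != nil {
	panic(err)
}
fmt.Println(time.Time(v.Modified).UTC()) // 2018-07-05 15:08:02 +0000 UTC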

// Flag is a hacky type for checking if an attribute is present
type Flag bool

// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
	*f = true
	return nil
}

// MarshalXMLAttr : Do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	attr := xml.Attr{
		Name: name,
		Value: "false",
	}
	return attr, errors.New("unimplemented")
}
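Flag works because encoding/xml only calls UnmarshalXMLAttr when the attribute actually occurs in the input, so any occurrence of the attribute, whatever its text, flips the value to true. A small sketch under that assumption (the wrapper struct and input are illustrative):

var v struct {
	Deleted Flag `xml:"deleted,attr"`
}
// The attribute's text is ignored; its mere presence sets the flag.
_ = xml.Unmarshal([]byte(`<file deleted="false"/>`), &v)
// bool(v.Deleted) == true here; without the attribute it stays false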

/*
GET http://www.jottacloud.com/JFS/<account>

<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/

// AccountInfo represents a Jottacloud account
type AccountInfo struct {
	Username string `xml:"username"`
	AccountType string `xml:"account-type"`
	Locked bool `xml:"locked"`
	Capacity int64 `xml:"capacity"`
	MaxDevices int `xml:"max-devices"`
	MaxMobileDevices int `xml:"max-mobile-devices"`
	Usage int64 `xml:"usage"`
	ReadLocked bool `xml:"read-locked"`
	WriteLocked bool `xml:"write-locked"`
	QuotaWriteLocked bool `xml:"quota-write-locked"`
	EnableSync bool `xml:"enable-sync"`
	EnableFolderShare bool `xml:"enable-foldershare"`
	Devices []JottaDevice `xml:"devices>device"`
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>

<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/

// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
	Name string `xml:"name"`
	DisplayName string `xml:"display_name"`
	Type string `xml:"type"`
	Sid string `xml:"sid"`
	Size int64 `xml:"size"`
	User string `xml:"user"`
	MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>

<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/

// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
	Name string `xml:"name"`
	Size int64 `xml:"size"`
	Device string `xml:"device"`
	Folders []JottaFolder `xml:"folders>folder"`
	Files []JottaFile `xml:"files>file"`
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>

<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/

// JottaFolder represents a JottacloudFolder
type JottaFolder struct {
	XMLName xml.Name
	Name string `xml:"name,attr"`
	Deleted Flag `xml:"deleted,attr"`
	Path string `xml:"path"`
	CreatedAt Time `xml:"created"`
	ModifiedAt Time `xml:"modified"`
	Updated Time `xml:"updated"`
	Folders []JottaFolder `xml:"folders>folder"`
	Files []JottaFile `xml:"files>file"`
}

/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>

<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/

// JottaFile represents a Jottacloud file
type JottaFile struct {
	XMLName xml.Name
	Name string `xml:"name,attr"`
	Deleted Flag `xml:"deleted,attr"`
	PublicSharePath string `xml:"publicSharePath"`
	State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MimeType string `xml:"currentRevision>mime"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
}
|
||||
|
||||
// Error is a custom Error for wrapping Jottacloud error responses
|
||||
type Error struct {
|
||||
StatusCode int `xml:"code"`
|
||||
Message string `xml:"message"`
|
||||
Reason string `xml:"reason"`
|
||||
Cause string `xml:"cause"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("error %d", e.StatusCode)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
if e.Reason != "" {
|
||||
out += fmt.Sprintf(" (%+v)", e.Reason)
|
||||
}
|
||||
return out
|
||||
}
|
||||
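As a quick illustration of how these types fit together (an editor's sketch, not part of the diff, assuming it sits in package api with encoding/xml and fmt imported), the sample <user> document above decodes like this:

func ExampleAccountInfo() {
	userXML := []byte(`<user time="2018-07-18-T21:39:10Z" host="dn-132">
  <username>12qh1wsht8cssxdtwl15rqh9</username>
  <capacity>5368709120</capacity>
  <devices><device><name xml:space="preserve">Jotta</name></device></devices>
</user>`)
	var info AccountInfo
	if err := xml.Unmarshal(userXML, &info); err != nil {
		panic(err)
	}
	fmt.Println(info.Username, info.Capacity, len(info.Devices))
	// Output: 12qh1wsht8cssxdtwl15rqh9 5368709120 1
}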
29 backend/jottacloud/api/types_test.go Normal file
@@ -0,0 +1,29 @@
package api

import (
	"encoding/xml"
	"testing"
	"time"
)

func TestMountpointEmptyModificationTime(t *testing.T) {
	mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
  <name xml:space="preserve">Sync</name>
  <path xml:space="preserve">/foo/Jotta</path>
  <abspath xml:space="preserve">/foo/Jotta</abspath>
  <size>0</size>
  <modified></modified>
  <device>Jotta</device>
  <user>foo</user>
  <metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
	var jf JottaFolder
	if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
		t.Fatal(err)
	}
	if !time.Time(jf.ModifiedAt).IsZero() {
		t.Errorf("got non-zero time, want zero")
	}
}
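A hedged companion sketch (not in the diff) showing why Flag is described as hacky: it records only that the attribute was present, ignoring its value entirely.

func ExampleFlag() {
	var live, dead JottaFolder
	_ = xml.Unmarshal([]byte(`<folder name="x"/>`), &live)
	_ = xml.Unmarshal([]byte(`<folder name="x" deleted="2018-07-05-T15:08:02Z"/>`), &dead)
	fmt.Println(bool(live.Deleted), bool(dead.Deleted))
	// Output: false true
}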
1109 backend/jottacloud/jottacloud.go Normal file
File diff suppressed because it is too large
67 backend/jottacloud/jottacloud_internal_test.go Normal file
@@ -0,0 +1,67 @@
package jottacloud

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// A test reader to return a test pattern of size
type testReader struct {
	size int64
	c    byte
}

// Read fills p with the test pattern - see io.Reader
func (r *testReader) Read(p []byte) (n int, err error) {
	for i := range p {
		if r.size <= 0 {
			return n, io.EOF
		}
		p[i] = r.c
		r.c = (r.c + 1) % 253
		r.size--
		n++
	}
	return
}

func TestReadMD5(t *testing.T) {
	// smoke test the reader
	b, err := ioutil.ReadAll(&testReader{size: 10})
	require.NoError(t, err)
	assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)

	// Check readMD5 for different size and threshold
	for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
		t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
			hasher := md5.New()
			n, err := io.Copy(hasher, &testReader{size: size})
			require.NoError(t, err)
			assert.Equal(t, n, size)
			wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
			for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
				t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
					in := &testReader{size: size}
					gotMD5, out, cleanup, err := readMD5(in, size, threshold)
					defer cleanup()
					require.NoError(t, err)
					assert.Equal(t, wantMD5, gotMD5)

					// check md5hash of out
					hasher := md5.New()
					n, err := io.Copy(hasher, out)
					require.NoError(t, err)
					assert.Equal(t, n, size)
					outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
					assert.Equal(t, wantMD5, outMD5)
				})
			}
		})
	}
}
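The test above pins down readMD5's contract even though its definition sits in the suppressed jottacloud.go diff: it returns the stream's MD5 plus a replacement reader positioned at the start, and a cleanup function. A hedged sketch of the calling side, with the threshold value purely an assumption for illustration:

	md5sum, in2, cleanup, err := readMD5(in, size, 10*1024*1024) // threshold chosen for illustration
	if err != nil {
		return err
	}
	defer cleanup()
	// ... upload from in2, quoting md5sum as the expected hash ...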
17 backend/jottacloud/jottacloud_test.go Normal file
@@ -0,0 +1,17 @@
// Test Jottacloud filesystem interface
package jottacloud_test

import (
	"testing"

	"github.com/ncw/rclone/backend/jottacloud"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestJottacloud:",
		NilObject:  (*jottacloud.Object)(nil),
	})
}
84 backend/jottacloud/replace.go Normal file
@@ -0,0 +1,84 @@
/*
Translate file names for Jottacloud, adapted from OneDrive

The following characters are Jottacloud reserved characters, and can't
be used in Jottacloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/

package jottacloud

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// Jottacloud has a restricted set of characters compared to other
// cloud storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'+':  '＋', // FULLWIDTH PLUS SIGN
		'*':  '＊', // FULLWIDTH ASTERISK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'?':  '？', // FULLWIDTH QUESTION MARK
		'!':  '！', // FULLWIDTH EXCLAMATION MARK
		'&':  '＆', // FULLWIDTH AMPERSAND
		':':  '：', // FULLWIDTH COLON
		';':  '；', // FULLWIDTH SEMICOLON
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		'#':  '＃', // FULLWIDTH NUMBER SIGN
		'%':  '％', // FULLWIDTH PERCENT SIGN
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'\'': '＇', // FULLWIDTH APOSTROPHE
		'~':  '～', // FULLWIDTH TILDE
		' ':  '␠', // SYMBOL FOR SPACE
	}
	invCharMap           map[rune]rune
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
)

func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// Filenames can't start with space
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Filenames can't end with space
	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}
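A short round trip (an editor's illustration, mirroring the table in replace_test.go just below) makes the space handling concrete: only leading and trailing spaces are substituted, interior ones survive.

	s := replaceReservedChars("trailing space /x?y")
	fmt.Println(s)                       // trailing space␠/x？y
	fmt.Println(restoreReservedChars(s)) // trailing space /x?y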
28 backend/jottacloud/replace_test.go Normal file
@@ -0,0 +1,28 @@
package jottacloud

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～`},
		{`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～＼＋＊＜＞？！＆：；｜＃％＂＇～`},
		{" leading space", "␠leading space"},
		{"trailing space ", "trailing space␠"},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
29 backend/local/about_unix.go Normal file
@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux

package local

import (
	"syscall"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
	var s syscall.Statfs_t
	err := syscall.Statfs(f.root, &s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read disk usage")
	}
	bs := int64(s.Bsize)
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
		Free:  fs.NewUsageValue(bs * int64(s.Bavail)),         // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
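A back-of-envelope check of the arithmetic above, with made-up numbers: Used+Free deliberately undershoots Total, because Bavail excludes the blocks reserved for root while Bfree does not.

	bs, blocks, bfree, bavail := int64(4096), int64(1000), int64(100), int64(50)
	fmt.Println(bs * blocks)           // Total: 4096000
	fmt.Println(bs * (blocks - bfree)) // Used:  3686400
	fmt.Println(bs * bavail)           // Free:  204800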
36 backend/local/about_windows.go Normal file
@@ -0,0 +1,36 @@
// +build windows

package local

import (
	"syscall"
	"unsafe"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
	var available, total, free int64
	_, _, e1 := getFreeDiskSpace.Call(
		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
		uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
		uintptr(unsafe.Pointer(&total)),     // lpTotalNumberOfBytes
		uintptr(unsafe.Pointer(&free)),      // lpTotalNumberOfFreeBytes
	)
	if e1 != syscall.Errno(0) {
		return nil, errors.Wrap(e1, "failed to read disk usage")
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(total),        // quota of bytes that can be used
		Used:  fs.NewUsageValue(total - free), // bytes in use
		Free:  fs.NewUsageValue(available),    // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
@@ -16,18 +16,13 @@ import (
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"google.golang.org/appengine/log"
)

var (
	followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
	skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
	noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
)

// Constants
@@ -40,29 +35,68 @@ func init() {
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows",
			Optional: true,
			Name: "nounc",
			Help: "Disable UNC (long path names) conversion on Windows",
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "L",
			Advanced: true,
		}, {
			Name:     "skip_links",
			Help:     "Don't warn about skipped symlinks.",
			Default:  false,
			NoPrefix: true,
			Advanced: true,
		}, {
			Name:     "no_unicode_normalization",
			Help:     "Don't apply unicode normalization to paths and filenames",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "no_check_updated",
			Help:     "Don't check to see if the files change during upload",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "one_file_system",
			Help:     "Don't cross filesystem boundaries (unix/macOS only).",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "x",
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	FollowSymlinks bool `config:"copy_links"`
	SkipSymlinks   bool `config:"skip_links"`
	NoUTFNorm      bool `config:"no_unicode_normalization"`
	NoCheckUpdated bool `config:"no_check_updated"`
	NoUNC          bool `config:"nounc"`
	OneFileSystem  bool `config:"one_file_system"`
}

// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	opt         Options             // parsed config options
	features    *fs.Features        // optional features
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	nounc       bool                // Skip UNC conversion on Windows
	// do os.Lstat or os.Stat
	lstat    func(name string) (os.FileInfo, error)
	dirNames *mapper // directory name mapping
@@ -83,18 +117,22 @@ type Object struct {
// ------------------------------------------------------------

// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
	var err error

	if *noUTFNorm {
		log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if opt.NoUTFNorm {
		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
	}

	nounc := config.FileGet(name, "nounc")
	f := &Fs{
		name:     name,
		opt:      *opt,
		warned:   make(map[string]struct{}),
		nounc:    nounc == "true",
		dev:      devUnset,
		lstat:    os.Lstat,
		dirNames: newMapper(),
@@ -104,18 +142,18 @@ func NewFs(name, root string) (fs.Fs, error) {
		CaseInsensitive:         f.caseInsensitive(),
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	if *followSymlinks {
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
	if err == nil {
		f.dev = readDevice(fi)
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	if err == nil && fi.Mode().IsRegular() {
		// It is a file, so use the parent as the root
		f.root, _ = getDirFile(f.root)
		f.root = filepath.Dir(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
@@ -242,8 +280,15 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
		newRemote := path.Join(remote, name)
		newPath := filepath.Join(fsDirPath, name)
		// Follow symlinks if required
		if *followSymlinks && (mode&os.ModeSymlink) != 0 {
		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
			fi, err = os.Stat(newPath)
			if os.IsNotExist(err) {
				// Skip bad symlinks
				err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
				fs.Errorf(newRemote, "Listing error: %v", err)
				accounting.Stats.Error(err)
				continue
			}
			if err != nil {
				return nil, err
			}
@@ -252,7 +297,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
		if fi.IsDir() {
			// Ignore directories which are symlinks. These are junction points under windows which
			// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
			if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
			if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
				d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
				entries = append(entries, d)
			}
@@ -356,7 +401,7 @@ func (f *Fs) Mkdir(dir string) error {
		if err != nil {
			return err
		}
		f.dev = readDevice(fi)
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	return nil
}
@@ -421,7 +466,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {

		// If it matches - have found the precision
		// fmt.Println("compare", fi.ModTime(), t)
		if fi.ModTime() == t {
		if fi.ModTime().Equal(t) {
			// fmt.Println("Precision detected as", duration)
			return duration
		}
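The == to Equal switch in the hunk above matters because two time.Time values can denote the same instant yet compare unequal with ==, which also looks at the monotonic clock reading and location. An illustration:

	t1 := time.Now()
	t2 := t1.Round(0)         // strips the monotonic clock reading
	fmt.Println(t1 == t2)     // may print false
	fmt.Println(t1.Equal(t2)) // true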
@@ -529,7 +574,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	}

	// Create parent of destination
	dstParentPath, _ := getDirFile(dstPath)
	dstParentPath := filepath.Dir(dstPath)
	err = os.MkdirAll(dstParentPath, 0777)
	if err != nil {
		return err
@@ -592,7 +637,6 @@ func (o *Object) Hash(r hash.Type) (string, error) {
	o.fs.objectHashesMu.Unlock()

	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
		hashes = make(map[hash.Type]string)
		in, err := os.Open(o.path)
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
@@ -642,13 +686,8 @@ func (o *Object) Storable() bool {
		}
	}
	mode := o.mode
	// On windows a file with os.ModeSymlink represents a file with reparse points
	if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
		fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
		mode &^= os.ModeSymlink
	}
	if mode&os.ModeSymlink != 0 {
		if !*skipSymlinks {
		if !o.fs.opt.SkipSymlinks {
			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
		}
		return false
@@ -673,13 +712,18 @@ type localOpenFile struct {

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
	// Check if file has the same size and modTime
	fi, err := file.fd.Stat()
	if err != nil {
		return 0, errors.Wrap(err, "can't read status of source file while transferring")
	}
	if file.o.size != fi.Size() || file.o.modTime != fi.ModTime() {
		return 0, errors.New("can't copy - source file is being updated")
	if !file.o.fs.opt.NoCheckUpdated {
		// Check if file has the same size and modTime
		fi, err := file.fd.Stat()
		if err != nil {
			return 0, errors.Wrap(err, "can't read status of source file while transferring")
		}
		if file.o.size != fi.Size() {
			return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
		}
		if !file.o.modTime.Equal(fi.ModTime()) {
			return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
		}
	}

	n, err = file.in.Read(p)
@@ -729,7 +773,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	wrappedFd := readers.NewLimitedReadCloser(fd, limit)
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, 0)
		_, err = fd.Seek(offset, io.SeekStart)
		// don't attempt to make checksums
		return wrappedFd, err
	}
@@ -749,7 +793,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {

// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
	dir, _ := getDirFile(o.path)
	dir := filepath.Dir(o.path)
	return os.MkdirAll(dir, 0777)
}

@@ -834,18 +878,7 @@ func (o *Object) lstat() error {

// Remove an object
func (o *Object) Remove() error {
	return os.Remove(o.path)
}

// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
	i := strings.LastIndex(s, string(os.PathSeparator))
	dir, file := s[:i], s[i+1:]
	if dir == "" {
		dir = string(os.PathSeparator)
	}
	return dir, file
	return remove(o.path)
}

// cleanPathFragment cleans an OS path fragment which is part of a
@@ -878,7 +911,7 @@ func (f *Fs) cleanPath(s string) string {
			s = s2
		}
	}
	if !f.nounc {
	if !f.opt.NoUNC {
		// Convert to UNC
		s = uncPath(s)
	}
@@ -29,8 +29,10 @@ func TestMapper(t *testing.T) {
	})
	assert.Equal(t, "potato", m.Load("potato"))
	assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}

// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	filePath := "sub dir/local test"
@@ -42,9 +44,11 @@
	}

	fi, err := fd.Stat()
	o := &Object{size: fi.Size(), modTime: fi.ModTime()}
	require.NoError(t, err)
	o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
	wrappedFd := readers.NewLimitedReadCloser(fd, -1)
	hash, err := hash.NewMultiHasherTypes(hash.Supported)
	require.NoError(t, err)
	in := localOpenFile{
		o:  o,
		in: wrappedFd,
@@ -59,4 +63,12 @@
	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.Errorf(t, err, "can't copy - source file is being updated")

	// turn the checking off and try again
	in.o.fs.opt.NoCheckUpdated = true

	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.NoError(t, err)

}
@@ -1,75 +1,17 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package local_test

import (
	"testing"

	"github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*local.Object)(nil))
	fstests.RemoteName = ""
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "",
		NilObject:  (*local.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
@@ -8,6 +8,6 @@ import "os"

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
	return devUnset
}
@@ -9,17 +9,12 @@
	"syscall"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/flags"
)

var (
	oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
	if !*oneFileSystem {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
	if !oneFileSystem {
		return devUnset
	}
	statT, ok := fi.Sys().(*syscall.Stat_t)
10 backend/local/remove_other.go Normal file
@@ -0,0 +1,10 @@
//+build !windows

package local

import "os"

// Removes name, retrying on a sharing violation
func remove(name string) error {
	return os.Remove(name)
}
50 backend/local/remove_test.go Normal file
@@ -0,0 +1,50 @@
package local

import (
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Check we can remove an open file
func TestRemove(t *testing.T) {
	fd, err := ioutil.TempFile("", "rclone-remove-test")
	require.NoError(t, err)
	name := fd.Name()
	defer func() {
		_ = os.Remove(name)
	}()

	exists := func() bool {
		_, err := os.Stat(name)
		if err == nil {
			return true
		} else if os.IsNotExist(err) {
			return false
		}
		require.NoError(t, err)
		return false
	}

	assert.True(t, exists())
	// close the file in the background
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(250 * time.Millisecond)
		require.NoError(t, fd.Close())
	}()
	// delete the open file
	err = remove(name)
	require.NoError(t, err)
	// check it no longer exists
	assert.False(t, exists())
	// wait for background close
	wg.Wait()
}
38 backend/local/remove_windows.go Normal file
@@ -0,0 +1,38 @@
//+build windows

package local

import (
	"os"
	"syscall"
	"time"

	"github.com/ncw/rclone/fs"
)

const (
	ERROR_SHARING_VIOLATION syscall.Errno = 32
)

// Removes name, retrying on a sharing violation
func remove(name string) (err error) {
	const maxTries = 10
	var sleepTime = 1 * time.Millisecond
	for i := 0; i < maxTries; i++ {
		err = os.Remove(name)
		if err == nil {
			break
		}
		pathErr, ok := err.(*os.PathError)
		if !ok {
			break
		}
		if pathErr.Err != ERROR_SHARING_VIOLATION {
			break
		}
		fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
		time.Sleep(sleepTime)
		sleepTime <<= 1
	}
	return err
}
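A worst case for the retry loop above, worked through: with maxTries = 10 and the sleep doubling from 1 ms, remove sleeps 1+2+...+512 ms, just over a second, before giving up.

	total, sleep := time.Duration(0), time.Millisecond
	for i := 0; i < 10; i++ {
		total += sleep
		sleep <<= 1
	}
	fmt.Println(total) // 1.023s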
1161 backend/mega/mega.go Normal file
File diff suppressed because it is too large
17 backend/mega/mega_test.go Normal file
@@ -0,0 +1,17 @@
// Test Mega filesystem interface
package mega_test

import (
	"testing"

	"github.com/ncw/rclone/backend/mega"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestMega:",
		NilObject:  (*mega.Object)(nil),
	})
}
@@ -2,7 +2,10 @@

package api

import "time"
import (
	"strings"
	"time"
)

const (
	timeFormat = `"` + time.RFC3339 + `"`
@@ -50,10 +53,10 @@ type IdentitySet struct {

// Quota groups storage space quota-related information on OneDrive into a single structure.
type Quota struct {
	Total     int `json:"total"`
	Used      int `json:"used"`
	Remaining int `json:"remaining"`
	Deleted   int `json:"deleted"`
	Total     int64 `json:"total"`
	Used      int64 `json:"used"`
	Remaining int64 `json:"remaining"`
	Deleted   int64 `json:"deleted"`
	State string `json:"state"` // normal | nearing | critical | exceeded
}

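The int to int64 widening in the Quota hunk above is not cosmetic: on 32-bit builds (386, arm) plain int is 32 bits, so any quota over 2 GiB would wrap. An illustration:

	var v int64 = 5368709120 // a 5 GB quota
	fmt.Println(v)           // 5368709120
	fmt.Println(int32(v))    // 1073741824 - silently wrapped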
@@ -88,9 +91,26 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
	DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
	ID      string `json:"id"`      // Unique identifier for the item. Read/Write.
	Path    string `json:"path"`    // Path that used to navigate to the item. Read/Write.
	DriveID   string `json:"driveId"`   // Unique identifier for the Drive that contains the item. Read-only.
	ID        string `json:"id"`        // Unique identifier for the item. Read/Write.
	Path      string `json:"path"`      // Path that used to navigate to the item. Read/Write.
	DriveType string `json:"driveType"` // Type of the drive, Read-Only
}

// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
	ID                   string               `json:"id"`                   // The unique identifier of the item within the remote Drive. Read-only.
	Name                 string               `json:"name"`                 // The name of the item (filename and extension). Read-write.
	CreatedBy            IdentitySet          `json:"createdBy"`            // Identity of the user, device, and application which created the item. Read-only.
	LastModifiedBy       IdentitySet          `json:"lastModifiedBy"`       // Identity of the user, device, and application which last modified the item. Read-only.
	CreatedDateTime      Timestamp            `json:"createdDateTime"`      // Date and time of item creation. Read-only.
	LastModifiedDateTime Timestamp            `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
	Folder               *FolderFacet         `json:"folder"`               // Folder metadata, if the item is a folder. Read-only.
	File                 *FileFacet           `json:"file"`                 // File metadata, if the item is a file. Read-only.
	FileSystemInfo       *FileSystemInfoFacet `json:"fileSystemInfo"`       // File system information on client. Read-write.
	ParentReference      *ItemReference       `json:"parentReference"`      // Parent information, if the item has a parent. Read-write.
	Size                 int64                `json:"size"`                 // Size of the item in bytes. Read-only.
	WebURL               string               `json:"webUrl"`               // URL that displays the resource in the browser. Read-only.
}

// FolderFacet groups folder-related data on OneDrive into a single structure
@@ -100,8 +120,9 @@ type FolderFacet struct {

// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
type HashesType struct {
	Sha1Hash  string `json:"sha1Hash"`  // base64 encoded SHA1 hash for the contents of the file (if available)
	Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
	Sha1Hash     string `json:"sha1Hash"`     // hex encoded SHA1 hash for the contents of the file (if available)
	Crc32Hash    string `json:"crc32Hash"`    // hex encoded CRC32 value of the file (if available)
	QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
}

// FileFacet groups file-related data on OneDrive into a single structure.
@@ -142,6 +163,7 @@ type Item struct {
	Description    string               `json:"description"`    // Provide a user-visible description of the item. Read-write.
	Folder         *FolderFacet         `json:"folder"`         // Folder metadata, if the item is a folder. Read-only.
	File           *FileFacet           `json:"file"`           // File metadata, if the item is a file. Read-only.
	RemoteItem     *RemoteItemFacet     `json:"remoteItem"`     // Remote Item metadata, if the item is a remote shared item. Read-only.
	FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
	// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
	// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
@@ -223,7 +245,115 @@ type MoveItemRequest struct {
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
	Operation          string  `json:"operation"`          // The type of job being run.
	PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
	Status             string  `json:"status"`             // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}

// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with a "#" separator
func (i *Item) GetID() string {
	if i.IsRemote() && i.RemoteItem.ID != "" {
		return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
	} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
		return i.ParentReference.DriveID + "#" + i.ID
	}
	return i.ID
}

// GetDriveID returns a normalized drive ID of the item
func (i *Item) GetDriveID() string {
	return i.GetParentReferance().DriveID
}

// GetName returns a normalized Name of the item
func (i *Item) GetName() string {
	if i.IsRemote() && i.RemoteItem.Name != "" {
		return i.RemoteItem.Name
	}
	return i.Name
}

// GetFolder returns a normalized Folder of the item
func (i *Item) GetFolder() *FolderFacet {
	if i.IsRemote() && i.RemoteItem.Folder != nil {
		return i.RemoteItem.Folder
	}
	return i.Folder
}

// GetFile returns a normalized File of the item
func (i *Item) GetFile() *FileFacet {
	if i.IsRemote() && i.RemoteItem.File != nil {
		return i.RemoteItem.File
	}
	return i.File
}

// GetFileSystemInfo returns a normalized FileSystemInfo of the item
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
	if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
		return i.RemoteItem.FileSystemInfo
	}
	return i.FileSystemInfo
}

// GetSize returns a normalized Size of the item
func (i *Item) GetSize() int64 {
	if i.IsRemote() && i.RemoteItem.Size != 0 {
		return i.RemoteItem.Size
	}
	return i.Size
}

// GetWebURL returns a normalized WebURL of the item
func (i *Item) GetWebURL() string {
	if i.IsRemote() && i.RemoteItem.WebURL != "" {
		return i.RemoteItem.WebURL
	}
	return i.WebURL
}

// GetCreatedBy returns a normalized CreatedBy of the item
func (i *Item) GetCreatedBy() IdentitySet {
	if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
		return i.RemoteItem.CreatedBy
	}
	return i.CreatedBy
}

// GetLastModifiedBy returns a normalized LastModifiedBy of the item
func (i *Item) GetLastModifiedBy() IdentitySet {
	if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
		return i.RemoteItem.LastModifiedBy
	}
	return i.LastModifiedBy
}

// GetCreatedDateTime returns a normalized CreatedDateTime of the item
func (i *Item) GetCreatedDateTime() Timestamp {
	if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
		return i.RemoteItem.CreatedDateTime
	}
	return i.CreatedDateTime
}

// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
func (i *Item) GetLastModifiedDateTime() Timestamp {
	if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
		return i.RemoteItem.LastModifiedDateTime
	}
	return i.LastModifiedDateTime
}

// GetParentReferance returns a normalized ParentReference of the item
func (i *Item) GetParentReferance() *ItemReference {
	if i.IsRemote() && i.ParentReference == nil {
		return i.RemoteItem.ParentReference
	}
	return i.ParentReference
}

// IsRemote checks if item is a remote item
func (i *Item) IsRemote() bool {
	return i.RemoteItem != nil
}
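A small illustration of the ID normalisation introduced above (hypothetical values): the parent drive ID is prefixed with a "#" so that IDs stay unique across drives.

	item := &Item{ID: "item1", ParentReference: &ItemReference{DriveID: "drive1"}}
	fmt.Println(item.GetID()) // drive1#item1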
File diff suppressed because it is too large
@@ -1,75 +1,17 @@
// Test OneDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package onedrive_test

import (
	"testing"

	"github.com/ncw/rclone/backend/onedrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*onedrive.Object)(nil))
	fstests.RemoteName = "TestOneDrive:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestOneDrive:",
		NilObject:  (*onedrive.Object)(nil),
	})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
202 backend/onedrive/quickxorhash/quickxorhash.go Normal file
@@ -0,0 +1,202 @@
// Package quickxorhash provides the quickXorHash algorithm which is a
|
||||
// quick, simple non-cryptographic hash algorithm that works by XORing
|
||||
// the bytes in a circular-shifting fashion.
|
||||
//
|
||||
// It is used by Microsoft Onedrive for Business to hash data.
|
||||
//
|
||||
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
package quickxorhash
|
||||
|
||||
// This code was ported from the code snippet linked from
|
||||
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
// Which has the copyright
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Copyright (c) 2016 Microsoft Corporation
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
// ------------------------------------------------------------------------------
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize is the preferred size for hashing
|
||||
BlockSize = 64
|
||||
// Size of the output checksum
|
||||
Size = 20
|
||||
bitsInLastCell = 32
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = (widthInBits-1)/64 + 1
|
||||
)
|
||||
|
||||
type quickXorHash struct {
|
||||
	data        [dataSize]uint64
	lengthSoFar uint64
	shiftSoFar  int
}

// New returns a new hash.Hash computing the quickXorHash checksum.
func New() hash.Hash {
	return &quickXorHash{}
}

// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
	currentshift := q.shiftSoFar

	// The bitvector where we'll start xoring
	vectorArrayIndex := currentshift / 64

	// The position within the bit vector at which we begin xoring
	vectorOffset := currentshift % 64
	iterations := len(p)
	if iterations > widthInBits {
		iterations = widthInBits
	}

	for i := 0; i < iterations; i++ {
		isLastCell := vectorArrayIndex == len(q.data)-1
		var bitsInVectorCell int
		if isLastCell {
			bitsInVectorCell = bitsInLastCell
		} else {
			bitsInVectorCell = 64
		}

		// There are at least 2 bit vectors before we reach the end of the array
		if vectorOffset <= bitsInVectorCell-8 {
			for j := i; j < len(p); j += widthInBits {
				q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
			}
		} else {
			index1 := vectorArrayIndex
			var index2 int
			if isLastCell {
				index2 = 0
			} else {
				index2 = vectorArrayIndex + 1
			}
			low := byte(bitsInVectorCell - vectorOffset)

			xoredByte := byte(0)
			for j := i; j < len(p); j += widthInBits {
				xoredByte ^= p[j]
			}
			q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
			q.data[index2] ^= uint64(xoredByte) >> low
		}
		vectorOffset += shift
		for vectorOffset >= bitsInVectorCell {
			if isLastCell {
				vectorArrayIndex = 0
			} else {
				vectorArrayIndex = vectorArrayIndex + 1
			}
			vectorOffset -= bitsInVectorCell
		}
	}

	// Update the starting position in a circular shift pattern
	q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits

	q.lengthSoFar += uint64(len(p))

	return len(p), nil
}

// checkSum calculates the current checksum
func (q *quickXorHash) checkSum() (h [Size]byte) {
	// Output the data as little endian bytes
	ph := 0
	for _, d := range q.data[:len(q.data)-1] {
		_ = h[ph+7] // bounds check
		h[ph+0] = byte(d >> (8 * 0))
		h[ph+1] = byte(d >> (8 * 1))
		h[ph+2] = byte(d >> (8 * 2))
		h[ph+3] = byte(d >> (8 * 3))
		h[ph+4] = byte(d >> (8 * 4))
		h[ph+5] = byte(d >> (8 * 5))
		h[ph+6] = byte(d >> (8 * 6))
		h[ph+7] = byte(d >> (8 * 7))
		ph += 8
	}
	// remaining 32 bits
	d := q.data[len(q.data)-1]
	h[Size-4] = byte(d >> (8 * 0))
	h[Size-3] = byte(d >> (8 * 1))
	h[Size-2] = byte(d >> (8 * 2))
	h[Size-1] = byte(d >> (8 * 3))

	// XOR the file length with the least significant bits in little endian format
	d = q.lengthSoFar
	h[Size-8] ^= byte(d >> (8 * 0))
	h[Size-7] ^= byte(d >> (8 * 1))
	h[Size-6] ^= byte(d >> (8 * 2))
	h[Size-5] ^= byte(d >> (8 * 3))
	h[Size-4] ^= byte(d >> (8 * 4))
	h[Size-3] ^= byte(d >> (8 * 5))
	h[Size-2] ^= byte(d >> (8 * 6))
	h[Size-1] ^= byte(d >> (8 * 7))

	return h
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
	hash := q.checkSum()
	return append(b, hash[:]...)
}

// Reset resets the Hash to its initial state.
func (q *quickXorHash) Reset() {
	*q = quickXorHash{}
}

// Size returns the number of bytes Sum will return.
func (q *quickXorHash) Size() int {
	return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (q *quickXorHash) BlockSize() int {
	return BlockSize
}

// Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) [Size]byte {
	var d quickXorHash
	_, _ = d.Write(data)
	return d.checkSum()
}
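Taken together, Write folds the input bytes into a 160-bit circularly shifted XOR register and checkSum appends the length, so the package behaves like any other hash.Hash. A minimal usage sketch of both the streaming and one-shot entry points (the input strings are arbitrary; OneDrive exchanges these digests base64 encoded, which is why the test vectors below compare base64 strings):

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/ncw/rclone/backend/onedrive/quickxorhash"
)

func main() {
	// Incremental use: chunk boundaries do not affect the digest.
	h := quickxorhash.New()
	_, _ = h.Write([]byte("hello "))
	_, _ = h.Write([]byte("world"))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))

	// One-shot use gives the same digest.
	sum := quickxorhash.Sum([]byte("hello world"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}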
168	backend/onedrive/quickxorhash/quickxorhash_test.go	Normal file
@@ -0,0 +1,168 @@
package quickxorhash

import (
	"encoding/base64"
	"fmt"
	"hash"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var testVectors = []struct {
	size int
	in   string
	out  string
}{
	{0, ``, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="},
	{1, `Sg==`, "SgAAAAAAAAAAAAAAAQAAAAAAAAA="},
	{2, `tbQ=`, "taAFAAAAAAAAAAAAAgAAAAAAAAA="},
	{3, `0pZP`, "0rDEEwAAAAAAAAAAAwAAAAAAAAA="},
	{4, `jRRDVA==`, "jaDAEKgAAAAAAAAABAAAAAAAAAA="},
	{5, `eAV52qE=`, "eChAHrQRCgAAAAAABQAAAAAAAAA="},
	{6, `luBZlaT6`, "lgBHFipBCn0AAAAABgAAAAAAAAA="},
	{7, `qaApEj66lw==`, "qQBFCiTgA11cAgAABwAAAAAAAAA="},
	{8, `/aNzzCFPS/A=`, "/RjFHJgRgicsAR4ACAAAAAAAAAA="},
	{9, `n6Neh7p6fFgm`, "nxiFFw6hCz3wAQsmCQAAAAAAAAA="},
	{10, `J9iPGCbfZSTNyw==`, "J8DGIzBggm+UgQTNUgYAAAAAAAA="},
	{11, `i+UZyUGJKh+ISbk=`, "iyhHBpIRhESo4AOIQ0IuAAAAAAA="},
	{12, `h490d57Pqz5q2rtT`, "h3gEHe7giWeswgdq3MYupgAAAAA="},
	{13, `vPgoDjOfO6fm71RxLw==`, "vMAHChwwg0/s4BTmdQcV4vACAAA="},
	{14, `XoJ1AsoR4fDYJrDqYs4=`, "XhBEHQSgjAiEAx7YPgEs1CEGZwA="},
	{15, `gQaybEqS/4UlDc8e4IJm`, "gDCALNigBEn8oxAlZ8AzPAAOQZg="},
	{16, `2fuxhBJXtpWFe8dOfdGeHw==`, "O9tHLAghgSvYohKFyMMxnNCHaHg="},
	{17, `XBV6YKU9V7yMakZnFIxIkuU=`, "HbplHsBQih5cgReMQYMRzkABRiA="},
	{18, `XJZSOiNO2bmfKnTKD7fztcQX`, "/6ZArHQwAidkIxefQgEdlPGAW8w="},
	{19, `g8VtAh+2Kf4k0kY5tzji2i2zmA==`, "wDNrgwHWAVukwB8kg4YRcnALHIg="},
	{20, `T6LYJIfDh81JrAK309H2JMJTXis=`, "zBTHrspn3mEcohlJdIUAbjGNaNg="},
	{21, `DWAAX5/CIfrmErgZa8ot6ZraeSbu`, "LR2Z0PjuRYGKQB/mhQAuMrAGZbQ="},
	{22, `N9abi3qy/mC1THZuVLHPpx7SgwtLOA==`, "1KTYttCBEen8Hwy1doId3ECFWDw="},
	{23, `LlUe7wHerLqEtbSZLZgZa9u0m7hbiFs=`, "TqVZpxs3cN61BnuFvwUtMtECTGQ="},
	{24, `bU2j/0XYdgfPFD4691jV0AOUEUPR4Z5E`, "bnLBiLpVgnxVkXhNsIAPdHAPLFQ="},
	{25, `lScPwPsyUsH2T1Qsr31wXtP55Wqbe47Uyg==`, "VDMSy8eI26nBHCB0e8gVWPCKPsA="},
	{26, `rJaKh1dLR1k+4hynliTZMGf8Nd4qKKoZiAM=`, "r7bjwkl8OYQeNaMcCY8fTmEJEmQ="},
	{27, `pPsT0CPmHrd3Frsnva1pB/z1ytARLeHEYRCo`, "Rdg7rCcDomL59pL0s6GuTvqLVqQ="},
	{28, `wSRChaqmrsnMrfB2yqI43eRWbro+f9kBvh+01w==`, "YTtloIi6frI7HX3vdLvE7I2iUOA="},
	{29, `apL67KMIRxQeE9k1/RuW09ppPjbF1WeQpTjSWtI=`, "CIpedls+ZlSQ654fl+X26+Q7LVU="},
	{30, `53yx0/QgMTVb7OOzHRHbkS7ghyRc+sIXxi7XHKgT`, "zfJtLGFgR9DB3Q64fAFIp+S5iOY="},
	{31, `PwXNnutoLLmxD8TTog52k8cQkukmT87TTnDipKLHQw==`, "PTaGs7yV3FUyBy/SfU6xJRlCJlI="},
	{32, `NbYXsp5/K6mR+NmHwExjvWeWDJFnXTKWVlzYHoesp2E=`, "wjuAuWDiq04qDt1R8hHWDDcwVoQ="},
	{33, `qQ70RB++JAR5ljNv3lJt1PpqETPsckopfonItu18Cr3E`, "FkJaeg/0Z5+euShYlLpE2tJh+Lo="},
	{34, `RhzSatQTQ9/RFvpHyQa1WLdkr3nIk6MjJUma998YRtp44A==`, "SPN2D29reImAqJezlqV2DLbi8tk="},
	{35, `DND1u1uZ5SqZVpRUk6NxSUdVo7IjjL9zs4A1evDNCDLcXWc=`, "S6lBk2hxI2SWBfn7nbEl7D19UUs="},
	{36, `jEi62utFz69JMYHjg1iXy7oO6ZpZSLcVd2B+pjm6BGsv/CWi`, "s0lYU9tr/bp9xsnrrjYgRS5EvV8="},
	{37, `hfS3DZZnhy0hv7nJdXLv/oJOtIgAuP9SInt/v8KeuO4/IvVh4A==`, "CV+HQCdd2A/e/vdi12f2UU55GLA="},
	{38, `EkPQAC6ymuRrYjIXD/LT/4Vb+7aTjYVZOHzC8GPCEtYDP0+T3Nc=`, "kE9H9sEmr3vHBYUiPbvsrcDgSEo="},
	{39, `vtBOGIENG7yQ/N7xNWPNIgy66Gk/I2Ur/ZhdFNUK9/1FCZuu/KeS`, "+Fgp3HBimtCzUAyiinj3pkarYTk="},
	{40, `YnF4smoy9hox2jBlJ3VUa4qyCRhOZbWcmFGIiszTT4zAdYHsqJazyg==`, "arkIn+ELddmE8N34J9ydyFKW+9w="},
	{41, `0n7nl3YJtipy6yeUbVPWtc2h45WbF9u8hTz5tNwj3dZZwfXWkk+GN3g=`, "YJLNK7JR64j9aODWfqDvEe/u6NU="},
	{42, `FnIIPHayc1pHkY4Lh8+zhWwG8xk6Knk/D3cZU1/fOUmRAoJ6CeztvMOL`, "22RPOylMtdk7xO/QEQiMli4ql0k="},
	{43, `J82VT7ND0Eg1MorSfJMUhn+qocF7PsUpdQAMrDiHJ2JcPZAHZ2nyuwjoKg==`, "pOR5eYfwCLRJbJsidpc1rIJYwtM="},
	{44, `Zbu+78+e35ZIymV5KTDdub5McyI3FEO8fDxs62uWHQ9U3Oh3ZqgaZ30SnmQ=`, "DbvbTkgNTgWRqRidA9r1jhtUjro="},
	{45, `lgybK3Da7LEeY5aeeNrqcdHvv6mD1W4cuQ3/rUj2C/CNcSI0cAMw6vtpVY3y`, "700RQByn1lRQSSme9npQB/Ye+bY="},
	{46, `jStZgKHv4QyJLvF2bYbIUZi/FscHALfKHAssTXkrV1byVR9eACwW9DNZQRHQwg==`, "uwN55He8xgE4g93dH9163xPew4U="},
	{47, `V1PSud3giF5WW72JB/bgtltsWtEB5V+a+wUALOJOGuqztzVXUZYrvoP3XV++gM0=`, "U+3ZfUF/6mwOoHJcSHkQkckfTDA="},
	{48, `VXs4t4tfXGiWAL6dlhEMm0YQF0f2w9rzX0CvIVeuW56o6/ec2auMpKeU2VeteEK5`, "sq24lSf7wXLH8eigHl07X+qPTps="},
	{49, `bLUn3jLH+HFUsG3ptWTHgNvtr3eEv9lfKBf0jm6uhpqhRwtbEQ7Ovj/hYQf42zfdtQ==`, "uC8xrnopGiHebGuwgq607WRQyxQ="},
	{50, `4SVmjtXIL8BB8SfkbR5Cpaljm2jpyUfAhIBf65XmKxHlz9dy5XixgiE/q1lv+esZW/E=`, "wxZ0rxkMQEnRNAp8ZgEZLT4RdLM="},
	{51, `pMljctlXeFUqbG3BppyiNbojQO3ygg6nZPeUZaQcVyJ+Clgiw3Q8ntLe8+02ZSfyCc39`, "aZEPmNvOXnTt7z7wt+ewV7QGMlg="},
	{52, `C16uQlxsHxMWnV2gJhFPuJ2/guZ4N1YgmNvAwL1yrouGQtwieGx8WvZsmYRnX72JnbVtTw==`, "QtlSNqXhVij64MMhKJ3EsDFB/z8="},
	{53, `7ZVDOywvrl3L0GyKjjcNg2CcTI81n2CeUbzdYWcZOSCEnA/xrNHpiK01HOcGh3BbxuS4S6g=`, "4NznNJc4nmXeApfiCFTq/H5LbHw="},
	{54, `JXm2tTVqpYuuz2Cc+ZnPusUb8vccPGrzWK2oVwLLl/FjpFoxO9FxGlhnB08iu8Q/XQSdzHn+`, "IwE5+2pKNcK366I2k2BzZYPibSI="},
	{55, `TiiU1mxzYBSGZuE+TX0l9USWBilQ7dEml5lLrzNPh75xmhjIK8SGqVAkvIMgAmcMB+raXdMPZg==`, "yECGHtgR128ScP4XlvF96eLbIBE="},
	{56, `zz+Q4zi6wh0fCJUFU9yUOqEVxlIA93gybXHOtXIPwQQ44pW4fyh6BRgc1bOneRuSWp85hwlTJl8=`, "+3Ef4D6yuoC8J+rbFqU1cegverE="},
	{57, `sa6SHK9z/G505bysK5KgRO2z2cTksDkLoFc7sv0tWBmf2G2mCiozf2Ce6EIO+W1fRsrrtn/eeOAV`, "xZg1CwMNAjN0AIXw2yh4+1N3oos="},
	{58, `0qx0xdyTHhnKJ22IeTlAjRpWw6y2sOOWFP75XJ7cleGJQiV2kyrmQOST4DGHIL0qqA7sMOdzKyTViw==`, "bS0tRYPkP1Gfc+ZsBm9PMzPunG8="},
	{59, `QuzaF0+5ooig6OLEWeibZUENl8EaiXAQvK9UjBEauMeuFFDCtNcGs25BDtJGGbX90gH4VZvCCDNCq4s=`, "rggokuJq1OGNOfB6aDp2g4rdPgw="},
	{60, `+wg2x23GZQmMLkdv9MeAdettIWDmyK6Wr+ba23XD+Pvvq1lIMn9QIQT4Z7QHJE3iC/ZMFgaId9VAyY3d`, "ahQbTmOdiKUNdhYRHgv5/Ky+Y6k="},
	{61, `y0ydRgreRQwP95vpNP92ioI+7wFiyldHRbr1SfoPNdbKGFA0lBREaBEGNhf9yixmfE+Azo2AuROxb7Yc7g==`, "cJKFc0dXfiN4hMg1lcMf5E4gqvo="},
	{62, `LxlVvGXSQlSubK8r0pGf9zf7s/3RHe75a2WlSXQf3gZFR/BtRnR7fCIcaG//CbGfodBFp06DBx/S9hUV8Bk=`, "NwuwhhRWX8QZ/vhWKWgQ1+rNomI="},
	{63, `L+LSB8kmGMnHaWVA5P/+qFnfQliXvgJW7d2JGAgT6+koi5NQujFW1bwQVoXrBVyob/gBxGizUoJMgid5gGNo`, "ndX/KZBtFoeO3xKeo1ajO/Jy+rY="},
	{64, `Mb7EGva2rEE5fENDL85P+BsapHEEjv2/siVhKjvAQe02feExVOQSkfmuYzU/kTF1MaKjPmKF/w+cbvwfdWL8aQ==`, "n1anP5NfvD4XDYWIeRPW3ZkPv1Y="},
	{111, `jyibxJSzO6ZiZ0O1qe3tG/bvIAYssvukh9suIT5wEy1JBINVgPiqdsTW0cOpP0aUfP7mgqLfADkzI/m/GgCuVhr8oFLrOCoTx1/psBOWwhltCbhUx51Icm9aH8tY4Z3ccU+6BKpYQkLCy0B/A9Zc`, "hZfLIilSITC6N3e3tQ/iSgEzkto="},
	{128, `ikwCorI7PKWz17EI50jZCGbV9JU2E8bXVfxNMg5zdmqSZ2NlsQPp0kqYIPjzwTg1MBtfWPg53k0h0P2naJNEVgrqpoHTfV2b3pJ4m0zYPTJmUX4Bg/lOxcnCxAYKU29Y5F0U8Quz7ZXFBEweftXxJ7RS4r6N7BzJrPsLhY7hgck=`, "imAoFvCWlDn4yVw3/oq1PDbbm6U="},
	{222, `PfxMcUd0vIW6VbHG/uj/Y0W6qEoKmyBD0nYebEKazKaKG+UaDqBEcmQjbfQeVnVLuodMoPp7P7TR1htX5n2VnkHh22xDyoJ8C/ZQKiSNqQfXvh83judf4RVr9exJCud8Uvgip6aVZTaPrJHVjQhMCp/dEnGvqg0oN5OVkM2qqAXvA0teKUDhgNM71sDBVBCGXxNOR2bpbD1iM4dnuT0ey4L+loXEHTL0fqMeUcEi2asgImnlNakwenDzz0x57aBwyq3AspCFGB1ncX4yYCr/OaCcS5OKi/00WH+wNQU3`, "QX/YEpG0gDsmhEpCdWhsxDzsfVE="},
	{256, `qwGf2ESubE5jOUHHyc94ORczFYYbc2OmEzo+hBIyzJiNwAzC8PvJqtTzwkWkSslgHFGWQZR2BV5+uYTrYT7HVwRM40vqfj0dBgeDENyTenIOL1LHkjtDKoXEnQ0mXAHoJ8PjbNC93zi5TovVRXTNzfGEs5dpWVqxUzb5lc7dwkyvOluBw482mQ4xrzYyIY1t+//OrNi1ObGXuUw2jBQOFfJVj2Y6BOyYmfB1y36eBxi3zxeG5d5NYjm2GSh6e08QMAwu3zrINcqIzLOuNIiGXBtl7DjKt7b5wqi4oFiRpZsCyx2smhSrdrtK/CkdU6nDN+34vSR/M8rZpWQdBE7a8g==`, "WYT9JY3JIo/pEBp+tIM6Gt2nyTM="},
	{333, `w0LGhqU1WXFbdavqDE4kAjEzWLGGzmTNikzqnsiXHx2KRReKVTxkv27u3UcEz9+lbMvYl4xFf2Z4aE1xRBBNd1Ke5C0zToSaYw5o4B/7X99nKK2/XaUX1byLow2aju2XJl2OpKpJg+tSJ2fmjIJTkfuYUz574dFX6/VXxSxwGH/xQEAKS5TCsBK3CwnuG1p5SAsQq3gGVozDWyjEBcWDMdy8/AIFrj/y03Lfc/RNRCQTAfZbnf2QwV7sluw4fH3XJr07UoD0YqN+7XZzidtrwqMY26fpLZnyZjnBEt1FAZWO7RnKG5asg8xRk9YaDdedXdQSJAOy6bWEWlABj+tVAigBxavaluUH8LOj+yfCFldJjNLdi90fVHkUD/m4Mr5OtmupNMXPwuG3EQlqWUVpQoYpUYKLsk7a5Mvg6UFkiH596y5IbJEVCI1Kb3D1`, "e3+wo77iKcILiZegnzyUNcjCdoQ="},
}

func TestQuickXorHash(t *testing.T) {
	for _, test := range testVectors {
		what := fmt.Sprintf("test size %d", test.size)
		in, err := base64.StdEncoding.DecodeString(test.in)
		require.NoError(t, err, what)
		got := Sum(in)
		want, err := base64.StdEncoding.DecodeString(test.out)
		require.NoError(t, err, what)
		assert.Equal(t, want, got[:], what)
	}
}

func TestQuickXorHashByBlock(t *testing.T) {
	for _, blockSize := range []int{1, 2, 4, 7, 8, 16, 32, 64, 128, 256, 512} {
		for _, test := range testVectors {
			what := fmt.Sprintf("test size %d blockSize %d", test.size, blockSize)
			in, err := base64.StdEncoding.DecodeString(test.in)
			require.NoError(t, err, what)
			h := New()
			for i := 0; i < len(in); i += blockSize {
				end := i + blockSize
				if end > len(in) {
					end = len(in)
				}
				n, err := h.Write(in[i:end])
				require.Equal(t, end-i, n, what)
				require.NoError(t, err, what)
			}
			got := h.Sum(nil)
			want, err := base64.StdEncoding.DecodeString(test.out)
			require.NoError(t, err, what)
			assert.Equal(t, want, got, what)
		}
	}
}

func TestSize(t *testing.T) {
	d := New()
	assert.Equal(t, 20, d.Size())
}

func TestBlockSize(t *testing.T) {
	d := New()
	assert.Equal(t, 64, d.BlockSize())
}

func TestReset(t *testing.T) {
	d := New()
	zeroHash := d.Sum(nil)
	_, _ = d.Write([]byte{1})
	assert.NotEqual(t, zeroHash, d.Sum(nil))
	d.Reset()
	assert.Equal(t, zeroHash, d.Sum(nil))
}

// check interface
var _ hash.Hash = (*quickXorHash)(nil)
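The 0-size vector can be checked by inspection: with no input, both data and lengthSoFar stay zero, so checkSum returns 20 zero bytes, whose base64 encoding is exactly the first out value above. A sketch as a runnable example:

package quickxorhash

import (
	"encoding/base64"
	"fmt"
)

// ExampleSum reproduces the size-0 test vector: hashing no input
// leaves data and lengthSoFar zero, so the digest is 20 zero bytes.
func ExampleSum() {
	sum := Sum(nil)
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	// Output: AAAAAAAAAAAAAAAAAAAAAAAAAAA=
}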
1117	backend/opendrive/opendrive.go	Normal file
File diff suppressed because it is too large.
17	backend/opendrive/opendrive_test.go	Normal file
@@ -0,0 +1,17 @@
// Test Opendrive filesystem interface
package opendrive_test

import (
	"testing"

	"github.com/ncw/rclone/backend/opendrive"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestOpenDrive:",
		NilObject:  (*opendrive.Object)(nil),
	})
}
78	backend/opendrive/replace.go	Normal file
@@ -0,0 +1,78 @@
/*
Translate file names for OpenDrive

OpenDrive reserved characters

The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.

	\ / : * ? " < > |

OpenDrive files and folders also can't have leading or trailing spaces.

*/

package opendrive

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		':':  '：', // FULLWIDTH COLON
		'*':  '＊', // FULLWIDTH ASTERISK
		'?':  '？', // FULLWIDTH QUESTION MARK
		'"':  '＂', // FULLWIDTH QUOTATION MARK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		' ':  '␠', // SYMBOL FOR SPACE
	}
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
	invCharMap           map[rune]rune
)

func init() {
	// Create the inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// Filenames can't start with space
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Filenames can't end with space
	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}
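A quick round trip through the two helpers, written as an in-package example since both functions are unexported (the file name inside it is made up for illustration):

package opendrive

import "fmt"

func ExampleReplaceRoundTrip() {
	const name = ` weird/file*name? ` // hypothetical path with reserved characters
	mapped := replaceReservedChars(name)
	fmt.Println(mapped)
	fmt.Println(restoreReservedChars(mapped) == name)
	// Output:
	// ␠weird/file＊name？␠
	// true
}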
28	backend/opendrive/replace_test.go	Normal file
@@ -0,0 +1,28 @@
package opendrive

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
		{" leading space", "␠leading space"},
		{" path/ leading spaces", "␠path/␠ leading spaces"},
		{"trailing space ", "trailing space␠"},
		{"trailing spaces /path ", "trailing spaces ␠/path␠"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
214	backend/opendrive/types.go	Normal file
@@ -0,0 +1,214 @@
package opendrive

import (
	"encoding/json"
	"fmt"
)

// Error describes an OpenDrive error response
type Error struct {
	Info struct {
		Code    int    `json:"code"`
		Message string `json:"message"`
	} `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}

// Account describes an OpenDrive account
type Account struct {
	Username string `json:"username"`
	Password string `json:"passwd"`
}

// UserSessionInfo describes an OpenDrive session
type UserSessionInfo struct {
	Username string `json:"username"`
	Password string `json:"passwd"`

	SessionID          string          `json:"SessionID"`
	UserName           string          `json:"UserName"`
	UserFirstName      string          `json:"UserFirstName"`
	UserLastName       string          `json:"UserLastName"`
	AccType            string          `json:"AccType"`
	UserLang           string          `json:"UserLang"`
	UserID             string          `json:"UserID"`
	IsAccountUser      json.RawMessage `json:"IsAccountUser"`
	DriveName          string          `json:"DriveName"`
	UserLevel          string          `json:"UserLevel"`
	UserPlan           string          `json:"UserPlan"`
	FVersioning        string          `json:"FVersioning"`
	UserDomain         string          `json:"UserDomain"`
	PartnerUsersDomain string          `json:"PartnerUsersDomain"`
}

// FolderList describes an OpenDrive listing
type FolderList struct {
	// DirUpdateTime string `json:"DirUpdateTime,string"`
	Name             string   `json:"Name"`
	ParentFolderID   string   `json:"ParentFolderID"`
	DirectFolderLink string   `json:"DirectFolderLink"`
	ResponseType     int      `json:"ResponseType"`
	Folders          []Folder `json:"Folders"`
	Files            []File   `json:"Files"`
}

// Folder describes an OpenDrive folder
type Folder struct {
	FolderID      string `json:"FolderID"`
	Name          string `json:"Name"`
	DateCreated   int    `json:"DateCreated"`
	DirUpdateTime int    `json:"DirUpdateTime"`
	Access        int    `json:"Access"`
	DateModified  int64  `json:"DateModified"`
	Shared        string `json:"Shared"`
	ChildFolders  int    `json:"ChildFolders"`
	Link          string `json:"Link"`
	Encrypted     string `json:"Encrypted"`
}

type createFolder struct {
	SessionID           string `json:"session_id"`
	FolderName          string `json:"folder_name"`
	FolderSubParent     string `json:"folder_sub_parent"`
	FolderIsPublic      int64  `json:"folder_is_public"`      // (0 = private, 1 = public, 2 = hidden)
	FolderPublicUpl     int64  `json:"folder_public_upl"`     // (0 = disabled, 1 = enabled)
	FolderPublicDisplay int64  `json:"folder_public_display"` // (0 = disabled, 1 = enabled)
	FolderPublicDnl     int64  `json:"folder_public_dnl"`     // (0 = disabled, 1 = enabled).
}

type createFolderResponse struct {
	FolderID      string `json:"FolderID"`
	Name          string `json:"Name"`
	DateCreated   int    `json:"DateCreated"`
	DirUpdateTime int    `json:"DirUpdateTime"`
	Access        int    `json:"Access"`
	DateModified  int    `json:"DateModified"`
	Shared        string `json:"Shared"`
	Description   string `json:"Description"`
	Link          string `json:"Link"`
}

type moveCopyFolder struct {
	SessionID     string `json:"session_id"`
	FolderID      string `json:"folder_id"`
	DstFolderID   string `json:"dst_folder_id"`
	Move          string `json:"move"`
	NewFolderName string `json:"new_folder_name"` // New name for destination folder.
}

type moveCopyFolderResponse struct {
	FolderID string `json:"FolderID"`
}

type removeFolder struct {
	SessionID string `json:"session_id"`
	FolderID  string `json:"folder_id"`
}

// File describes an OpenDrive file
type File struct {
	FileID            string `json:"FileId"`
	FileHash          string `json:"FileHash"`
	Name              string `json:"Name"`
	GroupID           int    `json:"GroupID"`
	Extension         string `json:"Extension"`
	Size              int64  `json:"Size,string"`
	Views             string `json:"Views"`
	Version           string `json:"Version"`
	Downloads         string `json:"Downloads"`
	DateModified      int64  `json:"DateModified,string"`
	Access            string `json:"Access"`
	Link              string `json:"Link"`
	DownloadLink      string `json:"DownloadLink"`
	StreamingLink     string `json:"StreamingLink"`
	TempStreamingLink string `json:"TempStreamingLink"`
	EditLink          string `json:"EditLink"`
	ThumbLink         string `json:"ThumbLink"`
	Password          string `json:"Password"`
	EditOnline        int    `json:"EditOnline"`
}

type moveCopyFile struct {
	SessionID         string `json:"session_id"`
	SrcFileID         string `json:"src_file_id"`
	DstFolderID       string `json:"dst_folder_id"`
	Move              string `json:"move"`
	OverwriteIfExists string `json:"overwrite_if_exists"`
	NewFileName       string `json:"new_file_name"` // New name for destination file.
}

type moveCopyFileResponse struct {
	FileID string `json:"FileID"`
	Size   string `json:"Size"`
}

type createFile struct {
	SessionID string `json:"session_id"`
	FolderID  string `json:"folder_id"`
	Name      string `json:"file_name"`
}

type createFileResponse struct {
	FileID             string `json:"FileId"`
	Name               string `json:"Name"`
	GroupID            int    `json:"GroupID"`
	Extension          string `json:"Extension"`
	Size               string `json:"Size"`
	Views              string `json:"Views"`
	Downloads          string `json:"Downloads"`
	DateModified       string `json:"DateModified"`
	Access             string `json:"Access"`
	Link               string `json:"Link"`
	DownloadLink       string `json:"DownloadLink"`
	StreamingLink      string `json:"StreamingLink"`
	TempStreamingLink  string `json:"TempStreamingLink"`
	DirUpdateTime      int    `json:"DirUpdateTime"`
	TempLocation       string `json:"TempLocation"`
	SpeedLimit         int    `json:"SpeedLimit"`
	RequireCompression int    `json:"RequireCompression"`
	RequireHash        int    `json:"RequireHash"`
	RequireHashOnly    int    `json:"RequireHashOnly"`
}

type modTimeFile struct {
	SessionID            string `json:"session_id"`
	FileID               string `json:"file_id"`
	FileModificationTime string `json:"file_modification_time"`
}

type openUpload struct {
	SessionID string `json:"session_id"`
	FileID    string `json:"file_id"`
	Size      int64  `json:"file_size"`
}

type openUploadResponse struct {
	TempLocation       string `json:"TempLocation"`
	RequireCompression bool   `json:"RequireCompression"`
	RequireHash        bool   `json:"RequireHash"`
	RequireHashOnly    bool   `json:"RequireHashOnly"`
	SpeedLimit         int    `json:"SpeedLimit"`
}

type closeUpload struct {
	SessionID    string `json:"session_id"`
	FileID       string `json:"file_id"`
	Size         int64  `json:"file_size"`
	TempLocation string `json:"temp_location"`
}

type closeUploadResponse struct {
	FileID   string `json:"FileID"`
	FileHash string `json:"FileHash"`
	Size     int64  `json:"Size"`
}

type permissions struct {
	SessionID    string `json:"session_id"`
	FileID       string `json:"file_id"`
	FileIsPublic int64  `json:"file_ispublic"`
}
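As an illustration of how these response types decode, here is a hedged sketch that unmarshals a made-up error body (the JSON values are hypothetical; only the shape comes from the Error struct above):

package opendrive

import (
	"encoding/json"
	"fmt"
)

func ExampleErrorDecode() {
	// Hypothetical error body in the shape the Error type expects.
	body := []byte(`{"error": {"code": 401, "message": "Session ID is invalid"}}`)
	var e Error
	if err := json.Unmarshal(body, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Error())
	// Output: Session ID is invalid (Error 401)
}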
@@ -151,3 +151,35 @@ type ChecksumFileResult struct {
 	Hashes
 	Metadata Item `json:"metadata"`
 }
+
+// UserInfo is returned from /userinfo
+type UserInfo struct {
+	Error
+	Cryptosetup           bool   `json:"cryptosetup"`
+	Plan                  int    `json:"plan"`
+	CryptoSubscription    bool   `json:"cryptosubscription"`
+	PublicLinkQuota       int64  `json:"publiclinkquota"`
+	Email                 string `json:"email"`
+	UserID                int    `json:"userid"`
+	Result                int    `json:"result"`
+	Quota                 int64  `json:"quota"`
+	TrashRevretentionDays int    `json:"trashrevretentiondays"`
+	Premium               bool   `json:"premium"`
+	PremiumLifetime       bool   `json:"premiumlifetime"`
+	EmailVerified         bool   `json:"emailverified"`
+	UsedQuota             int64  `json:"usedquota"`
+	Language              string `json:"language"`
+	Business              bool   `json:"business"`
+	CryptoLifetime        bool   `json:"cryptolifetime"`
+	Registered            string `json:"registered"`
+	Journey               struct {
+		Claimed bool `json:"claimed"`
+		Steps   struct {
+			VerifyMail    bool `json:"verifymail"`
+			UploadFile    bool `json:"uploadfile"`
+			AutoUpload    bool `json:"autoupload"`
+			DownloadApp   bool `json:"downloadapp"`
+			DownloadDrive bool `json:"downloaddrive"`
+		} `json:"steps"`
+	} `json:"journey"`
+}
@@ -17,13 +17,14 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"regexp"
 	"strings"
 	"time"
 
 	"github.com/ncw/rclone/backend/pcloud/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
@@ -66,26 +67,31 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("pcloud", name, oauthConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("pcloud", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Pcloud App Client Id - leave blank normally.",
+			Help: "Pcloud App Client Id\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Pcloud App Client Secret - leave blank normally.",
+			Help: "Pcloud App Client Secret\nLeave blank normally.",
 		}},
 	})
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+}
+
 // Fs represents a remote pcloud
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
+	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -130,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Pattern to match a pcloud path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
 // parsePath parses a pcloud 'url'
 func parsePath(path string) (root string) {
 	root = strings.Trim(path, "/")
@@ -233,9 +236,15 @@ func errorHandler(resp *http.Response) error {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to configure Pcloud: %v", err)
 	}
@@ -243,6 +252,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:  name,
 		root:  root,
+		opt:   *opt,
 		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
@@ -806,6 +816,30 @@ func (f *Fs) DirCacheFlush() {
 	f.dirCache.ResetRoot()
 }
 
+// About gets quota information
+func (f *Fs) About() (usage *fs.Usage, err error) {
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/userinfo",
+	}
+	var resp *http.Response
+	var q api.UserInfo
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(&opts, nil, &q)
+		err = q.Error.Update(err)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "about failed")
+	}
+	usage = &fs.Usage{
+		Total: fs.NewUsageValue(q.Quota),               // quota of bytes that can be used
+		Used:  fs.NewUsageValue(q.UsedQuota),           // bytes in use
+		Free:  fs.NewUsageValue(q.Quota - q.UsedQuota), // bytes which can be uploaded before reaching the quota
+	}
+	return usage, nil
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	return hash.Set(hash.MD5 | hash.SHA1)
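From the caller's side, the new About method is reached through the fs.Abouter interface asserted at the bottom of this file. A hedged sketch (it assumes a pcloud remote named "TestPcloud:" is already configured, and that all three usage fields are set, as the code above does):

package main

import (
	"fmt"
	"log"

	_ "github.com/ncw/rclone/backend/pcloud" // register the backend
	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := fs.NewFs("TestPcloud:") // assumes this remote exists in the config
	if err != nil {
		log.Fatal(err)
	}
	abouter, ok := f.(fs.Abouter)
	if !ok {
		log.Fatal("remote does not support About")
	}
	usage, err := abouter.About()
	if err != nil {
		log.Fatal(err)
	}
	// Free is derived as Quota - UsedQuota in the backend above.
	fmt.Printf("total=%d used=%d free=%d\n", *usage.Total, *usage.Used, *usage.Free)
}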
@@ -1073,6 +1107,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
+		// sometimes pcloud leaves a half complete file on
+		// error, so delete it if it exists
+		delObj, delErr := o.fs.NewObject(o.remote)
+		if delErr == nil && delObj != nil {
+			_ = delObj.Remove()
+		}
 		return err
 	}
 	if len(result.Items) != 1 {
@@ -1098,6 +1138,11 @@ func (o *Object) Remove() error {
 	})
 }
 
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+	return o.id
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
@@ -1107,5 +1152,7 @@ var (
 	_ fs.Mover           = (*Fs)(nil)
 	_ fs.DirMover        = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
+	_ fs.Abouter         = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
+	_ fs.IDer            = (*Object)(nil)
 )
@@ -1,75 +1,17 @@
 // Test Pcloud filesystem interface
-//
-// Automatically generated - DO NOT EDIT
-// Regenerate with: make gen_tests
 package pcloud_test
 
 import (
 	"testing"
 
 	"github.com/ncw/rclone/backend/pcloud"
-	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fstest/fstests"
 )
 
-func TestSetup(t *testing.T) {
-	fstests.NilObject = fs.Object((*pcloud.Object)(nil))
-	fstests.RemoteName = "TestPcloud:"
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestPcloud:",
+		NilObject:  (*pcloud.Object)(nil),
+	})
 }
-
-// Generic tests for the Fs
-func TestInit(t *testing.T) { fstests.TestInit(t) }
-func TestFsString(t *testing.T) { fstests.TestFsString(t) }
-func TestFsName(t *testing.T) { fstests.TestFsName(t) }
-func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
-func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
-func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
-func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
-func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
-func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
-func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
-func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
-func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
-func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
-func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
-func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
-func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
-func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
-func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
-func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
-func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
-func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
-func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
-func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
-func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
-func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
-func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
-func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
-func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
-func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
-func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
-func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
-func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
-func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
-func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
-func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
-func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
-func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
-func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
-func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
-func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
-func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
-func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
-func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
-func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
-func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
-func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
-func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
-func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
-func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
-func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
-func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
-func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
-func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
-func TestInternal(t *testing.T) { fstests.TestInternal(t) }
-func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
@@ -1,7 +1,7 @@
|
||||
// Package qingstor provides an interface to QingStor object storage
|
||||
// Home: https://www.qingcloud.com/
|
||||
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -17,7 +17,8 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
@@ -34,49 +35,43 @@ func init() {
 		Description: "QingCloud Object Storage",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Name: "env_auth",
-			Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
-			Examples: []fs.OptionExample{
-				{
-					Value: "false",
-					Help:  "Enter QingStor credentials in the next step",
-				}, {
-					Value: "true",
-					Help:  "Get QingStor credentials from the environment (env vars or IAM)",
-				},
-			},
+			Name:    "env_auth",
+			Help:    "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
+			Default: false,
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter QingStor credentials in the next step",
+			}, {
+				Value: "true",
+				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
+			}},
 		}, {
 			Name: "access_key_id",
-			Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "endpoint",
 			Help: "Enter an endpoint URL to connect to the QingStor API.\nLeave blank to use the default value \"https://qingstor.com:443\"",
 		}, {
 			Name: "zone",
-			Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
-			Examples: []fs.OptionExample{
-				{
-					Value: "pek3a",
-					Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
-				},
-				{
-					Value: "sh1a",
-					Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
-				},
-				{
-					Value: "gd2a",
-					Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
-				},
-			},
+			Help: "Zone to connect to.\nDefault is \"pek3a\".",
+			Examples: []fs.OptionExample{{
+				Value: "pek3a",
+				Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
+			}, {
+				Value: "sh1a",
+				Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
+			}, {
+				Value: "gd2a",
+				Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
+			}},
 		}, {
-			Name: "connection_retries",
-			Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
+			Name:     "connection_retries",
+			Help:     "Number of connection retries.",
+			Default:  3,
+			Advanced: true,
 		}},
 	})
 }
@@ -95,17 +90,28 @@ func timestampToTime(tp int64) time.Time {
 	return tm.UTC()
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	EnvAuth           bool   `config:"env_auth"`
+	AccessKeyID       string `config:"access_key_id"`
+	SecretAccessKey   string `config:"secret_access_key"`
+	Endpoint          string `config:"endpoint"`
+	Zone              string `config:"zone"`
+	ConnectionRetries int    `config:"connection_retries"`
+}
+
 // Fs represents a remote qingstor server
 type Fs struct {
 	name          string       // The name of the remote
+	root          string       // The root is a subdir, is a special object
+	opt           Options      // parsed options
+	features      *fs.Features // optional features
+	svc           *qs.Service  // The connection to the qingstor server
 	zone          string       // The zone we are working on
 	bucket        string       // The bucket we are working on
 	bucketOKMu    sync.Mutex   // mutex to protect bucketOK and bucketDeleted
 	bucketOK      bool         // true if we have created the bucket
 	bucketDeleted bool         // true if we have deleted the bucket
-	root          string       // The root is a subdir, is a special object
-	features      *fs.Features // optional features
-	svc           *qs.Service  // The connection to the qingstor server
 }
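The `config:"..."` tags above are what configstruct.Set keys on when it copies a remote's settings into the Options struct. As a rough illustration of the idea only (this toy reflection-based version is not rclone's implementation, and it skips defaults and most types):

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// set copies values from m into the struct pointed to by opt,
// matching fields by their `config` tag. Sketch only.
func set(m map[string]string, opt interface{}) error {
	v := reflect.ValueOf(opt).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("config")
		value, ok := m[key]
		if !ok {
			continue
		}
		switch f := v.Field(i); f.Kind() {
		case reflect.String:
			f.SetString(value)
		case reflect.Bool:
			b, err := strconv.ParseBool(value)
			if err != nil {
				return err
			}
			f.SetBool(b)
		case reflect.Int:
			n, err := strconv.Atoi(value)
			if err != nil {
				return err
			}
			f.SetInt(int64(n))
		}
	}
	return nil
}

type options struct {
	AccessKeyID string `config:"access_key_id"`
	Zone        string `config:"zone"`
	Retries     int    `config:"connection_retries"`
}

func main() {
	var opt options
	// Hypothetical config entries for a qingstor remote.
	_ = set(map[string]string{"access_key_id": "AKIDEXAMPLE", "zone": "pek3a", "connection_retries": "5"}, &opt)
	fmt.Printf("%+v\n", opt)
}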
|
||||
// Object describes a qingstor object
|
||||
@@ -126,10 +132,12 @@ type Object struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Pattern to match a qingstor path
|
||||
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
|
||||
|
||||
// parseParse parses a qingstor 'url'
|
||||
func qsParsePath(path string) (bucket, key string, err error) {
|
||||
// Pattern to match a qingstor path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
parts := matcher.FindStringSubmatch(path)
|
||||
if parts == nil {
|
||||
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
|
||||
@@ -165,12 +173,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 }
 
 // qsServiceConnection makes a connection to qingstor
-func qsServiceConnection(name string) (*qs.Service, error) {
-	accessKeyID := config.FileGet(name, "access_key_id")
-	secretAccessKey := config.FileGet(name, "secret_access_key")
+func qsServiceConnection(opt *Options) (*qs.Service, error) {
+	accessKeyID := opt.AccessKeyID
+	secretAccessKey := opt.SecretAccessKey
 
 	switch {
-	case config.FileGetBool(name, "env_auth", false):
+	case opt.EnvAuth:
 		// No need for empty checks if "env_auth" is true
 	case accessKeyID == "" && secretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -184,7 +192,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 	host := "qingstor.com"
 	port := 443
 
-	endpoint := config.FileGet(name, "endpoint", "")
+	endpoint := opt.Endpoint
 	if endpoint != "" {
 		_protocol, _host, _port, err := qsParseEndpoint(endpoint)
 
@@ -204,48 +212,49 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 
 	}
 
-	connectionRetries := 3
-	retries := config.FileGet(name, "connection_retries", "")
-	if retries != "" {
-		connectionRetries, _ = strconv.Atoi(retries)
-	}
-
 	cf, err := qsConfig.NewDefault()
 	if err != nil {
 		return nil, err
 	}
 	cf.AccessKeyID = accessKeyID
 	cf.SecretAccessKey = secretAccessKey
 	cf.Protocol = protocol
 	cf.Host = host
 	cf.Port = port
-	cf.ConnectionRetries = connectionRetries
+	cf.ConnectionRetries = opt.ConnectionRetries
 	cf.Connection = fshttp.NewClient(fs.Config)
 
-	svc, _ := qs.Init(cf)
-
-	return svc, err
+	return qs.Init(cf)
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	bucket, key, err := qsParsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	svc, err := qsServiceConnection(name)
+	svc, err := qsServiceConnection(opt)
 	if err != nil {
 		return nil, err
 	}
 
-	zone := config.FileGet(name, "zone")
-	if zone == "" {
-		zone = "pek3a"
+	if opt.Zone == "" {
+		opt.Zone = "pek3a"
 	}
 
 	f := &Fs{
 		name:   name,
-		zone:   zone,
 		root:   key,
-		bucket: bucket,
+		opt:    *opt,
 		svc:    svc,
+		zone:   opt.Zone,
+		bucket: bucket,
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -258,7 +267,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		f.root += "/"
 	}
 	//Check to see if the object exists
-	bucketInit, err := svc.Bucket(bucket, zone)
+	bucketInit, err := svc.Bucket(bucket, opt.Zone)
 	if err != nil {
 		return nil, err
 	}
Some files were not shown because too many files have changed in this diff.