Mirror of https://github.com/rclone/rclone.git (synced 2026-01-26 14:23:22 +00:00)

Compare commits: 299 commits, v1.40...drive-untr
Commit list: 299 commits (SHA1 only; first and last listed: 77b1eaeffe, 19ca9fb939 — author and date columns were not captured in this render).
14  .gometalinter.json  Normal file

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}
@@ -4,11 +4,10 @@ dist: trusty
os:
- linux
go:
- 1.6.4
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10"
- "1.10.1"
- tip
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
@@ -39,7 +38,7 @@ matrix:
- go: tip
include:
- os: osx
go: "1.10"
go: "1.10.1"
env: GOTAGS=""
deploy:
provider: script
@@ -47,5 +46,5 @@ deploy:
skip_cleanup: true
on:
all_branches: true
go: "1.10"
condition: $TRAVIS_OS_NAME == linux && $TRAVIS_PULL_REQUEST == false
go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false
@@ -235,15 +235,15 @@ in the `vendor` directory for perfectly reproducable builds.

The `vendor` directory is entirely managed by the `dep` tool.

To add a new dependency
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.

dep ensure -add github.com/pkg/errors
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.

You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.

Please check in the changes generated by dep including the `vendor`
directory and `Godep.toml` and `Godep.locl` in a single commit
Please check in the changes generated by `dep` including the `vendor`
directory and `Godep.toml` and `Godep.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.

@@ -303,8 +303,7 @@ Getting going
Unit tests

* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `backend/remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`

Integration tests

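For illustration only (not part of the diff above): the unit-test steps listed in the CONTRIBUTING excerpt produce a small `backend/remote/remote_test.go` that hands off to the generic test suite, in the style the amazonclouddrive test file is converted to further down in this changeset. A minimal sketch of such a file is below; the backend name `myremote`, the build tag, and the remote name `TestMyRemote:` are hypothetical placeholders.

// Test MyRemote filesystem interface
// (illustrative sketch; "myremote" is a hypothetical backend name)

// +build myremote

package myremote_test

import (
	"testing"

	"github.com/ncw/rclone/backend/myremote"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs the generic integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*myremote.Object)(nil))
	fstests.RemoteName = "TestMyRemote:"
	fstests.Run(t)
}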
195  Gopkg.lock  generated
@@ -9,30 +9,25 @@
|
||||
"fs",
|
||||
"fuseutil"
|
||||
]
|
||||
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "050b16d2314d5fc3d4c9a51e4cd5c7468e77f162"
|
||||
version = "v0.17.0"
|
||||
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = ["storage"]
|
||||
revision = "eae258195456be76b2ec9ad2ee2ab63cdda365d9"
|
||||
version = "v12.2.0-beta"
|
||||
name = "github.com/Azure/azure-pipeline-go"
|
||||
packages = ["pipeline"]
|
||||
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
|
||||
version = "0.1.7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date"
|
||||
]
|
||||
revision = "6311d7a76f54cf2b6dea03d737d9bd9a6022ac5f"
|
||||
version = "v9.7.1"
|
||||
branch = "master"
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
packages = ["2018-03-28/azblob"]
|
||||
revision = "eaae161d9d5e07363f04ddb19d84d57efc66d1a1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -41,16 +36,16 @@
|
||||
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/VividCortex/ewma"
|
||||
packages = ["."]
|
||||
revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
|
||||
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/a8m/tree"
|
||||
packages = ["."]
|
||||
revision = "cf42b1e486f0b025942a768a9ad59c9939d6ca40"
|
||||
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/abbot/go-http-auth"
|
||||
@@ -59,7 +54,6 @@
|
||||
version = "v0.4.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
@@ -72,14 +66,19 @@
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/eventstream",
|
||||
"private/protocol/eventstream/eventstreamapi",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
@@ -90,25 +89,26 @@
|
||||
"service/s3/s3manager",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "2fe57096de348e6cff4031af99254613f8ef73ea"
|
||||
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||
version = "v1.14.8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/billziss-gh/cgofuse"
|
||||
packages = ["fuse"]
|
||||
revision = "487e2baa5611bab252a906d7f9b869f944607305"
|
||||
version = "v1.0.4"
|
||||
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/coreos/bbolt"
|
||||
packages = ["."]
|
||||
revision = "ee30b748bcfbd74ec1d8439ae8fd4f9123a5c94e"
|
||||
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
|
||||
version = "v1.0.7"
|
||||
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
@@ -116,12 +116,6 @@
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
|
||||
version = "v3.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/djherbis/times"
|
||||
packages = ["."]
|
||||
@@ -129,27 +123,34 @@
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
|
||||
packages = [
|
||||
"dropbox",
|
||||
"dropbox/async",
|
||||
"dropbox/common",
|
||||
"dropbox/file_properties",
|
||||
"dropbox/files"
|
||||
"dropbox/files",
|
||||
"dropbox/seen_state",
|
||||
"dropbox/sharing",
|
||||
"dropbox/team_common",
|
||||
"dropbox/team_policies",
|
||||
"dropbox/users",
|
||||
"dropbox/users_common"
|
||||
]
|
||||
revision = "9c27e83ceccc8f8bbc9afdc17c50798529d608b1"
|
||||
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
|
||||
version = "v4.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
|
||||
version = "v1.32.0"
|
||||
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||
version = "v1.37.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -167,7 +168,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/jlaffaye/ftp"
|
||||
packages = ["."]
|
||||
revision = "427467931c6fbc25acba4537c9d3cbc40cfa569b"
|
||||
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
@@ -181,15 +182,10 @@
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
revision = "8bdf7d1a087ccc975cf37dd6507da50698fd19ca"
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
@@ -204,16 +200,16 @@
|
||||
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
|
||||
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
|
||||
version = "v1.0.39"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/nsf/termbox-go"
|
||||
packages = ["."]
|
||||
revision = "8c5e0793e04afcda7fe23d0751791e7321df4265"
|
||||
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -236,20 +232,20 @@
|
||||
"log",
|
||||
"reopen"
|
||||
]
|
||||
revision = "b98065a377794d577e2a0e32869378b9ce4b8952"
|
||||
version = "v0.1.1"
|
||||
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
revision = "72ec6e85598d2480c30f633c154b07b6c112eade"
|
||||
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
|
||||
version = "1.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
@@ -258,28 +254,22 @@
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/rfjakob/eme"
|
||||
packages = ["."]
|
||||
revision = "2222dbd4ba467ab3fc7e8af41562fcfe69c0d770"
|
||||
revision = "01668ae55fe0b79a483095689043cce3e80260db"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
|
||||
version = "v1.5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
packages = ["."]
|
||||
revision = "e49ef56654f54139c4dc0285f973f74e9649e729"
|
||||
version = "v0.1.2"
|
||||
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -288,28 +278,34 @@
|
||||
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [
|
||||
".",
|
||||
"doc"
|
||||
]
|
||||
revision = "0c34d16c3123764e413b9ed982ada58b1c3d53ea"
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require"
|
||||
]
|
||||
revision = "87b1dfb5b2fa649f52695dd9eae19abe404a4308"
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/t3rm1n4l/go-mega"
|
||||
packages = ["."]
|
||||
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -332,8 +328,8 @@
|
||||
"service",
|
||||
"utils"
|
||||
]
|
||||
revision = "51fa3b6bb3c24f4d646eefff251cd2e6ba716600"
|
||||
version = "v2.2.9"
|
||||
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
|
||||
version = "v2.2.14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -344,6 +340,8 @@
|
||||
"curve25519",
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
"internal/chacha20",
|
||||
"internal/subtle",
|
||||
"nacl/secretbox",
|
||||
"pbkdf2",
|
||||
"poly1305",
|
||||
@@ -353,7 +351,7 @@
|
||||
"ssh/agent",
|
||||
"ssh/terminal"
|
||||
]
|
||||
revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
|
||||
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -363,10 +361,16 @@
|
||||
"context/ctxhttp",
|
||||
"html",
|
||||
"html/atom",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"publicsuffix",
|
||||
"webdav",
|
||||
"webdav/internal/xml"
|
||||
"webdav/internal/xml",
|
||||
"websocket"
|
||||
]
|
||||
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
|
||||
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -378,7 +382,7 @@
|
||||
"jws",
|
||||
"jwt"
|
||||
]
|
||||
revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
|
||||
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -387,26 +391,34 @@
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "fff93fa7cd278d84afc205751523809c464168ab"
|
||||
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm"
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
|
||||
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -418,7 +430,7 @@
|
||||
"googleapi/internal/uritemplates",
|
||||
"storage/v1"
|
||||
]
|
||||
revision = "de3aa2cfa7f1c18dcb7f91738099bad280117b8e"
|
||||
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
@@ -432,21 +444,20 @@
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"log",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "f9e9adb0675a970e6b6a9f28fa75e5bbee74d001359c688bd37c78f035be565a"
|
||||
inputs-digest = "670cdb55138aa1394b4c8f87345e9be9c8105248edda4be7176dddee2a4f5d26"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
155  Gopkg.toml
@@ -1,154 +1,15 @@
|
||||
|
||||
## Gopkg.toml example (these lines may be deleted)
|
||||
|
||||
## "required" lists a set of packages (not projects) that must be included in
|
||||
## Gopkg.lock. This list is merged with the set of packages imported by the current
|
||||
## project. Use it when your project needs a package it doesn't explicitly import -
|
||||
## including "main" packages.
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
|
||||
## "ignored" lists a set of packages (not projects) that are ignored when
|
||||
## dep statically analyzes source code. Ignored packages can be in this project,
|
||||
## or in a dependency.
|
||||
# ignored = ["github.com/user/project/badpkg"]
|
||||
|
||||
## Dependencies define constraints on dependent projects. They are respected by
|
||||
## dep whether coming from the Gopkg.toml of the current project or a dependency.
|
||||
# [[constraint]]
|
||||
## Required: the root import path of the project being constrained.
|
||||
# name = "github.com/user/project"
|
||||
#
|
||||
## Recommended: the version constraint to enforce for the project.
|
||||
## Only one of "branch", "version" or "revision" can be specified.
|
||||
# version = "1.0.0"
|
||||
# branch = "master"
|
||||
# revision = "abc123"
|
||||
#
|
||||
## Optional: an alternate location (URL or import path) for the project's source.
|
||||
# source = "https://github.com/myfork/package.git"
|
||||
|
||||
## Overrides have the same structure as [[constraint]], but supercede all
|
||||
## [[constraint]] declarations from all projects. Only the current project's
|
||||
## [[override]] are applied.
|
||||
##
|
||||
## Overrides are a sledgehammer. Use them only as a last resort.
|
||||
# [[override]]
|
||||
## Required: the root import path of the project being constrained.
|
||||
# name = "github.com/user/project"
|
||||
#
|
||||
## Optional: specifying a version constraint override will cause all other
|
||||
## constraints on this project to be ignored; only the overriden constraint
|
||||
## need be satisfied.
|
||||
## Again, only one of "branch", "version" or "revision" can be specified.
|
||||
# version = "1.0.0"
|
||||
# branch = "master"
|
||||
# revision = "abc123"
|
||||
#
|
||||
## Optional: specifying an alternate source location as an override will
|
||||
## enforce that the alternate location is used for that project, regardless of
|
||||
## what source location any dependent projects specify.
|
||||
# source = "https://github.com/myfork/package.git"
|
||||
|
||||
[[constraint]]
|
||||
# pin this to master to pull in the macOS changes
|
||||
# can likely remove for 1.43
|
||||
[[override]]
|
||||
branch = "master"
|
||||
name = "bazil.org/fuse"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/goconfig"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/VividCortex/ewma"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/go-acd"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/swift"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/errors"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/pkg/sftp"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/rfjakob/eme"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/skratchdot/open-golang"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/pflag"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/stacktic/dropbox"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/testify"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/yunify/qingstor-sdk-go"
|
||||
|
||||
[[constraint]]
|
||||
# pin this to master to pull in the fix for linux/mips
|
||||
# can likely remove for 1.43
|
||||
[[override]]
|
||||
branch = "master"
|
||||
name = "github.com/coreos/bbolt"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/patrickmn/go-cache"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/okzk/sdnotify"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
version = "0.1.2"
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
|
||||
1443  MANUAL.html  (file diff suppressed because it is too large)
1742  MANUAL.txt  (file diff suppressed because it is too large)
71  Makefile
@@ -1,12 +1,22 @@
|
||||
SHELL = bash
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags)-$${APPVEYOR_REPO_BRANCH:-$${TRAVIS_BRANCH:-$$(git rev-parse --abbrev-ref HEAD)}} | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
|
||||
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
|
||||
TAG_BRANCH := -$(BRANCH)
|
||||
BRANCH_PATH := branch/
|
||||
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||
TAG_BRANCH :=
|
||||
BRANCH_PATH :=
|
||||
endif
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
GO_VERSION := $(shell go version)
|
||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||
# Run full tests if go >= go1.9
|
||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
|
||||
BETA_URL := https://beta.rclone.org/$(TAG)/
|
||||
BETA_PATH := $(BRANCH_PATH)$(TAG)
|
||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||
ifdef GOTAGS
|
||||
BUILDTAGS=-tags "$(GOTAGS)"
|
||||
@@ -21,6 +31,7 @@ rclone:
|
||||
|
||||
vars:
|
||||
@echo SHELL="'$(SHELL)'"
|
||||
@echo BRANCH="'$(BRANCH)'"
|
||||
@echo TAG="'$(TAG)'"
|
||||
@echo LAST_TAG="'$(LAST_TAG)'"
|
||||
@echo NEW_TAG="'$(NEW_TAG)'"
|
||||
@@ -56,16 +67,30 @@ else
|
||||
@echo Skipping source quality tests as version of go too old
|
||||
endif
|
||||
|
||||
gometalinter_install:
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install --update
|
||||
|
||||
# We aren't using gometalinter as the default linter yet because
|
||||
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
|
||||
# 2. can't get -printfuncs working with the vet linter
|
||||
gometalinter:
|
||||
gometalinter ./...
|
||||
|
||||
# Get the build dependencies
|
||||
build_dep:
|
||||
ifdef FULL_TESTS
|
||||
go get -u github.com/kisielk/errcheck
|
||||
go get -u golang.org/x/tools/cmd/goimports
|
||||
go get -u github.com/golang/lint/golint
|
||||
go get -u github.com/inconshreveable/mousetrap
|
||||
go get -u github.com/tools/godep
|
||||
endif
|
||||
|
||||
# Get the release dependencies
|
||||
release_dep:
|
||||
go get -u github.com/goreleaser/nfpm/...
|
||||
go get -u github.com/aktau/github-release
|
||||
|
||||
# Update dependencies
|
||||
update:
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
@@ -88,6 +113,9 @@ MANUAL.txt: MANUAL.md
|
||||
commanddocs: rclone
|
||||
rclone gendocs docs/content/commands/
|
||||
|
||||
rcdocs: rclone
|
||||
bin/make_rc_docs.sh
|
||||
|
||||
install: rclone
|
||||
install -d ${DESTDIR}/usr/bin
|
||||
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
||||
@@ -105,12 +133,12 @@ upload_website: website
|
||||
rclone -v sync docs/public memstore:www-rclone-org
|
||||
|
||||
tarball:
|
||||
git archive -9 --format=tar.gz --prefix=rclone-$(TAG) -o build/rclone-$(TAG).tar.gz $(TAG)
|
||||
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
||||
|
||||
sign_upload:
|
||||
cd build && md5sum rclone-* | gpg --clearsign > MD5SUMS
|
||||
cd build && sha1sum rclone-* | gpg --clearsign > SHA1SUMS
|
||||
cd build && sha256sum rclone-* | gpg --clearsign > SHA256SUMS
|
||||
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
||||
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
||||
cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
|
||||
|
||||
check_sign:
|
||||
cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
|
||||
@@ -118,7 +146,8 @@ check_sign:
|
||||
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
||||
|
||||
upload:
|
||||
rclone -v copy build/ memstore:downloads-rclone-org
|
||||
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
|
||||
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
|
||||
|
||||
upload_github:
|
||||
./bin/upload-github $(TAG)
|
||||
@@ -142,25 +171,32 @@ else
|
||||
endif
|
||||
|
||||
appveyor_upload:
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
ifeq ($(APPVEYOR_REPO_BRANCH),master)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
BUILD_FLAGS := -exclude "^(windows|darwin)/"
|
||||
ifeq ($(TRAVIS_OS_NAME),osx)
|
||||
BUILD_FLAGS := -include "^darwin/" -cgo
|
||||
endif
|
||||
|
||||
travis_beta:
|
||||
ifeq ($(TRAVIS_OS_NAME),linux)
|
||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
||||
endif
|
||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
ifeq ($(TRAVIS_BRANCH),master)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
# Fetch the windows builds from appveyor
|
||||
fetch_windows:
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
|
||||
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
|
||||
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
|
||||
md5sum build/rclone-*-windows-*.zip | sort
|
||||
@@ -188,9 +224,6 @@ startdev:
|
||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
|
||||
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
|
||||
|
||||
gen_tests:
|
||||
cd fstest/fstests && go generate
|
||||
|
||||
winzip:
|
||||
zip -9 rclone-$(TAG).zip rclone.exe
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@

Rclone is a command line program to sync files and directories to and from

* Amazon Drive
* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
@@ -25,8 +25,10 @@ Rclone is a command line program to sync files and directories to and from
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
@@ -13,6 +13,7 @@ Making a release
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
@@ -30,6 +31,7 @@ Making a release
* # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies
* Review any pinned packages in Gopkg.toml and remove if possible
* make update
* git status
* git add new files
@@ -7,7 +7,8 @@ import (
"strings"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
)

// Register with Fs
@@ -17,29 +18,42 @@ func init() {
Description: "Alias for a existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}

// NewFs contstructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}

root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
if opt.Remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
if err != nil {
return nil, err
}
root = path.Join(fsPath, filepath.ToSlash(root))
if configName == "local" {
return fs.NewFs(root)
}
return fs.NewFs(configName + ":" + root)
}
@@ -15,8 +15,6 @@ import (

var (
remoteName = "TestAlias"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
)

func prepare(t *testing.T, root string) {
@@ -16,7 +16,9 @@ import (
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
@@ -18,14 +18,14 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/go-acd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -38,20 +38,17 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
assetKind = "ASSET"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
|
||||
// Description of how to auth for this app
|
||||
acdConfig = &oauth2.Config{
|
||||
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
|
||||
@@ -69,35 +66,62 @@ var (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "amazon cloud drive",
|
||||
Prefix: "acd",
|
||||
Description: "Amazon Drive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Amazon Application Client Id - required.",
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Amazon Application Client ID.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret - required.",
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL - leave blank to use Amazon's.",
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL.\nLeave blank to use Amazon's.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url - leave blank to use Amazon's.",
|
||||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url.\nleave blank to use Amazon's.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "checkpoint",
|
||||
Help: "Checkpoint for internal polling (debug).",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
|
||||
Default: fs.Duration(180 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "templink_threshold",
|
||||
Help: "Files >= this size will be downloaded via their tempLink.",
|
||||
Default: defaultTempLinkThreshold,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Checkpoint string `config:"checkpoint"`
|
||||
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
|
||||
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
|
||||
}
|
||||
|
||||
// Fs represents a remote acd server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
c *acd.Client // the connection to the acd server
|
||||
noAuthClient *http.Client // unauthenticated http client
|
||||
root string // the path we are working on
|
||||
@@ -138,9 +162,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a acd path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an acd 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -196,7 +217,13 @@ func filterRequest(req *http.Request) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = parsePath(root)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
@@ -206,7 +233,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
}
|
||||
@@ -215,6 +242,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
noAuthClient: fshttp.NewClient(fs.Config),
|
||||
@@ -532,13 +560,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
||||
}
|
||||
|
||||
// Don't wait for uploads - assume they will appear later
|
||||
if *uploadWaitPerGB <= 0 {
|
||||
if f.opt.UploadWaitPerGB <= 0 {
|
||||
fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
|
||||
return false, inInfo, inErr
|
||||
}
|
||||
|
||||
// Time we should wait for the upload
|
||||
uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
|
||||
uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
|
||||
timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
|
||||
|
||||
const sleepTime = 5 * time.Second // sleep between tries
|
||||
@@ -1020,7 +1048,7 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bigObject := o.Size() >= int64(tempLinkThreshold)
|
||||
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
|
||||
if bigObject {
|
||||
fs.Debugf(o, "Downloading large object via tempLink")
|
||||
}
|
||||
@@ -1213,7 +1241,7 @@ func (o *Object) MimeType() string {
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
checkpoint := config.FileGet(f.name, "checkpoint")
|
||||
checkpoint := f.opt.Checkpoint
|
||||
|
||||
quit := make(chan bool)
|
||||
go func() {
|
||||
@@ -1320,6 +1348,14 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
|
||||
return checkpoint
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
if o.info.Id == nil {
|
||||
return ""
|
||||
}
|
||||
return *o.info.Id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1331,4 +1367,5 @@ var (
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
// Test AmazonCloudDrive filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
|
||||
// +build acd
|
||||
|
||||
@@ -15,64 +12,9 @@ import (
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
fstests.Run(t)
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build go1.7
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -21,13 +21,12 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -35,24 +34,20 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
apiVersion = "2017-04-17"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
listChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
maxChunkSize = fs.SizeSuffix(100 * 1024 * 1024)
|
||||
chunkSize = fs.SizeSuffix(4 * 1024 * 1024)
|
||||
uploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
listChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
defaultChunkSize = 4 * 1024 * 1024
|
||||
maxChunkSize = 100 * 1024 * 1024
|
||||
defaultUploadCutoff = 256 * 1024 * 1024
|
||||
maxUploadCutoff = 256 * 1024 * 1024
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -63,30 +58,49 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Storage Account Name",
|
||||
Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Storage Account Key",
|
||||
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service - leave blank normally.",
|
||||
},
|
||||
},
|
||||
Name: "sas_url",
|
||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
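// Illustrative sketch (not part of this diff): how the new Options plumbing is
// exercised. NewFs now receives a configmap.Mapper and configstruct.Set copies
// the values into Options via the `config:"..."` tags. configmap.Simple (a
// plain map[string]string mapper) and the values below are assumptions for
// illustration only:
//
//	m := configmap.Simple{
//		"account":    "myaccount",
//		"key":        "bXlrZXk=",
//		"chunk_size": "8M", // assumes SizeSuffix values parse from their usual string form
//	}
//	opt := new(Options)
//	if err := configstruct.Set(m, opt); err != nil {
//		return nil, err
//	}
//	// opt.Account, opt.Key and opt.ChunkSize are now populated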
|
||||
|
||||
// Fs represents a remote azure server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
key []byte // auth key
|
||||
endpoint string // name of the starting api endpoint
|
||||
bc *storage.BlobStorageClient
|
||||
cc *storage.Container
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
containerOKMu sync.Mutex // mutex to protect container OK
|
||||
containerOK bool // true if we have created the container
|
||||
@@ -97,14 +111,14 @@ type Fs struct {
|
||||
|
||||
// Object describes an azure object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // azure id of the file
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
meta map[string]string // blob metadata
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
accessTier azblob.AccessTierType // Blob Access Tier
|
||||
meta map[string]string // blob metadata
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -164,8 +178,8 @@ var retryErrorCodes = []int{
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// FIXME interpret special errors - more to do here
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
statusCode := storageErr.StatusCode
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
statusCode := storageErr.Response().StatusCode
|
||||
for _, e := range retryErrorCodes {
|
||||
if statusCode == e {
|
||||
return true, err
|
||||
@@ -176,48 +190,74 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", uploadCutoff, maxUploadCutoff)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, chunkSize)
|
||||
|
||||
if opt.UploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
|
||||
}
|
||||
container, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account := config.FileGet(name, "account")
|
||||
if account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
}
|
||||
key := config.FileGet(name, "key")
|
||||
if key == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
keyBytes, err := base64.StdEncoding.DecodeString(key)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("malformed storage account key: %v", err)
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = storageDefaultBaseURL
|
||||
}
|
||||
|
||||
endpoint := config.FileGet(name, "endpoint", storage.DefaultBaseURL)
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
containerURL azblob.ContainerURL
|
||||
)
|
||||
switch {
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.SASURL != "":
|
||||
u, err = url.Parse(opt.SASURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
// use anonymous credentials in case of SAS URL
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
|
||||
// Check if we have a container level SAS or an account level SAS
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
if container != "" && parts.ContainerName != container {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
client, err := storage.NewClient(account, key, endpoint, apiVersion, true)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage client")
|
||||
container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
client.HTTPClient = fshttp.NewClient(fs.Config)
|
||||
bc := client.GetBlobService()
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
account: account,
|
||||
key: keyBytes,
|
||||
endpoint: endpoint,
|
||||
bc: &bc,
|
||||
cc: bc.GetContainerReference(container),
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
|
||||
@@ -255,13 +295,13 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *storage.Blob) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
err := o.decodeMetaData(info)
|
||||
err := o.decodeMetaDataFromBlob(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -281,13 +321,12 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (f *Fs) getBlobReference(remote string) *storage.Blob {
|
||||
return f.cc.GetBlobReference(f.root + remote)
|
||||
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
|
||||
return f.cntURL.NewBlobURL(f.root + remote)
|
||||
}
|
||||
|
||||
// getBlobWithModTime adds the modTime passed in to o.meta and creates
|
||||
// a Blob from it.
|
||||
func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
||||
func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
@@ -295,14 +334,10 @@ func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
|
||||
// Set modTimeKey in it
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
blob.Metadata = o.meta
|
||||
return blob
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *storage.Blob, isDirectory bool) error
|
||||
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
|
||||
// list lists the objects into the function supplied from
|
||||
// the container and root supplied
|
||||
@@ -323,32 +358,39 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
}
|
||||
params := storage.ListBlobsParameters{
|
||||
MaxResults: maxResults,
|
||||
Prefix: root,
|
||||
Delimiter: delimiter,
|
||||
Include: &storage.IncludeBlobDataset{
|
||||
Snapshots: false,
|
||||
Metadata: true,
|
||||
UncommittedBlobs: false,
|
||||
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: true,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
Prefix: root,
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
for {
|
||||
var response storage.BlobListResponse
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.cc.ListBlobs(params)
|
||||
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
for i := range response.Blobs {
|
||||
file := &response.Blobs[i]
|
||||
// Advance marker to next
|
||||
marker = response.NextMarker
|
||||
|
||||
for i := range response.Segment.BlobItems {
|
||||
file := &response.Segment.BlobItems[i]
|
||||
// Finish if file name no longer has prefix
|
||||
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||
// return nil
|
||||
@@ -370,8 +412,8 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
}
|
||||
}
|
||||
// Send the subdirectories
|
||||
for _, remote := range response.BlobPrefixes {
|
||||
remote := strings.TrimRight(remote, "/")
|
||||
for _, remote := range response.Segment.BlobPrefixes {
|
||||
remote := strings.TrimRight(remote.Name, "/")
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
fs.Debugf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
@@ -383,17 +425,12 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// end if no NextFileName
|
||||
if response.NextMarker == "" {
|
||||
break
|
||||
}
|
||||
params.Marker = response.NextMarker
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(remote string, object *storage.Blob, isDirectory bool) (fs.DirEntry, error) {
|
||||
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, time.Time{})
|
||||
return d, nil
|
||||
@@ -417,7 +454,7 @@ func (f *Fs) markContainerOK() {
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -440,13 +477,8 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
err = f.listContainersToFn(func(container *storage.Container) error {
|
||||
t, err := time.Parse(time.RFC1123, container.Properties.LastModified)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to parse LastModified %q: %v", container.Properties.LastModified, err)
|
||||
t = time.Time{}
|
||||
}
|
||||
d := fs.NewDir(container.Name, t)
|
||||
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
|
||||
d := fs.NewDir(container.Name, container.Properties.LastModified)
|
||||
entries = append(entries, d)
|
||||
return nil
|
||||
})
|
||||
@@ -493,7 +525,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -509,27 +541,34 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
}
|
||||
|
||||
// listContainerFn is called from listContainersToFn to handle a container
|
||||
type listContainerFn func(*storage.Container) error
|
||||
type listContainerFn func(*azblob.ContainerItem) error
|
||||
|
||||
// listContainersToFn lists the containers to the function supplied
|
||||
func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
||||
// FIXME page the containers if necessary?
|
||||
params := storage.ListContainersParameters{}
|
||||
var response *storage.ContainerListResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.bc.ListContainers(params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
params := azblob.ListContainersSegmentOptions{
|
||||
MaxResults: int32(listChunkSize),
|
||||
}
|
||||
for i := range response.Containers {
|
||||
err = fn(&response.Containers[i])
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListContainersResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range response.ContainerItems {
|
||||
err = fn(&response.ContainerItems[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
marker = response.NextMarker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -554,23 +593,20 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
options := storage.CreateContainerOptions{
|
||||
Access: storage.ContainerAccessTypePrivate,
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
err := f.cc.Create(&options)
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
switch storageErr.StatusCode {
|
||||
case http.StatusConflict:
|
||||
switch storageErr.Code {
|
||||
case "ContainerAlreadyExists":
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case "ContainerBeingDeleted":
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
switch storageErr.ServiceCode() {
|
||||
case azblob.ServiceCodeContainerAlreadyExists:
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case azblob.ServiceCodeContainerBeingDeleted:
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -586,7 +622,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list("", true, 1, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -604,16 +640,23 @@ func (f *Fs) isEmpty(dir string) (err error) {
|
||||
func (f *Fs) deleteContainer() error {
|
||||
f.containerOKMu.Lock()
|
||||
defer f.containerOKMu.Unlock()
|
||||
options := storage.DeleteContainerOptions{}
|
||||
options := azblob.ContainerAccessConditions{}
|
||||
ctx := context.Background()
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
exists, err := f.cc.Exists()
|
||||
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
|
||||
if err == nil {
|
||||
_, err = f.cntURL.Delete(ctx, options)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(err)
|
||||
}
|
||||
if !exists {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
err = f.cc.Delete(&options)
|
||||
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -676,17 +719,36 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
dstBlob := f.getBlobReference(remote)
|
||||
srcBlob := srcObj.getBlobReference()
|
||||
options := storage.CopyOptions{}
|
||||
sourceBlobURL := srcBlob.GetURL()
|
||||
dstBlobURL := f.getBlobReference(remote)
|
||||
srcBlobURL := srcObj.getBlobReference()
|
||||
|
||||
source, err := url.Parse(srcBlobURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var startCopy *azblob.BlobStartCopyFromURLResponse
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = dstBlob.Copy(sourceBlobURL, &options)
|
||||
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copyStatus := startCopy.CopyStatus()
|
||||
for copyStatus == azblob.CopyStatusPending {
|
||||
time.Sleep(1 * time.Second)
|
||||
getMetadata, err := dstBlobURL.GetProperties(ctx, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = getMetadata.CopyStatus()
|
||||
}
|
||||
|
||||
return f.NewObject(remote)
|
||||
}
|
||||
|
||||
@@ -731,22 +793,10 @@ func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// decodeMetaData sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
o.md5 = info.Properties.ContentMD5
|
||||
o.mimeType = info.Properties.ContentType
|
||||
o.size = info.Properties.ContentLength
|
||||
o.modTime = time.Time(info.Properties.LastModified)
|
||||
if len(info.Metadata) > 0 {
|
||||
o.meta = info.Metadata
|
||||
if modTime, ok := info.Metadata[modTimeKey]; ok {
|
||||
func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
if len(metadata) > 0 {
|
||||
o.meta = metadata
|
||||
if modTime, ok := metadata[modTimeKey]; ok {
|
||||
when, err := time.Parse(timeFormatIn, modTime)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Couldn't parse %v = %q: %v", modTimeKey, modTime, err)
|
||||
@@ -756,11 +806,42 @@ func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
} else {
|
||||
o.meta = nil
|
||||
}
|
||||
}
|
||||
|
||||
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
// NOTE - in BlobGetPropertiesResponse the client library returns the MD5 as raw (base64 decoded) bytes,
// unlike BlobProperties in BlobItem (used in decodeMetaDataFromBlob), which returns base64
// encoded bytes. Object needs to maintain this as a base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
|
||||
return nil
|
||||
}
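// Illustrative sketch (not part of this diff): the MD5 conversions used above
// and in Update. rclone's hashes are hex strings while Azure stores the MD5
// base64 encoded, so the two representations are bridged like this (the
// sample digest below is arbitrary):
//
//	hexMD5 := "9e107d9d372bb6826bd81d3542a419d6"
//	raw, err := hex.DecodeString(hexMD5)
//	if err != nil {
//		// not a valid hex MD5
//	}
//	b64MD5 := base64.StdEncoding.EncodeToString(raw) // what Azure stores
//	back, _ := base64.StdEncoding.DecodeString(b64MD5)
//	_ = hex.EncodeToString(back) // == hexMD5 again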
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
o.md5 = string(info.Properties.ContentMD5[:])
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (o *Object) getBlobReference() *storage.Blob {
|
||||
func (o *Object) getBlobReference() azblob.BlobURL {
|
||||
return o.fs.getBlobReference(o.remote)
|
||||
}
|
||||
|
||||
@@ -783,19 +864,22 @@ func (o *Object) readMetaData() (err error) {
|
||||
blob := o.getBlobReference()
|
||||
|
||||
// Read the blob properties (this includes the metadata)
|
||||
getPropertiesOptions := storage.GetBlobPropertiesOptions{}
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var blobProperties *azblob.BlobGetPropertiesResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.GetProperties(&getPropertiesOptions)
|
||||
blobProperties, err = blob.GetProperties(ctx, options)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
// On directories - GetProperties does not work and the current SDK does not populate the service code correctly, hence check the regular http response as well
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return o.decodeMetaData(blob)
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
@@ -832,10 +916,17 @@ func (o *Object) ModTime() (result time.Time) {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
blob := o.getBlobWithModTime(modTime)
|
||||
options := storage.SetBlobMetadataOptions{}
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
}
|
||||
// Set modTimeKey in it
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.SetMetadata(&options)
|
||||
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -850,41 +941,20 @@ func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// openFile represents an Object open for reading
|
||||
type openFile struct {
|
||||
o *Object // Object we are reading for
|
||||
resp *http.Response // response of the GET
|
||||
body io.Reader // reading from here
|
||||
hash gohash.Hash // currently accumulating MD5
|
||||
bytes int64 // number of bytes read on this connection
|
||||
eof bool // whether we have read end of file
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
getBlobOptions := storage.GetBlobOptions{}
|
||||
getBlobRangeOptions := storage.GetBlobRangeOptions{
|
||||
GetBlobOptions: &getBlobOptions,
|
||||
}
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
start, end := x.Start, x.End
|
||||
if end < 0 {
|
||||
end = 0
|
||||
}
|
||||
if start < 0 {
|
||||
start = o.size - end
|
||||
end = 0
|
||||
}
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(start),
|
||||
End: uint64(end),
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(x.Offset),
|
||||
}
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
@@ -892,17 +962,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
var downloadResponse *azblob.DownloadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if getBlobRangeOptions.Range == nil {
|
||||
in, err = blob.Get(&getBlobOptions)
|
||||
} else {
|
||||
in, err = blob.GetRange(&getBlobRangeOptions)
|
||||
}
|
||||
downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open for download")
|
||||
}
|
||||
in = downloadResponse.Body(azblob.RetryReaderOptions{})
|
||||
return in, nil
|
||||
}
|
||||
|
||||
@@ -927,26 +997,18 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// urlEncode encodes in with % encoding
|
||||
func urlEncode(in string) string {
|
||||
var out bytes.Buffer
|
||||
for i := 0; i < len(in); i++ {
|
||||
c := in[i]
|
||||
if noNeedToEncode[c] {
|
||||
_ = out.WriteByte(c)
|
||||
} else {
|
||||
_, _ = out.WriteString(fmt.Sprintf("%%%2X", c))
|
||||
}
|
||||
}
|
||||
return out.String()
|
||||
// readSeeker joins an io.Reader and an io.Seeker
|
||||
type readSeeker struct {
|
||||
io.Reader
|
||||
io.Seeker
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, putBlobOptions *storage.PutBlobOptions) (err error) {
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
||||
// Calculate correct chunkSize
|
||||
chunkSize := int64(chunkSize)
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
var totalParts int64
|
||||
for {
|
||||
// Calculate number of parts
|
||||
@@ -966,31 +1028,37 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, p
|
||||
}
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Create an empty blob
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.CreateBlockBlob(putBlobOptions)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
|
||||
// Utilities are cloned from above example
|
||||
// These helper functions convert a binary block ID to a base-64 string and vice versa
|
||||
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
|
||||
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
|
||||
// These helper functions convert an int block ID to a base-64 string and vice versa
|
||||
blockIDIntToBase64 := func(blockID uint64) string {
|
||||
binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
|
||||
binary.LittleEndian.PutUint64(binaryBlockID, blockID)
|
||||
return blockIDBinaryToBase64(binaryBlockID)
|
||||
}
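// Illustrative sketch (not part of this diff): the helpers above always
// produce 12 character base64 strings because every raw ID is padded to
// 8 bytes, which satisfies the requirement that all block IDs in a blob
// share the same length. For example:
//
//	blockIDIntToBase64(1)     // "AQAAAAAAAAA="
//	blockIDIntToBase64(50000) // "UMMAAAAAAAA="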
|
||||
|
||||
// block ID variables
|
||||
var (
|
||||
rawID uint64
|
||||
bytesID = make([]byte, 8)
|
||||
blockID = "" // id in base64 encoded form
|
||||
blocks = make([]storage.Block, 0, totalParts)
|
||||
blocks = make([]string, totalParts)
|
||||
)
|
||||
|
||||
// increment the blockID
|
||||
nextID := func() {
|
||||
rawID++
|
||||
binary.LittleEndian.PutUint64(bytesID, rawID)
|
||||
blockID = base64.StdEncoding.EncodeToString(bytesID)
|
||||
blocks = append(blocks, storage.Block{
|
||||
ID: blockID,
|
||||
Status: storage.BlockStatusLatest,
|
||||
})
|
||||
blockID = blockIDIntToBase64(rawID)
|
||||
blocks = append(blocks, blockID)
|
||||
}
|
||||
|
||||
// Get BlockBlobURL, we will use default pipeline here
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
ctx := context.Background()
|
||||
ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
|
||||
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
@@ -1033,13 +1101,11 @@ outer:
|
||||
defer o.fs.uploadToken.Put()
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
putBlockOptions := storage.PutBlockOptions{
|
||||
ContentMD5: base64.StdEncoding.EncodeToString(md5sum[:]),
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.PutBlockWithLength(blockID, uint64(len(buf)), wrap(bytes.NewBuffer(buf)), &putBlockOptions)
|
||||
bufferReader := bytes.NewReader(buf)
|
||||
wrappedReader := wrap(bufferReader)
|
||||
rs := readSeeker{wrappedReader, bufferReader}
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, rs, ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
@@ -1069,9 +1135,8 @@ outer:
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
putBlockListOptions := storage.PutBlockListOptions{}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.PutBlockList(blocks, &putBlockListOptions)
|
||||
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1089,29 +1154,45 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
blob := o.getBlobWithModTime(src.ModTime())
|
||||
blob.Properties.ContentType = fs.MimeType(o)
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
// Update Mod time
|
||||
o.updateMetadataWithModTime(src.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blob := o.getBlobReference()
|
||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||
httpHeaders.ContentType = fs.MimeType(o)
|
||||
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
|
||||
// MD5 only for PutBlob requests
|
||||
if size < int64(o.fs.opt.UploadCutoff) {
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
putBlobOptions := storage.PutBlobOptions{}
|
||||
|
||||
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
|
||||
BufferSize: int(o.fs.opt.ChunkSize),
|
||||
MaxBuffers: 4,
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if size >= int64(uploadCutoff) {
|
||||
if size >= int64(o.fs.opt.UploadCutoff) {
|
||||
// If a large file upload in chunks
|
||||
err = o.uploadMultipart(in, size, blob, &putBlobOptions)
|
||||
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
|
||||
} else {
|
||||
// Write a small blob in one transaction
|
||||
if size == 0 {
|
||||
in = nil
|
||||
}
|
||||
err = blob.CreateBlockBlobFromReader(in, &putBlobOptions)
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
|
||||
}
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
@@ -1125,9 +1206,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
blob := o.getBlobReference()
|
||||
options := storage.DeleteBlobOptions{}
|
||||
snapShotOptions := azblob.DeleteSnapshotsOptionNone
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.Delete(&options)
|
||||
_, err := blob.Delete(ctx, snapShotOptions, ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
|
||||
// +build go1.7
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
|
||||
package azureblob_test
|
||||
|
||||
@@ -11,68 +8,13 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/azureblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*azureblob.Object)(nil))
|
||||
fstests.RemoteName = "TestAzureBlob:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*azureblob.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.7
|
||||
// +build freebsd netbsd openbsd plan9 solaris !go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
168
backend/b2/b2.go
@@ -22,8 +22,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -34,30 +34,27 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEndpoint = "https://api.backblazeb2.com"
|
||||
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
|
||||
timeKey = "src_last_modified_millis"
|
||||
timeHeader = headerPrefix + timeKey
|
||||
sha1Key = "large_file_sha1"
|
||||
sha1Header = "X-Bz-Content-Sha1"
|
||||
sha1InfoHeader = headerPrefix + sha1Key
|
||||
testModeHeader = "X-Bz-Test-Mode"
|
||||
retryAfterHeader = "Retry-After"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
defaultEndpoint = "https://api.backblazeb2.com"
|
||||
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
|
||||
timeKey = "src_last_modified_millis"
|
||||
timeHeader = headerPrefix + timeKey
|
||||
sha1Key = "large_file_sha1"
|
||||
sha1Header = "X-Bz-Content-Sha1"
|
||||
sha1InfoHeader = headerPrefix + sha1Key
|
||||
testModeHeader = "X-Bz-Test-Mode"
|
||||
retryAfterHeader = "Retry-After"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
minChunkSize = 5E6
|
||||
defaultChunkSize = 96 * 1024 * 1024
|
||||
defaultUploadCutoff = 200E6
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
minChunkSize = fs.SizeSuffix(5E6)
|
||||
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
|
||||
uploadCutoff = fs.SizeSuffix(200E6)
|
||||
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
|
||||
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
|
||||
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
)
|
||||
|
||||
@@ -68,29 +65,64 @@ func init() {
|
||||
Description: "Backblaze B2",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Account ID",
|
||||
Name: "account",
|
||||
Help: "Account ID",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Application Key",
|
||||
Name: "key",
|
||||
Help: "Application Key",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service - leave blank normally.",
|
||||
},
|
||||
},
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "test_mode",
|
||||
Help: "A flag string for X-Bz-Test-Mode header for debugging.",
|
||||
Default: "",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "versions",
|
||||
Help: "Include old versions in directory listings.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
TestMode string `config:"test_mode"`
|
||||
Versions bool `config:"versions"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
key string // auth key
|
||||
endpoint string // name of the starting api endpoint
|
||||
srv *rest.Client // the connection to the b2 server
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
@@ -232,33 +264,37 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff < chunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
|
||||
if opt.UploadCutoff < opt.ChunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
|
||||
}
|
||||
if opt.ChunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
|
||||
}
|
||||
bucket, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account := config.FileGet(name, "account")
|
||||
if account == "" {
|
||||
if opt.Account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
}
|
||||
key := config.FileGet(name, "key")
|
||||
if key == "" {
|
||||
if opt.Key == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = defaultEndpoint
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
account: account,
|
||||
key: key,
|
||||
endpoint: endpoint,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
bufferTokens: make(chan []byte, fs.Config.Transfers),
|
||||
@@ -269,8 +305,8 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
// Set the test flag if required
|
||||
if *b2TestMode != "" {
|
||||
testMode := strings.TrimSpace(*b2TestMode)
|
||||
if opt.TestMode != "" {
|
||||
testMode := strings.TrimSpace(opt.TestMode)
|
||||
f.srv.SetHeader(testModeHeader, testMode)
|
||||
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
|
||||
}
|
||||
@@ -316,9 +352,9 @@ func (f *Fs) authorizeAccount() error {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/b2api/v1/b2_authorize_account",
|
||||
RootURL: f.endpoint,
|
||||
UserName: f.account,
|
||||
Password: f.key,
|
||||
RootURL: f.opt.Endpoint,
|
||||
UserName: f.opt.Account,
|
||||
Password: f.opt.Key,
|
||||
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -384,7 +420,7 @@ func (f *Fs) clearUploadURL() {
|
||||
func (f *Fs) getUploadBlock() []byte {
|
||||
buf := <-f.bufferTokens
|
||||
if buf == nil {
|
||||
buf = make([]byte, chunkSize)
|
||||
buf = make([]byte, f.opt.ChunkSize)
|
||||
}
|
||||
// fs.Debugf(f, "Getting upload block %p", buf)
|
||||
return buf
|
||||
@@ -393,7 +429,7 @@ func (f *Fs) getUploadBlock() []byte {
|
||||
// putUploadBlock returns a block to the pool of size chunkSize
|
||||
func (f *Fs) putUploadBlock(buf []byte) {
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) != int(chunkSize) {
|
||||
if len(buf) != int(f.opt.ChunkSize) {
|
||||
panic("bad blocksize returned to pool")
|
||||
}
|
||||
// fs.Debugf(f, "Returning upload block %p", buf)
|
||||
@@ -563,7 +599,7 @@ func (f *Fs) markBucketOK() {
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
last := ""
|
||||
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -635,7 +671,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
last := ""
|
||||
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1035,12 +1071,12 @@ func (o *Object) readMetaData() (err error) {
|
||||
maxSearched := 1
|
||||
var timestamp api.Timestamp
|
||||
baseRemote := o.remote
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
timestamp, baseRemote = api.RemoveVersion(baseRemote)
|
||||
maxSearched = maxVersions
|
||||
}
|
||||
var info *api.File
|
||||
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
}
|
||||
@@ -1254,7 +1290,7 @@ func urlEncode(in string) string {
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
err = o.fs.Mkdir("")
|
||||
@@ -1289,7 +1325,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
} else if size > int64(uploadCutoff) {
|
||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||
up, err := o.fs.newLargeUpload(o, in, src)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1408,10 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
if *b2HardDelete {
|
||||
if o.fs.opt.HardDelete {
|
||||
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
|
||||
}
|
||||
return o.fs.hide(o.fs.root + o.remote)
|
||||
@@ -1422,6 +1458,11 @@ func (o *Object) MimeType() string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
@@ -1431,4 +1472,5 @@ var (
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test B2 filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package b2_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*b2.Object)(nil))
|
||||
fstests.RemoteName = "TestB2:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestB2:",
|
||||
NilObject: (*b2.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
|
||||
parts := int64(0)
|
||||
sha1SliceSize := int64(maxParts)
|
||||
if size == -1 {
|
||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
|
||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
||||
} else {
|
||||
parts = size / int64(chunkSize)
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts = size / int64(o.fs.opt.ChunkSize)
|
||||
if size%int64(o.fs.opt.ChunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
if parts > maxParts {
|
||||
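The hunk above keeps the part arithmetic: a streamed upload (size == -1) can grow to at most maxParts chunks, and a known size is split into ceil(size/chunkSize) parts. A minimal standalone sketch of that calculation, using illustrative values in place of the backend's real maxParts constant and --b2-chunk-size default:

package main

import "fmt"

func main() {
	// Illustrative values only; the b2 backend defines its own maxParts
	// constant and takes the chunk size from --b2-chunk-size.
	const maxParts = 10000
	const chunkSize = int64(96 * 1024 * 1024)

	// Unknown size (streaming): the hard limit is maxParts full chunks.
	fmt.Printf("streaming limit: %d bytes\n", maxParts*chunkSize)

	// Known size: round the part count up (ceiling division).
	size := int64(250*1024*1024 + 1)
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	fmt.Printf("%d bytes -> %d parts\n", size, parts) // 3 parts
}
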
@@ -357,7 +357,8 @@ outer:
|
||||
buf := up.f.getUploadBlock()
|
||||
|
||||
// Read the chunk
|
||||
n, err := io.ReadFull(up.in, buf)
|
||||
var n int
|
||||
n, err = io.ReadFull(up.in, buf)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||
buf = buf[:n]
|
||||
@@ -366,7 +367,6 @@ outer:
|
||||
} else if err == io.EOF {
|
||||
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
|
||||
up.f.putUploadBlock(buf)
|
||||
hasMoreParts = false
|
||||
err = nil
|
||||
break outer
|
||||
} else if err != nil {
|
||||
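The two hunks above adjust the chunk-reading loop: io.ReadFull either fills the whole buffer, returns io.ErrUnexpectedEOF for a final short chunk, or returns io.EOF when the previous chunk was already the last one. A self-contained sketch of that pattern, with a plain byte slice standing in for the backend's upload block pool:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	const chunkSize = 8
	in := bytes.NewReader([]byte("a stream that is not a multiple of eight bytes"))

	for part := 1; ; part++ {
		buf := make([]byte, chunkSize) // stand-in for getUploadBlock()
		n, err := io.ReadFull(in, buf)
		if err == io.ErrUnexpectedEOF {
			// short read: trim the buffer, this becomes the last part
			buf = buf[:n]
		} else if err == io.EOF {
			// nothing left to read: the previous chunk was the last one
			return
		} else if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("part %d: %q\n", part, buf)
		if err == io.ErrUnexpectedEOF {
			return
		}
	}
}
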
@@ -409,8 +409,8 @@ outer:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= int64(up.f.opt.ChunkSize) {
|
||||
reqSize = int64(up.f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
|
||||
@@ -74,18 +74,18 @@ const (
|
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
type Item struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
SequenceID string `json:"sequence_id"`
|
||||
Etag string `json:"etag"`
|
||||
SHA1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
CreatedAt Time `json:"created_at"`
|
||||
ModifiedAt Time `json:"modified_at"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
SequenceID string `json:"sequence_id"`
|
||||
Etag string `json:"etag"`
|
||||
SHA1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
|
||||
CreatedAt Time `json:"created_at"`
|
||||
ModifiedAt Time `json:"modified_at"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the item
|
||||
|
||||
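The Size field switches from int64 to float64 because Box can report very large sizes in exponent ("xEyy") notation, which encoding/json refuses to decode into an integer field (#2261); the backend then converts with int64(info.Size) where it needs bytes. A small sketch with a made-up payload showing the difference:

package main

import (
	"encoding/json"
	"fmt"
)

type itemInt struct {
	Size int64 `json:"size"`
}

type itemFloat struct {
	Size float64 `json:"size"`
}

func main() {
	// Hypothetical response with the size rendered in exponent notation.
	data := []byte(`{"size": 1.2345678901e10}`)

	var i itemInt
	fmt.Println("int64 field: ", json.Unmarshal(data, &i)) // error: not an integer literal

	var f itemFloat
	fmt.Println("float64 field:", json.Unmarshal(data, &f), int64(f.Size)) // <nil> 12345678901
}
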
@@ -16,7 +16,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -24,7 +23,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/box/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -47,6 +47,7 @@ const (
|
||||
uploadURL = "https://upload.box.com/api/2.0"
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
|
||||
defaultUploadCutoff = 50 * 1024 * 1024
|
||||
)
|
||||
|
||||
// Globals
|
||||
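For reference, the two cutoffs above use different units: minUploadCutoff is 50 MB in decimal bytes while defaultUploadCutoff is 50 MiB, so the default clears the minimum with a little headroom. The arithmetic, copied from the constants above:

package main

import "fmt"

func main() {
	const minUploadCutoff = 50000000             // 50 MB decimal, lowest allowed cutoff
	const defaultUploadCutoff = 50 * 1024 * 1024 // 50 MiB = 52428800 bytes

	fmt.Println(defaultUploadCutoff - minUploadCutoff)  // 2428800 bytes of headroom
	fmt.Println(defaultUploadCutoff >= minUploadCutoff) // true, so NewFs accepts the default
}
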
@@ -62,7 +63,6 @@ var (
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -71,27 +71,37 @@ func init() {
|
||||
Name: "box",
|
||||
Description: "Box",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("box", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("box", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Box App Client Id - leave blank normally.",
|
||||
Help: "Box App Client Id.\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Box App Client Secret - leave blank normally.",
|
||||
Help: "Box App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
}
|
||||
|
||||
// Fs represents a remote box
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the box server
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -135,9 +145,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a box path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an box 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -223,13 +230,20 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff < minUploadCutoff {
|
||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff < minUploadCutoff {
|
||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Box: %v", err)
|
||||
}
|
||||
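The pattern introduced here (and repeated for the cache backend later in this change) is: register each setting as an fs.Option with a Default, mirror it in an Options struct through config: tags, and let configstruct.Set fill that struct from the configmap.Mapper now passed to NewFs. A stripped-down sketch of that shape for a hypothetical backend; the fs.Register/fs.RegInfo wrapper is assumed from the surrounding backend code, the rest uses calls visible in this diff:

package example

import (
	"errors"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the registered fs.Options via `config:` tags.
type Options struct {
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
}

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "example", // hypothetical backend name
		Description: "Sketch of the new-style option handling",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to multipart upload.",
			Default:  fs.SizeSuffix(50 * 1024 * 1024),
			Advanced: true,
		}},
	})
}

// NewFs receives a configmap.Mapper instead of reading the config file itself.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	// ... build and return the real Fs using opt ...
	return nil, errors.New("sketch only")
}
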
@@ -237,6 +251,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
@@ -883,7 +898,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.size = int64(info.Size)
|
||||
o.sha1 = info.SHA1
|
||||
o.modTime = info.ModTime()
|
||||
o.id = info.ID
|
||||
@@ -1039,7 +1054,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Upload with simple or multipart
|
||||
if size <= int64(uploadCutoff) {
|
||||
if size <= int64(o.fs.opt.UploadCutoff) {
|
||||
err = o.upload(in, leaf, directoryID, modTime)
|
||||
} else {
|
||||
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
|
||||
@@ -1052,6 +1067,11 @@ func (o *Object) Remove() error {
|
||||
return o.fs.deleteObject(o.id)
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1062,4 +1082,5 @@ var (
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Box filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package box_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/box"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*box.Object)(nil))
|
||||
fstests.RemoteName = "TestBox:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestBox:",
|
||||
NilObject: (*box.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
574 backend/cache/cache.go (vendored)
@@ -1,44 +1,43 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/atexit"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefCacheChunkSize is the default value for chunk size
|
||||
DefCacheChunkSize = "5M"
|
||||
DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
|
||||
// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
|
||||
DefCacheTotalChunkSize = "10G"
|
||||
DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
|
||||
// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
|
||||
DefCacheChunkCleanInterval = "1m"
|
||||
DefCacheChunkCleanInterval = fs.Duration(time.Minute)
|
||||
// DefCacheInfoAge is the default value for object info age
|
||||
DefCacheInfoAge = "6h"
|
||||
DefCacheInfoAge = fs.Duration(6 * time.Hour)
|
||||
// DefCacheReadRetries is the default value for read retries
|
||||
DefCacheReadRetries = 10
|
||||
// DefCacheTotalWorkers is how many workers run in parallel to download chunks
|
||||
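The defaults above change from strings ("5M", "6h", "1m") to typed fs.SizeSuffix and fs.Duration values, which is what lets the options table further down declare them as Default: values instead of parsing strings by hand in NewFs. A small sketch of how those types behave (output shapes are indicative, not exact):

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
)

func main() {
	// fs.SizeSuffix parses suffixed sizes ("5M") and converts to plain bytes.
	var size fs.SizeSuffix
	if err := size.Set("5M"); err != nil {
		panic(err)
	}
	fmt.Println(size, int64(size)) // suffixed form, 5242880

	// fs.Duration wraps time.Duration, so config values convert back directly.
	age := fs.Duration(6 * time.Hour)
	fmt.Println(time.Duration(age)) // 6h0m0s
}
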
@@ -50,29 +49,9 @@ const (
|
||||
// DefCacheWrites will cache file data on writes through the cache
|
||||
DefCacheWrites = false
|
||||
// DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded
|
||||
DefCacheTmpWaitTime = "15m"
|
||||
DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
|
||||
// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
|
||||
DefCacheDbWaitTime = 1 * time.Second
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cacheDbPath = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
|
||||
cacheChunkPath = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
|
||||
cacheDbPurge = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
|
||||
cacheChunkSize = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
|
||||
cacheTotalChunkSize = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
|
||||
cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
|
||||
cacheInfoAge = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
|
||||
cacheReadRetries = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
|
||||
cacheTotalWorkers = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
|
||||
cacheChunkNoMemory = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
|
||||
cacheRps = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
|
||||
cacheStoreWrites = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
|
||||
cacheTempWritePath = flags.StringP("cache-tmp-upload-path", "", "", "Directory to keep temporary files until they are uploaded to the cloud storage")
|
||||
cacheTempWaitTime = flags.StringP("cache-tmp-wait-time", "", DefCacheTmpWaitTime, "How long should files be stored in local cache before being uploaded")
|
||||
cacheDbWaitTime = flags.DurationP("cache-db-wait-time", "", DefCacheDbWaitTime, "How long to wait for the DB to be available - 0 is unlimited")
|
||||
DefCacheDbWaitTime = fs.Duration(1 * time.Second)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -82,73 +61,155 @@ func init() {
|
||||
Description: "Cache a remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Name: "remote",
|
||||
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "plex_url",
|
||||
Help: "Optional: The URL of the Plex server",
|
||||
Optional: true,
|
||||
Name: "plex_url",
|
||||
Help: "The URL of the Plex server",
|
||||
}, {
|
||||
Name: "plex_username",
|
||||
Help: "Optional: The username of the Plex user",
|
||||
Optional: true,
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user",
|
||||
}, {
|
||||
Name: "plex_password",
|
||||
Help: "Optional: The password of the Plex user",
|
||||
Help: "The password of the Plex user",
|
||||
IsPassword: true,
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading. \nDefault: " + DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "1m",
|
||||
Help: "1MB",
|
||||
}, {
|
||||
Value: "5M",
|
||||
Help: "5 MB",
|
||||
}, {
|
||||
Value: "10M",
|
||||
Help: "10 MB",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "1h",
|
||||
Help: "1 hour",
|
||||
}, {
|
||||
Value: "24h",
|
||||
Help: "24 hours",
|
||||
}, {
|
||||
Value: "48h",
|
||||
Help: "48 hours",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
|
||||
Default: DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1m",
|
||||
Help: "1MB",
|
||||
}, {
|
||||
Value: "5M",
|
||||
Help: "5 MB",
|
||||
}, {
|
||||
Value: "10M",
|
||||
Help: "10 MB",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted. \nDefault: " + DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "500M",
|
||||
Help: "500 MB",
|
||||
}, {
|
||||
Value: "1G",
|
||||
Help: "1 GB",
|
||||
}, {
|
||||
Value: "10G",
|
||||
Help: "10 GB",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
|
||||
Default: DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1h",
|
||||
Help: "1 hour",
|
||||
}, {
|
||||
Value: "24h",
|
||||
Help: "24 hours",
|
||||
}, {
|
||||
Value: "48h",
|
||||
Help: "48 hours",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
|
||||
Default: DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "500M",
|
||||
Help: "500 MB",
|
||||
}, {
|
||||
Value: "1G",
|
||||
Help: "1 GB",
|
||||
}, {
|
||||
Value: "10G",
|
||||
Help: "10 GB",
|
||||
}},
|
||||
}, {
|
||||
Name: "db_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache DB",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache chunk files",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_purge",
|
||||
Default: false,
|
||||
Help: "Purge the cache DB before",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: "Interval at which chunk cleanup runs",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: "How many times to retry a read from a cache storage",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: "How many workers should run in parallel to download chunks",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: "Disable the in-memory cache for storing chunks during streaming",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: "Will cache file data on writes through the FS",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: "How long should files be stored in local cache before being uploaded",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: "How long to wait for the DB to be available - 0 is unlimited",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
PlexURL string `config:"plex_url"`
|
||||
PlexUsername string `config:"plex_username"`
|
||||
PlexPassword string `config:"plex_password"`
|
||||
PlexToken string `config:"plex_token"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
InfoAge fs.Duration `config:"info_age"`
|
||||
ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"`
|
||||
DbPath string `config:"db_path"`
|
||||
ChunkPath string `config:"chunk_path"`
|
||||
DbPurge bool `config:"db_purge"`
|
||||
ChunkCleanInterval fs.Duration `config:"chunk_clean_interval"`
|
||||
ReadRetries int `config:"read_retries"`
|
||||
TotalWorkers int `config:"workers"`
|
||||
ChunkNoMemory bool `config:"chunk_no_memory"`
|
||||
Rps int `config:"rps"`
|
||||
StoreWrites bool `config:"writes"`
|
||||
TempWritePath string `config:"tmp_upload_path"`
|
||||
TempWaitTime fs.Duration `config:"tmp_wait_time"`
|
||||
DbWaitTime fs.Duration `config:"db_wait_time"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
@@ -156,21 +217,10 @@ type Fs struct {
|
||||
|
||||
name string
|
||||
root string
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
cache *Persistent
|
||||
|
||||
fileAge time.Duration
|
||||
chunkSize int64
|
||||
chunkTotalSize int64
|
||||
chunkCleanInterval time.Duration
|
||||
readRetries int
|
||||
totalWorkers int
|
||||
totalMaxWorkers int
|
||||
chunkMemory bool
|
||||
cacheWrites bool
|
||||
tempWritePath string
|
||||
tempWriteWait time.Duration
|
||||
tempFs fs.Fs
|
||||
tempFs fs.Fs
|
||||
|
||||
lastChunkCleanup time.Time
|
||||
cleanupMu sync.Mutex
|
||||
@@ -190,9 +240,19 @@ func parseRootPath(path string) (string, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs a Fs from the path, container:path
|
||||
func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
remote := config.FileGet(name, "remote")
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
|
||||
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(opt.Remote, name+":") {
|
||||
return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
|
||||
}
|
||||
|
||||
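The new check above enforces that chunk_total_size can hold at least one chunk per worker, since each download worker may keep a chunk in flight. A worked example of that lower bound, using the chunk size and total size defaults shown in this diff and an assumed default of 4 cache workers:

package main

import "fmt"

func main() {
	const chunkSize = int64(5 * 1024 * 1024)              // DefCacheChunkSize above
	const workers = 4                                     // assumed cache-workers default
	const chunkTotalSize = int64(10 * 1024 * 1024 * 1024) // DefCacheTotalChunkSize above

	minTotal := chunkSize * workers
	fmt.Printf("minimum chunk_total_size: %d bytes\n", minTotal)          // 20 MiB
	fmt.Println("default passes the check:", chunkTotalSize >= minTotal) // true
}
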
@@ -201,7 +261,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
||||
}
|
||||
|
||||
remotePath := path.Join(remote, rpath)
|
||||
remotePath := path.Join(opt.Remote, rpath)
|
||||
wrappedFs, wrapErr := fs.NewFs(remotePath)
|
||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
|
||||
@@ -212,97 +272,46 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
fsErr = fs.ErrorIsFile
|
||||
rpath = cleanPath(path.Dir(rpath))
|
||||
}
|
||||
plexURL := config.FileGet(name, "plex_url")
|
||||
plexToken := config.FileGet(name, "plex_token")
|
||||
var chunkSize fs.SizeSuffix
|
||||
chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
|
||||
if *cacheChunkSize != DefCacheChunkSize {
|
||||
chunkSizeString = *cacheChunkSize
|
||||
}
|
||||
err = chunkSize.Set(chunkSizeString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString)
|
||||
}
|
||||
var chunkTotalSize fs.SizeSuffix
|
||||
chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
|
||||
if *cacheTotalChunkSize != DefCacheTotalChunkSize {
|
||||
chunkTotalSizeString = *cacheTotalChunkSize
|
||||
}
|
||||
err = chunkTotalSize.Set(chunkTotalSizeString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand chunk total size %v", chunkTotalSizeString)
|
||||
}
|
||||
chunkCleanIntervalStr := *cacheChunkCleanInterval
|
||||
chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
|
||||
}
|
||||
infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
|
||||
if *cacheInfoAge != DefCacheInfoAge {
|
||||
infoAge = *cacheInfoAge
|
||||
}
|
||||
infoDuration, err := time.ParseDuration(infoAge)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", infoAge)
|
||||
}
|
||||
waitTime, err := time.ParseDuration(*cacheTempWaitTime)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", *cacheTempWaitTime)
|
||||
}
|
||||
// configure cache backend
|
||||
if *cacheDbPurge {
|
||||
if opt.DbPurge {
|
||||
fs.Debugf(name, "Purging the DB")
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
fileAge: infoDuration,
|
||||
chunkSize: int64(chunkSize),
|
||||
chunkTotalSize: int64(chunkTotalSize),
|
||||
chunkCleanInterval: chunkCleanInterval,
|
||||
readRetries: *cacheReadRetries,
|
||||
totalWorkers: *cacheTotalWorkers,
|
||||
totalMaxWorkers: *cacheTotalWorkers,
|
||||
chunkMemory: !*cacheChunkNoMemory,
|
||||
cacheWrites: *cacheStoreWrites,
|
||||
lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
|
||||
tempWritePath: *cacheTempWritePath,
|
||||
tempWriteWait: waitTime,
|
||||
cleanupChan: make(chan bool, 1),
|
||||
notifiedRemotes: make(map[string]bool),
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
|
||||
cleanupChan: make(chan bool, 1),
|
||||
notifiedRemotes: make(map[string]bool),
|
||||
}
|
||||
if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) {
|
||||
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
f.chunkTotalSize, f.chunkSize, f.totalWorkers)
|
||||
}
|
||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(*cacheRps)), f.totalWorkers)
|
||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
|
||||
|
||||
f.plexConnector = &plexConnector{}
|
||||
if plexURL != "" {
|
||||
if plexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, plexURL, plexToken)
|
||||
if opt.PlexURL != "" {
|
||||
if opt.PlexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
}
|
||||
} else {
|
||||
plexUsername := config.FileGet(name, "plex_username")
|
||||
plexPassword := config.FileGet(name, "plex_password")
|
||||
if plexPassword != "" && plexUsername != "" {
|
||||
decPass, err := obscure.Reveal(plexPassword)
|
||||
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||
if err != nil {
|
||||
decPass = plexPassword
|
||||
decPass = opt.PlexPassword
|
||||
}
|
||||
f.plexConnector, err = newPlexConnector(f, plexURL, plexUsername, decPass)
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dbPath := *cacheDbPath
|
||||
chunkPath := *cacheChunkPath
|
||||
dbPath := f.opt.DbPath
|
||||
chunkPath := f.opt.ChunkPath
|
||||
// if dbPath is non-default but the chunk path is still the default, point the chunk path at the same directory as dbPath
|
||||
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
|
||||
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
|
||||
@@ -328,7 +337,8 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
fs.Infof(name, "Cache DB path: %v", dbPath)
|
||||
fs.Infof(name, "Cache chunk path: %v", chunkPath)
|
||||
f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
|
||||
PurgeDb: *cacheDbPurge,
|
||||
PurgeDb: opt.DbPurge,
|
||||
DbWaitTime: time.Duration(opt.DbWaitTime),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to start cache db")
|
||||
@@ -337,6 +347,9 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGHUP)
|
||||
atexit.Register(func() {
|
||||
if opt.PlexURL != "" {
|
||||
f.plexConnector.closeWebsocket()
|
||||
}
|
||||
f.StopBackgroundRunners()
|
||||
})
|
||||
go func() {
|
||||
@@ -349,35 +362,35 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
fs.Infof(name, "Chunk Memory: %v", f.chunkMemory)
|
||||
fs.Infof(name, "Chunk Size: %v", fs.SizeSuffix(f.chunkSize))
|
||||
fs.Infof(name, "Chunk Total Size: %v", fs.SizeSuffix(f.chunkTotalSize))
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String())
|
||||
fs.Infof(name, "Workers: %v", f.totalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.fileAge.String())
|
||||
if f.cacheWrites {
|
||||
fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
|
||||
fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
|
||||
fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
|
||||
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.opt.InfoAge)
|
||||
if f.opt.StoreWrites {
fs.Infof(name, "Cache Writes: enabled")
|
||||
}
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
err = os.MkdirAll(f.tempWritePath, os.ModePerm)
|
||||
if f.opt.TempWritePath != "" {
|
||||
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.tempWritePath)
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
||||
}
|
||||
f.tempWritePath = filepath.ToSlash(f.tempWritePath)
|
||||
f.tempFs, err = fs.NewFs(f.tempWritePath)
|
||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||
f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
||||
}
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.tempWriteWait.String())
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.tempWritePath)
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
|
||||
f.backgroundRunner, _ = initBackgroundUploader(f)
|
||||
go f.backgroundRunner.run()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(f.chunkCleanInterval)
|
||||
time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
|
||||
select {
|
||||
case <-f.cleanupChan:
|
||||
fs.Infof(f, "stopping cleanup")
|
||||
@@ -390,7 +403,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}()
|
||||
|
||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||
doChangeNotify(f.receiveChangeNotify, f.chunkCleanInterval)
|
||||
doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
@@ -399,7 +412,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// override only those features that use a temp fs when the temp fs doesn't support them
|
||||
//f.features.ChangeNotify = f.ChangeNotify
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
if f.tempFs.Features().Copy == nil {
|
||||
f.features.Copy = nil
|
||||
}
|
||||
@@ -428,12 +441,37 @@ Purge a remote from the cache backend. Supports either a directory or a file.
|
||||
Params:
|
||||
- remote = path to remote (required)
|
||||
- withData = true/false to delete cached data (chunks) as well (optional)
|
||||
|
||||
Eg
|
||||
|
||||
rclone rc cache/expire remote=path/to/sub/folder/
|
||||
rclone rc cache/expire remote=/ withData=true
|
||||
`,
|
||||
})
|
||||
|
||||
rc.Add(rc.Call{
|
||||
Path: "cache/stats",
|
||||
Fn: f.httpStats,
|
||||
Title: "Get cache stats",
|
||||
Help: `
|
||||
Show statistics for the cache remote.
|
||||
`,
|
||||
})
|
||||
|
||||
return f, fsErr
|
||||
}
|
||||
|
||||
func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
m, err := f.Stats()
|
||||
if err != nil {
|
||||
return out, errors.Errorf("error while getting cache stats")
|
||||
}
|
||||
out["status"] = "ok"
|
||||
out["stats"] = m
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
remoteInt, ok := in["remote"]
|
||||
@@ -447,18 +485,20 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
withData = true
|
||||
}
|
||||
|
||||
// if it's wrapped by crypt we need to check what format we got
|
||||
if cryptFs, yes := f.isWrappedByCrypt(); yes {
|
||||
_, err := cryptFs.DecryptFileName(remote)
|
||||
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
|
||||
if err != nil {
|
||||
remote = cryptFs.EncryptFileName(remote)
|
||||
if cleanPath(remote) != "" {
|
||||
// if it's wrapped by crypt we need to check what format we got
|
||||
if cryptFs, yes := f.isWrappedByCrypt(); yes {
|
||||
_, err := cryptFs.DecryptFileName(remote)
|
||||
// if it failed to decrypt then it is a decrypted format and we need to encrypt it
|
||||
if err != nil {
|
||||
remote = cryptFs.EncryptFileName(remote)
|
||||
}
|
||||
// else it's an encrypted format and we can use it as it is
|
||||
}
|
||||
// else it's an encrypted format and we can use it as it is
|
||||
}
|
||||
|
||||
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
|
||||
return out, errors.Errorf("%s doesn't exist in cache", remote)
|
||||
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
|
||||
return out, errors.Errorf("%s doesn't exist in cache", remote)
|
||||
}
|
||||
}
|
||||
|
||||
co := NewObject(f, remote)
|
||||
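Because the expire call can receive either form of a path when the cache wraps a crypt remote, the code above first tries DecryptFileName and only falls back to EncryptFileName when that fails, so the lookup key always ends up in the encrypted form the cache stores. The same decision in isolation, with a toy cipher standing in for the crypt backend:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// nameCipher is a stand-in for the crypt methods used above.
type nameCipher interface {
	DecryptFileName(string) (string, error)
	EncryptFileName(string) string
}

// toEncrypted normalises user input to the encrypted form the cache stores.
func toEncrypted(c nameCipher, remote string) string {
	if _, err := c.DecryptFileName(remote); err != nil {
		// not decryptable, so the caller passed a plaintext name: encrypt it
		return c.EncryptFileName(remote)
	}
	// already encrypted, use it as it is
	return remote
}

// toyCipher marks "encrypted" names with an enc- prefix, purely for the example.
type toyCipher struct{}

func (toyCipher) DecryptFileName(s string) (string, error) {
	if !strings.HasPrefix(s, "enc-") {
		return "", errors.New("not an encrypted name")
	}
	return strings.TrimPrefix(s, "enc-"), nil
}

func (toyCipher) EncryptFileName(s string) string { return "enc-" + s }

func main() {
	c := toyCipher{}
	fmt.Println(toEncrypted(c, "file.txt"))     // enc-file.txt
	fmt.Println(toEncrypted(c, "enc-file.txt")) // enc-file.txt
}
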
@@ -476,17 +516,12 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
return out, nil
|
||||
}
|
||||
// expire the entry
|
||||
co.CacheTs = time.Now().Add(f.fileAge * -1)
|
||||
err = f.cache.AddObject(co)
|
||||
err = f.cache.ExpireObject(co, withData)
|
||||
if err != nil {
|
||||
return out, errors.WithMessage(err, "error expiring file")
|
||||
}
|
||||
// notify vfs too
|
||||
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
|
||||
if withData {
|
||||
// safe to ignore as the file might not have been open
|
||||
_ = os.RemoveAll(path.Join(f.cache.dataPath, co.abs()))
|
||||
}
|
||||
|
||||
out["status"] = "ok"
|
||||
out["message"] = fmt.Sprintf("cached file cleared: %v", remote)
|
||||
@@ -495,7 +530,16 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
|
||||
|
||||
// receiveChangeNotify is a wrapper to notifications sent from the wrapped FS about changed files
|
||||
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
|
||||
fs.Debugf(f, "notify: expiring cache for '%v'", forgetPath)
|
||||
if crypt, yes := f.isWrappedByCrypt(); yes {
|
||||
decryptedPath, err := crypt.DecryptFileName(forgetPath)
|
||||
if err == nil {
|
||||
fs.Infof(decryptedPath, "received cache expiry notification")
|
||||
} else {
|
||||
fs.Infof(forgetPath, "received cache expiry notification")
|
||||
}
|
||||
} else {
|
||||
fs.Infof(forgetPath, "received cache expiry notification")
|
||||
}
|
||||
// notify upstreams too (vfs)
|
||||
f.notifyChangeUpstream(forgetPath, entryType)
|
||||
|
||||
@@ -504,27 +548,22 @@ func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
|
||||
co := NewObject(f, forgetPath)
|
||||
err := f.cache.GetObject(co)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "ignoring change notification for non cached entry %v", co)
|
||||
return
|
||||
fs.Debugf(f, "got change notification for non cached entry %v", co)
|
||||
}
|
||||
// expire the entry
|
||||
co.CacheTs = time.Now().Add(f.fileAge * -1)
|
||||
err = f.cache.AddObject(co)
|
||||
err = f.cache.ExpireObject(co, true)
|
||||
if err != nil {
|
||||
fs.Errorf(forgetPath, "notify: error expiring '%v': %v", co, err)
|
||||
} else {
|
||||
fs.Debugf(forgetPath, "notify: expired %v", co)
|
||||
fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err)
|
||||
}
|
||||
cd = NewDirectory(f, cleanPath(path.Dir(co.Remote())))
|
||||
} else {
|
||||
cd = NewDirectory(f, forgetPath)
|
||||
// we expire the dir
|
||||
err := f.cache.ExpireDir(cd)
|
||||
if err != nil {
|
||||
fs.Errorf(forgetPath, "notify: error expiring '%v': %v", cd, err)
|
||||
} else {
|
||||
fs.Debugf(forgetPath, "notify: expired '%v'", cd)
|
||||
}
|
||||
}
|
||||
// we expire the dir
|
||||
err := f.cache.ExpireDir(cd)
|
||||
if err != nil {
|
||||
fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err)
|
||||
} else {
|
||||
fs.Debugf(forgetPath, "notify: expired '%v'", cd)
|
||||
}
|
||||
|
||||
f.notifiedMu.Lock()
|
||||
@@ -536,7 +575,7 @@ func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
|
||||
// notifyChangeUpstreamIfNeeded will check if the wrapped remote doesn't notify on changes
|
||||
// or if we use a temp fs
|
||||
func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
|
||||
if f.Fs.Features().ChangeNotify == nil || f.tempWritePath != "" {
|
||||
if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
|
||||
f.notifyChangeUpstream(remote, entryType)
|
||||
}
|
||||
}
|
||||
@@ -586,17 +625,17 @@ func (f *Fs) String() string {
|
||||
|
||||
// ChunkSize returns the configured chunk size
|
||||
func (f *Fs) ChunkSize() int64 {
|
||||
return f.chunkSize
|
||||
return int64(f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
// InfoAge returns the configured file age
|
||||
func (f *Fs) InfoAge() time.Duration {
|
||||
return f.fileAge
|
||||
return time.Duration(f.opt.InfoAge)
|
||||
}
|
||||
|
||||
// TempUploadWaitTime returns the configured temp file upload wait time
|
||||
func (f *Fs) TempUploadWaitTime() time.Duration {
|
||||
return f.tempWriteWait
|
||||
return time.Duration(f.opt.TempWaitTime)
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
@@ -609,17 +648,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
err = f.cache.GetObject(co)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "find: error: %v", err)
|
||||
} else if time.Now().After(co.CacheTs.Add(f.fileAge)) {
|
||||
} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
fs.Debugf(co, "find: cold object: %+v", co)
|
||||
} else {
|
||||
fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(f.fileAge))
|
||||
fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
|
||||
return co, nil
|
||||
}
|
||||
|
||||
// search for entry in source or temp fs
|
||||
var obj fs.Object
|
||||
err = nil
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
obj, err = f.tempFs.NewObject(remote)
|
||||
// not found in temp fs
|
||||
if err != nil {
|
||||
@@ -653,13 +691,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries, err = f.cache.GetDirEntries(cd)
|
||||
if err != nil {
|
||||
fs.Debugf(dir, "list: error: %v", err)
|
||||
} else if time.Now().After(cd.CacheTs.Add(f.fileAge)) {
|
||||
} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
|
||||
} else if len(entries) == 0 {
|
||||
// TODO: read empty dirs from source?
|
||||
fs.Debugf(dir, "list: empty listing")
|
||||
} else {
|
||||
fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(f.fileAge))
|
||||
fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
|
||||
fs.Debugf(dir, "list: cached entries: %v", entries)
|
||||
return entries, nil
|
||||
}
|
||||
@@ -667,7 +705,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
|
||||
// we first search any temporary files stored locally
|
||||
var cachedEntries fs.DirEntries
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
|
||||
if err != nil {
|
||||
fs.Errorf(dir, "list: error getting pending uploads: %v", err)
|
||||
@@ -697,6 +735,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
fs.Debugf(dir, "list: source entries: %v", entries)
|
||||
|
||||
// and then iterate over the ones from source (temp Objects will override source ones)
|
||||
var batchDirectories []*Directory
|
||||
for _, entry := range entries {
|
||||
switch o := entry.(type) {
|
||||
case fs.Object:
|
||||
@@ -717,19 +756,20 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
case fs.Directory:
|
||||
cdd := DirectoryFromOriginal(f, o)
|
||||
// check if the dir isn't expired and add it in cache if it isn't
|
||||
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(f.fileAge)) {
|
||||
err := f.cache.AddDir(cdd)
|
||||
if err != nil {
|
||||
fs.Errorf(dir, "list: error caching dir from listing %v", o)
|
||||
} else {
|
||||
fs.Debugf(dir, "list: cached dir: %v", cdd)
|
||||
}
|
||||
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
batchDirectories = append(batchDirectories, cdd)
|
||||
}
|
||||
cachedEntries = append(cachedEntries, cdd)
|
||||
default:
|
||||
fs.Debugf(entry, "list: Unknown object type %T", entry)
|
||||
}
|
||||
}
|
||||
err = f.cache.AddBatchDir(batchDirectories)
|
||||
if err != nil {
|
||||
fs.Errorf(dir, "list: error caching directories from listing %v", dir)
|
||||
} else {
|
||||
fs.Debugf(dir, "list: cached directories: %v", len(batchDirectories))
|
||||
}
|
||||
|
||||
// cache dir meta
|
||||
t := time.Now()
|
||||
@@ -839,7 +879,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
fs.Debugf(f, "rmdir '%s'", dir)
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// pause background uploads
|
||||
f.backgroundRunner.pause()
|
||||
defer f.backgroundRunner.play()
|
||||
@@ -924,7 +964,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// pause background uploads
|
||||
f.backgroundRunner.pause()
|
||||
defer f.backgroundRunner.play()
|
||||
@@ -1051,7 +1091,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
|
||||
go func() {
|
||||
var offset int64
|
||||
for {
|
||||
chunk := make([]byte, f.chunkSize)
|
||||
chunk := make([]byte, f.opt.ChunkSize)
|
||||
readSize, err := io.ReadFull(pr, chunk)
|
||||
// we ignore 3 failures which are ok:
|
||||
// 1. EOF - original reading finished and we got a full buffer too
|
||||
@@ -1099,7 +1139,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
var obj fs.Object
|
||||
|
||||
// queue for upload and store in temp fs if configured
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// we need to clear the caches before a put through temp fs
|
||||
parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
|
||||
_ = f.cache.ExpireDir(parentCd)
|
||||
@@ -1118,7 +1158,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
}
|
||||
fs.Infof(obj, "put: queued for upload")
|
||||
// if cache writes is enabled write it first through cache
|
||||
} else if f.cacheWrites {
|
||||
} else if f.opt.StoreWrites {
|
||||
f.cacheReader(in, src, func(inn io.Reader) {
|
||||
obj, err = put(inn, src, options...)
|
||||
})
|
||||
@@ -1139,8 +1179,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
}
|
||||
|
||||
// cache the new file
|
||||
cachedObj := ObjectFromOriginal(f, obj).persist()
|
||||
cachedObj := ObjectFromOriginal(f, obj)
|
||||
|
||||
// deleting cached chunks and info to be replaced with new ones
|
||||
_ = f.cache.RemoveObject(cachedObj.abs())
|
||||
|
||||
cachedObj.persist()
|
||||
fs.Debugf(cachedObj, "put: added to cache")
|
||||
|
||||
// expire parent
|
||||
parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
|
||||
err = f.cache.ExpireDir(parentCd)
|
||||
@@ -1209,7 +1255,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is still active
if f.tempWritePath == "" {
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
@@ -1285,7 +1331,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// if this is a temp object then we perform the changes locally
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is still active
if f.tempWritePath == "" {
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
@@ -1389,6 +1435,15 @@ func (f *Fs) CleanUp() error {
|
||||
return do()
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
}
|
||||
return do()
|
||||
}
|
||||
|
||||
// Stats returns stats about the cache storage
|
||||
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
|
||||
return f.cache.Stats()
|
||||
@@ -1417,8 +1472,8 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
f.cleanupMu.Lock()
|
||||
defer f.cleanupMu.Unlock()
|
||||
|
||||
if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkCleanInterval)) {
|
||||
f.cache.CleanChunksBySize(f.chunkTotalSize)
|
||||
if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
|
||||
f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
|
||||
f.lastChunkCleanup = time.Now()
|
||||
}
|
||||
}
|
||||
@@ -1427,7 +1482,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
// can be triggered from a terminate signal or from testing between runs
|
||||
func (f *Fs) StopBackgroundRunners() {
|
||||
f.cleanupChan <- false
|
||||
if f.tempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
|
||||
if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
|
||||
f.backgroundRunner.close()
|
||||
}
|
||||
f.cache.Close()
|
||||
@@ -1485,7 +1540,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
// GetBackgroundUploadChannel returns a channel that can be listened to for remote activities that happen
|
||||
// in the background
|
||||
func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
return f.backgroundRunner.notifyCh
|
||||
}
|
||||
return nil
|
||||
@@ -1527,4 +1582,5 @@ var (
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
)
|
||||
740 backend/cache/cache_internal_test.go (vendored)
@@ -1,4 +1,4 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
@@ -33,13 +33,13 @@ import (
|
||||
"github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/rc/rcflags"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
flag "github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -50,6 +50,7 @@ const (
|
||||
cryptedTextBase64 = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ==" // one content
|
||||
cryptedText2Base64 = "UkNMT05FAAATcQkVsgjBh8KafCKcr0wdTa1fMmV0U8hsCLGFoqcvxKVmvv7wx3Hf5EXxFcki2FFV4sdpmSrb9Q==" // updated content
|
||||
cryptedText3Base64 = "UkNMT05FAAB/f7YtYKbPfmk9+OX/ffN3qG3OEdWT+z74kxCX9V/YZwJ4X2DN3HOnUC3gKQ4Gcoud5UtNvQ==" // test content
|
||||
letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -132,13 +133,14 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
||||
require.Len(t, listInner, 1)
|
||||
}
|
||||
|
||||
/* TODO: is this testing something?
|
||||
func TestInternalVfsCache(t *testing.T) {
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 30
|
||||
testSize := int64(524288000)
|
||||
|
||||
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
@@ -229,6 +231,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
cacheCh <- true
|
||||
readCh <- true
|
||||
}
|
||||
*/
|
||||
|
||||
func TestInternalObjWrapFsFound(t *testing.T) {
|
||||
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
|
||||
@@ -308,7 +311,7 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
||||
chunkSize := cfs.ChunkSize()
|
||||
|
||||
// create some rand test data
|
||||
testData := runInstance.randomBytes(t, chunkSize*4+chunkSize/2)
|
||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
||||
|
||||
// write the object
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
@@ -385,27 +388,19 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
|
||||
// create some rand test data
|
||||
testSize := chunkSize*4 + chunkSize/2
|
||||
testData := runInstance.randomBytes(t, testSize)
|
||||
testData := randStringBytes(int(testSize))
|
||||
|
||||
// write the object
|
||||
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||
require.Equal(t, o.Size(), int64(testSize))
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(data2)), o.Size())
|
||||
|
||||
// check sample of data from in-file
|
||||
sampleStart := chunkSize / 2
|
||||
sampleEnd := chunkSize
|
||||
testSample := testData[sampleStart:sampleEnd]
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(checkSample), len(testSample))
|
||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||
|
||||
for i := 0; i < len(checkSample); i++ {
|
||||
require.Equal(t, testSample[i], checkSample[i])
|
||||
require.Equal(t, testData[i], checkSample[i])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -424,7 +419,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
|
||||
// create some rand test data
|
||||
testSize := chunkSize*10 + chunkSize/2
|
||||
testData := runInstance.randomBytes(t, testSize)
|
||||
testData := randStringBytes(int(testSize))
|
||||
|
||||
// write the object
|
||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||
@@ -447,7 +442,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
chunkSize := cfs.ChunkSize()
|
||||
|
||||
// create some rand test data
|
||||
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
|
||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
|
||||
// update in the wrapped fs
|
||||
@@ -579,6 +574,93 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
id := fmt.Sprintf("tincep%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("Not external")
|
||||
}
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
|
||||
srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test")
|
||||
dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test2")
|
||||
// create some rand test data
|
||||
var testData []byte
|
||||
if runInstance.rootIsCrypt {
|
||||
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
testData = []byte("test content")
|
||||
}
|
||||
err = rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("test/one")
|
||||
require.NoError(t, err)
|
||||
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
|
||||
|
||||
// list in mount
|
||||
_, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
_, err = runInstance.list(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
|
||||
found := boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, found)
|
||||
boltDb.Purge()
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.False(t, found)
|
||||
|
||||
// move file
|
||||
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = runInstance.retryBlock(func() error {
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
if !found {
|
||||
log.Printf("not found /test")
|
||||
return errors.Errorf("not found /test")
|
||||
}
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
if !found {
|
||||
log.Printf("not found /test/one")
|
||||
return errors.Errorf("not found /test/one")
|
||||
}
|
||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
||||
if !found {
|
||||
log.Printf("not found /test/one/test2")
|
||||
return errors.Errorf("not found /test/one/test2")
|
||||
}
|
||||
li, err := runInstance.list(t, rootFs, "test/one")
|
||||
if err != nil {
|
||||
log.Printf("err: %v", err)
|
||||
return err
|
||||
}
|
||||
if len(li) != 1 {
|
||||
log.Printf("not expected listing /test/one: %v", li)
|
||||
return errors.Errorf("not expected listing /test/one: %v", li)
|
||||
}
|
||||
if fi, ok := li[0].(os.FileInfo); ok {
|
||||
if fi.Name() != "test2" {
|
||||
log.Printf("not expected name: %v", fi.Name())
|
||||
return errors.Errorf("not expected name: %v", fi.Name())
|
||||
}
|
||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||
if di.Remote() != "test/one/test2" {
|
||||
log.Printf("not expected remote: %v", di.Remote())
|
||||
return errors.Errorf("not expected remote: %v", di.Remote())
|
||||
}
|
||||
} else {
|
||||
log.Printf("unexpected listing: %v", li)
|
||||
return errors.Errorf("unexpected listing: %v", li)
|
||||
}
|
||||
log.Printf("complete listing /test/one/test2")
|
||||
return nil
|
||||
}, 12, time.Second*10)
|
||||
require.NoError(t, err)
|
||||
}
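The polling loop above goes through runInstance.retryBlock, whose implementation is outside this excerpt. As a rough sketch, assuming a conventional retry helper with the shape inferred from the call site retryBlock(func() error {...}, 12, time.Second*10); the real code in the repository may differ:

// Assumed shape of the retry helper: run fn up to maxRetries times,
// sleeping rest between attempts, and return the last error if it never succeeds.
func (r *run) retryBlock(fn func() error, maxRetries int, rest time.Duration) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		err = fn()
		if err == nil {
			return nil
		}
		time.Sleep(rest)
	}
	return err
}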
|
||||
|
||||
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
@@ -589,7 +671,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
chunkSize := cfs.ChunkSize()
|
||||
|
||||
// create some rand test data
|
||||
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
|
||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
|
||||
// update in the wrapped fs
|
||||
@@ -617,19 +699,22 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
rc.Start(&rcflags.Opt)
|
||||
|
||||
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
if !runInstance.useMount {
|
||||
t.Skipf("needs mount")
|
||||
}
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("needs drive")
|
||||
}
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
chunkSize := cfs.ChunkSize()
|
||||
|
||||
// create some rand test data
|
||||
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
|
||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
|
||||
// update in the wrapped fs
|
||||
@@ -660,11 +745,36 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
co, err = rootFs.NewObject("data.bin")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
|
||||
// create some rand test data
|
||||
testData2 := randStringBytes(int(chunkSize))
|
||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||
|
||||
// list should have 1 item only
|
||||
li1, err = runInstance.list(t, rootFs, "")
|
||||
require.Len(t, li1, 1)
|
||||
|
||||
m = make(map[string]string)
|
||||
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = res2.Body.Close()
|
||||
}()
|
||||
_ = json.NewDecoder(res2.Body).Decode(&m)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
require.Contains(t, m["message"], "cached directory cleared")
|
||||
|
||||
// list should have 2 items now
|
||||
li2, err := runInstance.list(t, rootFs, "")
|
||||
require.Len(t, li2, 2)
|
||||
}
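The test above drives cache invalidation over the remote control API. The same POST to /cache/expire can be issued outside the test while an rclone rc server is running; a small sketch condensed from the calls above (port 5572 is the rc default set up via rcflags in this test, and the helper name is illustrative):

// Illustrative only: ask a running rclone rc server to drop a cached directory.
func expireCacheDir(remote string) (map[string]string, error) {
	resp, err := http.Post("http://localhost:5572/cache/expire?remote="+remote,
		"application/json; charset=utf-8", strings.NewReader(""))
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	m := make(map[string]string)
	err = json.NewDecoder(resp.Body).Decode(&m)
	return m, err
}

A successful reply carries status "ok" and a message containing "cached directory cleared", which is exactly what the assertions above check.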
|
||||
|
||||
func TestInternalCacheWrites(t *testing.T) {
|
||||
id := "ticw"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
@@ -673,7 +783,7 @@ func TestInternalCacheWrites(t *testing.T) {
|
||||
|
||||
// create some rand test data
|
||||
earliestTime := time.Now()
|
||||
testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
|
||||
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
expectedTs := time.Now()
|
||||
ts, err := boltDb.GetChunkTs(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "data.bin")), 0)
|
||||
@@ -683,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {
|
||||
|
||||
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
@@ -692,7 +802,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
totalChunks := 20
|
||||
|
||||
// create some rand test data
|
||||
testData := runInstance.randomBytes(t, (int64(totalChunks-1)*chunkSize + chunkSize/2))
|
||||
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
|
||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||
require.NoError(t, err)
|
||||
@@ -758,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {
|
||||
|
||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
|
||||
map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
|
||||
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
@@ -805,465 +915,10 @@ func TestInternalBug2117(t *testing.T) {
|
||||
require.Len(t, di, 4)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
tf, err := ioutil.ReadDir(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, tf, 0)
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work cause of previous
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
// FIXME, enable this when mount is sorted out
|
||||
//func TestInternalFilesMissingInMount1904(t *testing.T) {
|
||||
// t.Skip("Not yet")
|
||||
// if runtime.GOOS == "windows" {
|
||||
// t.Skip("Not yet")
|
||||
// }
|
||||
// id := "tifm1904"
|
||||
// rootFs, _ := newCacheFs(t, RemoteName, id, false,
|
||||
// map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
|
||||
// mntPoint := path.Join("/tmp", "tifm1904-mnt")
|
||||
// testPoint := path.Join(mntPoint, id)
|
||||
// checkOutput := "1 10 100 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 27 28 29 3 30 31 32 33 34 35 36 37 38 39 4 40 41 42 43 44 45 46 47 48 49 5 50 51 52 53 54 55 56 57 58 59 6 60 61 62 63 64 65 66 67 68 69 7 70 71 72 73 74 75 76 77 78 79 8 80 81 82 83 84 85 86 87 88 89 9 90 91 92 93 94 95 96 97 98 99 "
|
||||
//
|
||||
// _ = os.MkdirAll(mntPoint, os.ModePerm)
|
||||
//
|
||||
// list, err := rootFs.List("")
|
||||
// require.NoError(t, err)
|
||||
// found := false
|
||||
// list.ForDir(func(d fs.Directory) {
|
||||
// if strings.Contains(d.Remote(), id) {
|
||||
// found = true
|
||||
// }
|
||||
// })
|
||||
//
|
||||
// if !found {
|
||||
// t.Skip("Test folder '%v' doesn't exist", id)
|
||||
// }
|
||||
//
|
||||
// mountFs(t, rootFs, mntPoint)
|
||||
// defer unmountFs(t, mntPoint)
|
||||
//
|
||||
// for i := 1; i <= 2; i++ {
|
||||
// out, err := exec.Command("ls", testPoint).Output()
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1))
|
||||
// t.Logf("root path has all files")
|
||||
// _ = writeObjectString(t, rootFs, path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"), "one content")
|
||||
//
|
||||
// for j := 1; j <= 100; j++ {
|
||||
// out, err := exec.Command("ls", path.Join(testPoint, strconv.Itoa(j))).Output()
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1), "'%v' doesn't match", j)
|
||||
// }
|
||||
// obj, err := rootFs.NewObject(path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"))
|
||||
// require.NoError(t, err)
|
||||
// err = obj.Remove()
|
||||
// require.NoError(t, err)
|
||||
// t.Logf("folders contain all the files")
|
||||
//
|
||||
// out, err = exec.Command("date").Output()
|
||||
// require.NoError(t, err)
|
||||
// t.Logf("check #%v date: '%v'", i, strings.Replace(string(out), "\n", " ", -1))
|
||||
//
|
||||
// if i < 2 {
|
||||
// time.Sleep(time.Second * 60)
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
// run holds the remotes for a test run
|
||||
type run struct {
|
||||
okDiff time.Duration
|
||||
allCfgMap map[string]string
|
||||
allFlagMap map[string]string
|
||||
runDefaultCfgMap map[string]string
|
||||
runDefaultFlagMap map[string]string
|
||||
runDefaultCfgMap configmap.Simple
|
||||
mntDir string
|
||||
tmpUploadDir string
|
||||
useMount bool
|
||||
@@ -1287,38 +942,16 @@ func newRun() *run {
|
||||
isMounted: false,
|
||||
}
|
||||
|
||||
r.allCfgMap = map[string]string{
|
||||
"plex_url": "",
|
||||
"plex_username": "",
|
||||
"plex_password": "",
|
||||
"chunk_size": cache.DefCacheChunkSize,
|
||||
"info_age": cache.DefCacheInfoAge,
|
||||
"chunk_total_size": cache.DefCacheTotalChunkSize,
|
||||
// Read in all the defaults for all the options
|
||||
fsInfo, err := fs.Find("cache")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
|
||||
}
|
||||
r.allFlagMap = map[string]string{
|
||||
"cache-db-path": filepath.Join(config.CacheDir, "cache-backend"),
|
||||
"cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"),
|
||||
"cache-db-purge": "true",
|
||||
"cache-chunk-size": cache.DefCacheChunkSize,
|
||||
"cache-total-chunk-size": cache.DefCacheTotalChunkSize,
|
||||
"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
|
||||
"cache-info-age": cache.DefCacheInfoAge,
|
||||
"cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries),
|
||||
"cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers),
|
||||
"cache-chunk-no-memory": "false",
|
||||
"cache-rps": strconv.Itoa(cache.DefCacheRps),
|
||||
"cache-writes": "false",
|
||||
"cache-tmp-upload-path": "",
|
||||
"cache-tmp-wait-time": cache.DefCacheTmpWaitTime,
|
||||
}
|
||||
r.runDefaultCfgMap = make(map[string]string)
|
||||
for key, value := range r.allCfgMap {
|
||||
r.runDefaultCfgMap[key] = value
|
||||
}
|
||||
r.runDefaultFlagMap = make(map[string]string)
|
||||
for key, value := range r.allFlagMap {
|
||||
r.runDefaultFlagMap[key] = value
|
||||
r.runDefaultCfgMap = configmap.Simple{}
|
||||
for _, option := range fsInfo.Options {
|
||||
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
|
||||
}
|
||||
|
||||
if mountDir == "" {
|
||||
if runtime.GOOS != "windows" {
|
||||
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
|
||||
@@ -1395,7 +1028,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
config.FileSet(remote, "type", "cache")
|
||||
config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
|
||||
} else {
|
||||
remoteType := fs.ConfigFileGet(remote, "type", "")
|
||||
remoteType := config.FileGet(remote, "type", "")
|
||||
if remoteType == "" {
|
||||
t.Skipf("skipped due to invalid remote type for %v", remote)
|
||||
return nil, nil
|
||||
@@ -1406,14 +1039,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
config.FileSet(remote, "password", cryptPassword1)
|
||||
config.FileSet(remote, "password2", cryptPassword2)
|
||||
}
|
||||
remoteRemote := fs.ConfigFileGet(remote, "remote", "")
|
||||
remoteRemote := config.FileGet(remote, "remote", "")
|
||||
if remoteRemote == "" {
|
||||
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
||||
return nil, nil
|
||||
}
|
||||
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
||||
remoteWrapping := remoteRemoteParts[0]
|
||||
remoteType := fs.ConfigFileGet(remoteWrapping, "type", "")
|
||||
remoteType := config.FileGet(remoteWrapping, "type", "")
|
||||
if remoteType != "cache" {
|
||||
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
||||
return nil, nil
|
||||
@@ -1428,28 +1061,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
||||
require.NoError(t, err)
|
||||
|
||||
for k, v := range r.runDefaultCfgMap {
|
||||
if c, ok := cfg[k]; ok {
|
||||
config.FileSet(cacheRemote, k, c)
|
||||
} else {
|
||||
config.FileSet(cacheRemote, k, v)
|
||||
}
|
||||
}
|
||||
for k, v := range r.runDefaultFlagMap {
|
||||
if c, ok := flags[k]; ok {
|
||||
_ = flag.Set(k, c)
|
||||
} else {
|
||||
_ = flag.Set(k, v)
|
||||
}
|
||||
}
|
||||
fs.Config.LowLevelRetries = 1
|
||||
|
||||
m := configmap.Simple{}
|
||||
for k, v := range r.runDefaultCfgMap {
|
||||
m.Set(k, v)
|
||||
}
|
||||
for k, v := range flags {
|
||||
m.Set(k, v)
|
||||
}
|
||||
|
||||
// Instantiate root
|
||||
if purge {
|
||||
boltDb.PurgeTempUploads()
|
||||
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
|
||||
}
|
||||
f, err := fs.NewFs(remote + ":" + id)
|
||||
f, err := cache.NewFs(remote, id, m)
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
require.NoError(t, err)
|
||||
@@ -1499,18 +1126,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
}
|
||||
r.tempFiles = nil
|
||||
debug.FreeOSMemory()
|
||||
for k, v := range r.runDefaultFlagMap {
|
||||
_ = flag.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *run) randomBytes(t *testing.T, size int64) []byte {
|
||||
testData := make([]byte, size)
|
||||
testSize, err := rand.Read(testData)
|
||||
require.Equal(t, size, int64(len(testData)))
|
||||
require.Equal(t, size, int64(testSize))
|
||||
require.NoError(t, err)
|
||||
return testData
|
||||
}
|
||||
|
||||
func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
@@ -1521,12 +1136,12 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(cnt); i++ {
|
||||
data := r.randomBytes(t, chunk)
|
||||
data := randStringBytes(int(chunk))
|
||||
_, _ = f.Write(data)
|
||||
}
|
||||
data := r.randomBytes(t, int64(left))
|
||||
data := randStringBytes(int(left))
|
||||
_, _ = f.Write(data)
|
||||
_, _ = f.Seek(int64(0), 0)
|
||||
_, _ = f.Seek(int64(0), io.SeekStart)
|
||||
r.tempFiles = append(r.tempFiles, f)
|
||||
|
||||
return f
|
||||
@@ -1535,7 +1150,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
|
||||
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
|
||||
// create some rand test data
|
||||
testData := r.randomBytes(t, size)
|
||||
testData := randStringBytes(int(size))
|
||||
|
||||
r.writeRemoteBytes(t, f, remote, testData)
|
||||
return remote
|
||||
@@ -1544,7 +1159,7 @@ func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64
|
||||
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
|
||||
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
|
||||
// create some rand test data
|
||||
testData := r.randomBytes(t, size)
|
||||
testData := randStringBytes(int(size))
|
||||
|
||||
return r.writeObjectBytes(t, f, remote, testData)
|
||||
}
|
||||
@@ -1653,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
||||
if err != nil {
|
||||
return checkSample, err
|
||||
}
|
||||
_, _ = f.Seek(offset, 0)
|
||||
_, _ = f.Seek(offset, io.SeekStart)
|
||||
totalRead, err := io.ReadFull(f, checkSample)
|
||||
checkSample = checkSample[:totalRead]
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
@@ -1662,9 +1277,6 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
||||
if err != nil {
|
||||
return checkSample, err
|
||||
}
|
||||
if !noLengthCheck && size != int64(totalRead) {
|
||||
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", totalRead, size)
|
||||
}
|
||||
} else {
|
||||
co, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
@@ -1688,7 +1300,7 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
|
||||
err = nil
|
||||
checkSample = checkSample[:totalRead]
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err, "with string -%v-", string(checkSample))
|
||||
_ = reader.Close()
|
||||
return checkSample
|
||||
}
|
||||
@@ -1897,16 +1509,11 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = f.WriteString(data + append)
|
||||
if err != nil {
|
||||
defer func() {
|
||||
_ = f.Close()
|
||||
return err
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.vfs.WaitForWriters(10 * time.Second)
|
||||
r.vfs.WaitForWriters(10 * time.Second)
|
||||
}()
|
||||
_, err = f.WriteString(data + append)
|
||||
} else {
|
||||
obj1, err := rootFs.NewObject(src)
|
||||
if err != nil {
|
||||
@@ -1916,9 +1523,6 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
r := bytes.NewReader(data1)
|
||||
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
||||
err = obj1.Update(r, objInfo1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
@@ -2055,6 +1659,14 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
|
||||
return nil, errors.New("didn't found a cache fs")
|
||||
}
|
||||
|
||||
func randStringBytes(n int) []byte {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = letterBytes[rand.Intn(len(letterBytes))]
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Fs = (*cache.Fs)(nil)
|
||||
_ fs.Fs = (*local.Fs)(nil)
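Taken together, the changes to newRun and newCacheFs above replace the old flag-driven test setup with backend options resolved through configmap. Condensed from the code above into one sketch (the option names writes and info_age are the ones the tests pass; error handling trimmed, remote and directory names illustrative):

// Illustrative condensation of the new setup: start from the registered cache
// defaults, override a couple of options, then build the backend directly.
fsInfo, err := fs.Find("cache")
if err != nil {
	panic(err)
}
m := configmap.Simple{}
for _, option := range fsInfo.Options {
	m.Set(option.Name, fmt.Sprint(option.Default))
}
m.Set("writes", "true")  // was the --cache-writes flag
m.Set("info_age", "1h")  // was the --cache-info-age flag
f, err := cache.NewFs("TestCache", "test-dir", m)
_ = f // use the cache-wrapped Fs like any other fs.Fs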
4
backend/cache/cache_mount_unix_test.go
vendored
@@ -1,4 +1,4 @@
// +build !plan9,!windows,go1.7
// +build !plan9,!windows

package cache_test

@@ -23,7 +23,7 @@ func (r *run) mountFs(t *testing.T, f fs.Fs) {
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
fuse.AllowOther(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
|
||||
2
backend/cache/cache_mount_windows_test.go
vendored
@@ -1,4 +1,4 @@
// +build windows,go1.7
// +build windows

package cache_test
|
||||
|
||||
72
backend/cache/cache_test.go
vendored
@@ -1,9 +1,6 @@
// Test Cache filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build !plan9,go1.7
// +build !plan9

package cache_test

@@ -12,68 +9,13 @@ import (

"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*cache.Object)(nil))
fstests.RemoteName = "TestCache:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
2
backend/cache/cache_unsupported.go
vendored
@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 !go1.7
// +build plan9

package cache
|
||||
455
backend/cache/cache_upload_test.go
vendored
Normal file
@@ -0,0 +1,455 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(1048576)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
testReader2 := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
|
||||
require.False(t, os.IsNotExist(err))
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
var started bool
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work cause of previous
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
455
backend/cache/cache_upload_test.go.orig
vendored
Normal file
@@ -0,0 +1,455 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(1048576)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
testReader2 := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
|
||||
require.False(t, os.IsNotExist(err))
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
var started bool
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work cause of previous
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
12
backend/cache/cache_upload_test.go.rej
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
--- cache_upload_test.go
|
||||
+++ cache_upload_test.go
|
||||
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
}
|
||||
r.tempFiles = nil
|
||||
debug.FreeOSMemory()
|
||||
- for k, v := range r.runDefaultFlagMap {
|
||||
- _ = flag.Set(k, v)
|
||||
- }
|
||||
}
|
||||
|
||||
func (r *run) randomBytes(t *testing.T, size int64) []byte {
|
||||
2
backend/cache/directory.go
vendored
@@ -1,4 +1,4 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
95
backend/cache/handle.go
vendored
@@ -1,11 +1,10 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -66,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
|
||||
offset: 0,
|
||||
preloadOffset: -1, // -1 to trigger the first preload
|
||||
|
||||
UseMemory: cfs.chunkMemory,
|
||||
UseMemory: !cfs.opt.ChunkNoMemory,
|
||||
reading: false,
|
||||
}
|
||||
r.seenOffsets = make(map[int64]bool)
|
||||
r.memory = NewMemory(-1)
|
||||
|
||||
// create a larger buffer to queue up requests
|
||||
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
|
||||
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
|
||||
r.confirmReading = make(chan bool)
|
||||
r.startReadWorkers()
|
||||
return r
|
||||
@@ -99,7 +98,7 @@ func (r *Handle) startReadWorkers() {
|
||||
if r.hasAtLeastOneWorker() {
|
||||
return
|
||||
}
|
||||
totalWorkers := r.cacheFs().totalWorkers
|
||||
totalWorkers := r.cacheFs().opt.TotalWorkers
|
||||
|
||||
if r.cacheFs().plexConnector.isConfigured() {
|
||||
if !r.cacheFs().plexConnector.isConnected() {
|
||||
@@ -146,36 +145,18 @@ func (r *Handle) scaleWorkers(desired int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Handle) requestExternalConfirmation() {
|
||||
// if there's no external confirmation available
|
||||
// then we skip this step
|
||||
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
|
||||
!r.cacheFs().plexConnector.isConnected() {
|
||||
return
|
||||
}
|
||||
go r.cacheFs().plexConnector.isPlayingAsync(r.cachedObject, r.confirmReading)
|
||||
}
|
||||
|
||||
func (r *Handle) confirmExternalReading() {
|
||||
// if we have a max value of workers
|
||||
// or there's no external confirmation available
|
||||
// then we skip this step
|
||||
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
|
||||
!r.cacheFs().plexConnector.isConnected() {
|
||||
if len(r.workers) > 1 ||
|
||||
!r.cacheFs().plexConnector.isConfigured() {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case confirmed := <-r.confirmReading:
|
||||
if !confirmed {
|
||||
return
|
||||
}
|
||||
default:
|
||||
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
|
||||
return
|
||||
}
|
||||
|
||||
fs.Infof(r, "confirmed reading by external reader")
|
||||
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
|
||||
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
|
||||
}
|
||||
|
||||
// queueOffset will send an offset to the workers if it's different from the last one
|
||||
@@ -198,7 +179,7 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
}
|
||||
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
|
||||
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
||||
if o < 0 || o >= r.cachedObject.Size() {
|
||||
continue
|
||||
}
|
||||
@@ -209,8 +190,6 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
r.seenOffsets[o] = true
|
||||
r.preloadQueue <- o
|
||||
}
|
||||
|
||||
r.requestExternalConfirmation()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
var err error
|
||||
|
||||
// we calculate the modulus of the requested offset with the size of a chunk
|
||||
offset := chunkStart % r.cacheFs().chunkSize
|
||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||
|
||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||
chunkStart = chunkStart - offset
|
||||
@@ -249,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
if !found {
|
||||
// we're gonna give the workers a chance to pickup the chunk
|
||||
// and retry a couple of times
|
||||
for i := 0; i < r.cacheFs().readRetries*8; i++ {
|
||||
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
|
||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
found = true
|
||||
@@ -274,9 +253,9 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
|
||||
// first chunk will be aligned with the start
|
||||
if offset > 0 {
|
||||
if offset >= int64(len(data)) {
|
||||
if offset > int64(len(data)) {
|
||||
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
|
||||
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
|
||||
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
data = data[int(offset):]
|
||||
@@ -294,7 +273,6 @@ func (r *Handle) Read(p []byte) (n int, err error) {
|
||||
// first reading
|
||||
if !r.reading {
|
||||
r.reading = true
|
||||
r.requestExternalConfirmation()
|
||||
}
|
||||
// reached EOF
|
||||
if r.offset >= r.cachedObject.Size() {
|
||||
@@ -302,8 +280,10 @@ func (r *Handle) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
currentOffset := r.offset
|
||||
buf, err = r.getChunk(currentOffset)
|
||||
if err != nil && len(buf) == 0 {
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
|
||||
}
|
||||
if len(buf) == 0 && err != io.ErrUnexpectedEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
readSize := copy(p, buf)
|
||||
@@ -332,6 +312,7 @@ func (r *Handle) Close() error {
|
||||
waitIdx++
|
||||
}
|
||||
}
|
||||
r.memory.db.Flush()
|
||||
|
||||
fs.Debugf(r, "cache reader closed %v", r.offset)
|
||||
return nil
|
||||
@@ -344,22 +325,22 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
|
||||
var err error
|
||||
switch whence {
|
||||
case os.SEEK_SET:
|
||||
case io.SeekStart:
|
||||
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
|
||||
r.offset = offset
|
||||
case os.SEEK_CUR:
|
||||
case io.SeekCurrent:
|
||||
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
|
||||
r.offset += offset
|
||||
case os.SEEK_END:
|
||||
case io.SeekEnd:
|
||||
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
|
||||
r.offset = r.cachedObject.Size() + offset
|
||||
default:
|
||||
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
|
||||
}
|
||||
|
||||
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
|
||||
if chunkStart >= r.cacheFs().chunkSize {
|
||||
chunkStart = chunkStart - r.cacheFs().chunkSize
|
||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
||||
}
|
||||
r.queueOffset(chunkStart)
|
||||
|
||||
@@ -399,10 +380,10 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
|
||||
|
||||
if !closeOpen {
|
||||
if do, ok := r.(fs.RangeSeeker); ok {
|
||||
_, err = do.RangeSeek(offset, os.SEEK_SET, end-offset)
|
||||
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
|
||||
return r, err
|
||||
} else if do, ok := r.(io.Seeker); ok {
|
||||
_, err = do.Seek(offset, os.SEEK_SET)
|
||||
_, err = do.Seek(offset, io.SeekStart)
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
@@ -464,14 +445,13 @@ func (w *worker) run() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
} else {
|
||||
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
|
||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||
// TODO: Remove this comment if it proves to be reliable for #1896
|
||||
//if chunkEnd > w.r.cachedObject.Size() {
|
||||
// chunkEnd = w.r.cachedObject.Size()
|
||||
@@ -486,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
||||
var data []byte
|
||||
|
||||
// stop retries
|
||||
if retry >= w.r.cacheFs().readRetries {
|
||||
if retry >= w.r.cacheFs().opt.ReadRetries {
|
||||
return
|
||||
}
|
||||
// back-off between retries
|
||||
@@ -511,7 +491,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
||||
}
|
||||
|
||||
data = make([]byte, chunkEnd-chunkStart)
|
||||
sourceRead := 0
|
||||
var sourceRead int
|
||||
sourceRead, err = io.ReadFull(w.rc, data)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
||||
@@ -632,7 +612,7 @@ func (b *backgroundWriter) run() {
|
||||
return
|
||||
}
|
||||
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
|
||||
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
@@ -648,6 +628,23 @@ func (b *backgroundWriter) run() {
|
||||
fs.Errorf(remote, "background upload: %v", err)
|
||||
continue
|
||||
}
|
||||
// clean empty dirs up to root
|
||||
thisDir := cleanPath(path.Dir(remote))
|
||||
for thisDir != "" {
|
||||
thisList, err := b.fs.tempFs.List(thisDir)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(thisList) > 0 {
|
||||
break
|
||||
}
|
||||
err = b.fs.tempFs.Rmdir(thisDir)
|
||||
fs.Debugf(thisDir, "cleaned from temp path")
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
thisDir = cleanPath(path.Dir(thisDir))
|
||||
}
|
||||
fs.Infof(remote, "background upload: uploaded entry")
|
||||
err = b.fs.cache.removePendingUpload(absPath)
|
||||
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
|
||||
|
||||
17
backend/cache/object.go
vendored
@@ -1,10 +1,9 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -45,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
@@ -76,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
@@ -154,7 +153,7 @@ func (o *Object) Storable() bool {
|
||||
// 2. is not pending a notification from the wrapped fs
|
||||
func (o *Object) refresh() error {
|
||||
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
|
||||
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
|
||||
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
|
||||
if !isExpired && !isNotified {
|
||||
return nil
|
||||
}
|
||||
@@ -223,7 +222,7 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
}
|
||||
_, err = cacheReader.Seek(offset, os.SEEK_SET)
|
||||
_, err = cacheReader.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -238,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
if o.CacheFs.opt.TempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
@@ -257,6 +256,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
// deleting cached chunks and info to be replaced with new ones
|
||||
_ = o.CacheFs.cache.RemoveObject(o.abs())
|
||||
// advertise to ChangeNotify if wrapped doesn't do that
|
||||
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
|
||||
|
||||
o.CacheModTime = src.ModTime().UnixNano()
|
||||
o.CacheSize = src.Size()
|
||||
@@ -273,7 +274,7 @@ func (o *Object) Remove() error {
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
if o.CacheFs.opt.TempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
|
||||
212
backend/cache/plex.go
vendored
@@ -1,4 +1,4 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
@@ -16,37 +16,67 @@ import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
const (
|
||||
// defPlexLoginURL is the default URL for Plex login
|
||||
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
|
||||
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
|
||||
defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
|
||||
)
|
||||
|
||||
// PlaySessionStateNotification is part of the API response of Plex
|
||||
type PlaySessionStateNotification struct {
|
||||
SessionKey string `json:"sessionKey"`
|
||||
GUID string `json:"guid"`
|
||||
Key string `json:"key"`
|
||||
ViewOffset int64 `json:"viewOffset"`
|
||||
State string `json:"state"`
|
||||
TranscodeSession string `json:"transcodeSession"`
|
||||
}
|
||||
|
||||
// NotificationContainer is part of the API response of Plex
|
||||
type NotificationContainer struct {
|
||||
Type string `json:"type"`
|
||||
Size int `json:"size"`
|
||||
PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
|
||||
}
|
||||
|
||||
// PlexNotification is part of the API response of Plex
|
||||
type PlexNotification struct {
|
||||
Container NotificationContainer `json:"NotificationContainer"`
|
||||
}
|
||||
|
||||
// plexConnector is managing the cache integration with Plex
|
||||
type plexConnector struct {
|
||||
url *url.URL
|
||||
username string
|
||||
password string
|
||||
token string
|
||||
f *Fs
|
||||
mu sync.Mutex
|
||||
url *url.URL
|
||||
username string
|
||||
password string
|
||||
token string
|
||||
f *Fs
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
runningMu sync.Mutex
|
||||
stateCache *cache.Cache
|
||||
saveToken func(string)
|
||||
}
|
||||
|
||||
// newPlexConnector connects to a Plex server and generates a token
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pc := &plexConnector{
|
||||
f: f,
|
||||
url: u,
|
||||
username: username,
|
||||
password: password,
|
||||
token: "",
|
||||
f: f,
|
||||
url: u,
|
||||
username: username,
|
||||
password: password,
|
||||
token: "",
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
saveToken: saveToken,
|
||||
}
|
||||
|
||||
return pc, nil
|
||||
@@ -60,14 +90,83 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, er
|
||||
}
|
||||
|
||||
pc := &plexConnector{
|
||||
f: f,
|
||||
url: u,
|
||||
token: token,
|
||||
f: f,
|
||||
url: u,
|
||||
token: token,
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
}
|
||||
pc.listenWebsocket()
|
||||
|
||||
return pc, nil
|
||||
}
|
||||
|
||||
func (p *plexConnector) closeWebsocket() {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
fs.Infof("plex", "stopped Plex watcher")
|
||||
p.running = false
|
||||
}
|
||||
|
||||
func (p *plexConnector) listenWebsocket() {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
|
||||
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
|
||||
u = strings.Replace(u, "https://", "wss://", 1)
|
||||
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
|
||||
"", "http://localhost")
|
||||
if err != nil {
|
||||
fs.Errorf("plex", "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
p.running = true
|
||||
go func() {
|
||||
for {
|
||||
if !p.isConnected() {
|
||||
break
|
||||
}
|
||||
|
||||
notif := &PlexNotification{}
|
||||
err := websocket.JSON.Receive(conn, notif)
|
||||
if err != nil {
|
||||
fs.Debugf("plex", "%v", err)
|
||||
p.closeWebsocket()
|
||||
break
|
||||
}
|
||||
// we're only interested in play events
|
||||
if notif.Container.Type == "playing" {
|
||||
// we loop through each of them
|
||||
for _, v := range notif.Container.PlaySessionState {
|
||||
// event type of playing
|
||||
if v.State == "playing" {
|
||||
// if it's not cached get the details and cache them
|
||||
if _, found := p.stateCache.Get(v.Key); !found {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p.fillDefaultHeaders(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var data []byte
|
||||
data, err = ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
|
||||
}
|
||||
} else if v.State == "stopped" {
|
||||
p.stateCache.Delete(v.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// fillDefaultHeaders will add common headers to requests
|
||||
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
|
||||
req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
|
||||
@@ -111,17 +210,19 @@ func (p *plexConnector) authenticate() error {
|
||||
}
|
||||
p.token = token
|
||||
if p.token != "" {
|
||||
config.FileSet(p.f.Name(), "plex_token", p.token)
|
||||
config.SaveConfig()
|
||||
p.saveToken(p.token)
|
||||
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
|
||||
}
|
||||
p.listenWebsocket()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isConnected checks if this rclone is authenticated to Plex
|
||||
func (p *plexConnector) isConnected() bool {
|
||||
return p.token != ""
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
return p.running
|
||||
}
|
||||
|
||||
// isConfigured checks if this rclone is configured to use a Plex server
|
||||
@@ -131,6 +232,9 @@ func (p *plexConnector) isConfigured() bool {
|
||||
|
||||
func (p *plexConnector) isPlaying(co *Object) bool {
|
||||
var err error
|
||||
if !p.isConnected() {
|
||||
p.listenWebsocket()
|
||||
}
|
||||
|
||||
remote := co.Remote()
|
||||
if cr, yes := p.f.isWrappedByCrypt(); yes {
|
||||
@@ -142,64 +246,8 @@ func (p *plexConnector) isPlaying(co *Object) bool {
|
||||
}
|
||||
|
||||
isPlaying := false
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
p.fillDefaultHeaders(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var data map[string]interface{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&data)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
sizeGen, ok := get(data, "MediaContainer", "size")
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
size, ok := sizeGen.(float64)
|
||||
if !ok || size < float64(1) {
|
||||
return false
|
||||
}
|
||||
videosGen, ok := get(data, "MediaContainer", "Video")
|
||||
if !ok {
|
||||
fs.Debugf("plex", "empty videos: %v", data)
|
||||
return false
|
||||
}
|
||||
videos, ok := videosGen.([]interface{})
|
||||
if !ok || len(videos) < 1 {
|
||||
fs.Debugf("plex", "empty videos: %v", data)
|
||||
return false
|
||||
}
|
||||
for _, v := range videos {
|
||||
keyGen, ok := get(v, "key")
|
||||
if !ok {
|
||||
fs.Debugf("plex", "failed to find: key")
|
||||
continue
|
||||
}
|
||||
key, ok := keyGen.(string)
|
||||
if !ok {
|
||||
fs.Debugf("plex", "failed to understand: key")
|
||||
continue
|
||||
}
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), key), nil)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
p.fillDefaultHeaders(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var data []byte
|
||||
data, err = ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if bytes.Contains(data, []byte(remote)) {
|
||||
for _, v := range p.stateCache.Items() {
|
||||
if bytes.Contains(v.Object.([]byte), []byte(remote)) {
|
||||
isPlaying = true
|
||||
break
|
||||
}
|
||||
@@ -208,12 +256,6 @@ func (p *plexConnector) isPlaying(co *Object) bool {
|
||||
return isPlaying
|
||||
}
|
||||
|
||||
func (p *plexConnector) isPlayingAsync(co *Object, response chan bool) {
|
||||
time.Sleep(time.Second) // FIXME random guess here
|
||||
res := p.isPlaying(co)
|
||||
response <- res
|
||||
}
|
||||
|
||||
// adapted from: https://stackoverflow.com/a/28878037 (credit)
|
||||
func get(m interface{}, path ...interface{}) (interface{}, bool) {
|
||||
for _, p := range path {
|
||||
|
||||
2
backend/cache/storage_memory.go
vendored
@@ -1,4 +1,4 @@
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
71
backend/cache/storage_persistent.go
vendored
@@ -1,4 +1,4 @@
// +build !plan9,go1.7
// +build !plan9

package cache

@@ -34,7 +34,8 @@ const (

// Features flags for this storage type
type Features struct {
PurgeDb bool // purge the db before starting
PurgeDb bool // purge the db before starting
DbWaitTime time.Duration // time to wait for DB to be available
}

var boltMap = make(map[string]*Persistent)
@@ -122,7 +123,7 @@ func (b *Persistent) connect() error {
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
@@ -192,19 +193,46 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {

// AddDir will update a CachedDirectory metadata and all its entries
func (b *Persistent) AddDir(cachedDir *Directory) error {
return b.AddBatchDir([]*Directory{cachedDir})
}

// AddBatchDir will update a list of CachedDirectory metadata and all their entries
func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
if len(cachedDirs) == 0 {
return nil
}

return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), true, tx)
var bucket *bolt.Bucket
if cachedDirs[0].Dir == "" {
bucket = tx.Bucket([]byte(RootBucket))
} else {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir)
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}

encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = bucket.Put([]byte("."), encoded)
if err != nil {
return err
for _, cachedDir := range cachedDirs {
var b *bolt.Bucket
var err error
if cachedDir.Name == "" {
b = bucket
} else {
b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name))
}
if err != nil {
return err
}

encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
return err
}
}
return nil
})
@@ -315,7 +343,7 @@ func (b *Persistent) RemoveDir(fp string) error {
// ExpireDir will flush a CachedDirectory and all its objects from the objects
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(cd.CacheFs.fileAge * -1)
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
cd.CacheTs = &t

// expire all parents
@@ -400,6 +428,16 @@ func (b *Persistent) RemoveObject(fp string) error {
})
}

// ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
err := b.AddObject(co)
if withData {
_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
}
return err
}

// HasEntry confirms the existence of a single entry (dir or object)
func (b *Persistent) HasEntry(remote string) bool {
dir, name := path.Split(remote)
@@ -1060,10 +1098,3 @@ func itob(v int64) []byte {
func btoi(d []byte) int64 {
return int64(binary.BigEndian.Uint64(d))
}

// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}
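AddBatchDir above folds what used to be one read-write transaction per directory into a single `db.Update` call. Since each `Update` is its own transaction (and disk sync), batching the puts is what makes the bulk write cheap. Below is a minimal standalone sketch of the same idea against bolt directly; the import path, file name and bucket name are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt" // assumed import path; rclone vendors its own copy
)

func main() {
	db, err := bolt.Open("cache-example.db", 0644, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	entries := map[string]string{"a/": "meta-a", "a/b/": "meta-b", "a/c/": "meta-c"}

	// One transaction for all entries, mirroring AddBatchDir, rather than
	// one db.Update (and one sync) per entry as AddDir used to do.
	err = db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte("root"))
		if err != nil {
			return err
		}
		for k, v := range entries {
			if err := bucket.Put([]byte(k), []byte(v)); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote", len(entries), "entries in one transaction")
}
```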
@@ -781,7 +781,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
}
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
_, err = fh.RangeSeek(offset, 0, limit)
_, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil {
_ = fh.Close()
return nil, err
@@ -908,7 +908,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
if fh.open == nil {
return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek"))
}
if whence != 0 {
if whence != io.SeekStart {
return 0, fh.finish(errors.New("can only seek from the start"))
}

@@ -1016,7 +1016,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
if offset+limit > len(plaintext) {
continue
}
_, err := fh.RangeSeek(int64(offset), 0, int64(limit))
_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err)

check(fh, offset, limit)
@@ -1083,7 +1083,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
}
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(test.offset, 0, test.limit)
gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset)
}
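These hunks only swap the magic whence value 0 for the named constant; the io package defines the three seek origins, and the decrypter keeps rejecting anything other than a seek from the start. A quick reminder of the constants:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// io.SeekStart == 0, io.SeekCurrent == 1, io.SeekEnd == 2
	r := strings.NewReader("hello world")
	pos, err := r.Seek(6, io.SeekStart)
	fmt.Println(pos, err) // 6 <nil>
}
```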
@@ -5,24 +5,18 @@ import (
"fmt"
"io"
"path"
"strconv"
"strings"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)

// Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -30,11 +24,13 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
@@ -48,8 +44,9 @@ func init() {
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
Value: "true",
@@ -68,50 +65,67 @@ func init() {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
Optional: true,
}, {
Name: "show_mapping",
Help: "For all files listed show how the names encrypt.",
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}},
})
}
// NewCipher constructs a Cipher for the given config name
func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
}
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
if opt.Password == "" {
return nil, errors.New("password not set in config file")
}
password, err = obscure.Reveal(password)
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = obscure.Reveal(salt)
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
cipher, err := NewCipher(name)
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := config.FileGet(name, "remote")
return newCipherForConfig(opt)
}
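A hedged sketch of the new config flow used by NewCipher and newCipherForConfig above: key/value pairs from a configmap.Mapper are decoded into the Options struct through its `config:"..."` tags, then handed to newCipherForConfig. configmap.Simple (assumed here to be the plain map-backed Mapper) and the unset defaults are illustrative only.

```go
// exampleCipher is a hypothetical helper showing the call sequence only.
func exampleCipher() (Cipher, error) {
	m := configmap.Simple{
		"remote":              "myremote:encrypted",
		"filename_encryption": "standard",
		"password":            obscure.MustObscure("potato"),
	}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	return newCipherForConfig(opt)
}
```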
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
@@ -130,6 +144,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
@@ -161,14 +176,24 @@ func NewFs(name, rpath string) (fs.Fs, error) {
return f, err
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
mode NameEncryptionMode
}
// Name of the remote (as passed into NewFs)
|
||||
@@ -199,7 +224,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newObject(obj))
|
||||
@@ -213,7 +238,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
||||
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newDir(dir))
|
||||
@@ -291,14 +316,48 @@ type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.O

// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}

// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, err
}
wrappedIn = io.TeeReader(wrappedIn, hasher)
}

// Transfer the data
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}

// Check the hashes of the encrypted data if we were comparing them
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}

return f.newObject(o), nil
}
||||
return do()
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
}
|
||||
return do()
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.Fs
|
||||
@@ -627,11 +695,11 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
wrappedIn, err := o.f.cipher.EncryptData(in)
|
||||
if err != nil {
|
||||
return err
|
||||
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return o.Object, o.Object.Update(in, src, options...)
|
||||
}
|
||||
return o.Object.Update(wrappedIn, o.f.newObjectInfo(src))
|
||||
_, err := o.f.put(in, src, options, update)
|
||||
return err
|
||||
}
|
||||
|
||||
// newDir returns a dir with the Name decrypted
|
||||
@@ -699,6 +767,7 @@ var (
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
// Test Crypt filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package crypt_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup2(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*crypt.Object)(nil))
|
||||
fstests.RemoteName = "TestCrypt2:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit2(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString2(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName2(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot2(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty2(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound2(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir2(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir2(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty2(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty2(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty2(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound2(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile12(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError2(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile22(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile12(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile22(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile22(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot2(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot2(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir2(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir2(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel22(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel22(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile12(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject2(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and22(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir2(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy2(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove2(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove2(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull2(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision2(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify2(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString2(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs2(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote2(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes2(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime2(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType2(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime2(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize2(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen2(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek2(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange2(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead2(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate2(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable2(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile2(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound2(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove2(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream2(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge2(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal2(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise2(t *testing.T) { fstests.TestFinalise(t) }
|
||||
@@ -1,76 +0,0 @@
|
||||
// Test Crypt filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package crypt_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup3(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*crypt.Object)(nil))
|
||||
fstests.RemoteName = "TestCrypt3:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit3(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString3(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName3(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot3(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty3(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound3(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir3(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir3(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty3(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty3(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty3(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound3(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile13(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError3(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile23(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile13(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile23(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile23(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot3(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot3(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir3(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir3(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel23(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel23(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile13(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject3(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and23(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir3(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy3(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove3(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove3(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull3(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision3(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify3(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString3(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs3(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote3(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes3(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime3(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType3(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime3(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize3(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen3(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek3(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange3(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead3(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate3(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable3(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile3(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound3(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove3(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream3(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge3(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal3(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise3(t *testing.T) { fstests.TestFinalise(t) }
|
||||
@@ -1,34 +0,0 @@
|
||||
package crypt_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// Create the TestCrypt: remote
|
||||
func init() {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
tempdir2 := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
|
||||
name2 := name + "2"
|
||||
tempdir3 := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name3 := name + "3"
|
||||
fstests.ExtraConfig = []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name2, Key: "type", Value: "crypt"},
|
||||
{Name: name2, Key: "remote", Value: tempdir2},
|
||||
{Name: name2, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name2, Key: "filename_encryption", Value: "off"},
|
||||
{Name: name3, Key: "type", Value: "crypt"},
|
||||
{Name: name3, Key: "remote", Value: tempdir3},
|
||||
{Name: name3, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name3, Key: "filename_encryption", Value: "obfuscate"},
|
||||
}
|
||||
fstests.SkipBadWindowsCharacters[name3+":"] = true
|
||||
}
|
||||
@@ -1,76 +1,62 @@
|
||||
// Test Crypt filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package crypt_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*crypt.Object)(nil))
|
||||
fstests.RemoteName = "TestCrypt:"
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandard(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
// TestOff runs integration tests against the remote
|
||||
func TestOff(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
|
||||
name := "TestCrypt2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "off"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestObfuscate runs integration tests against the remote
|
||||
func TestObfuscate(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -23,7 +23,8 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
@@ -34,7 +35,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/drive/v3"
|
||||
drive "google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
@@ -49,24 +50,13 @@ const (
|
||||
defaultExtensions = "docx,xlsx,pptx,svg"
|
||||
scopePrefix = "https://www.googleapis.com/auth/"
|
||||
defaultScope = "drive"
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
|
||||
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
|
||||
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
|
||||
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
|
||||
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
|
||||
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
|
||||
driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.")
|
||||
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
|
||||
driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.")
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||
driveUploadCutoff = chunkSize
|
||||
// Description of how to auth for this app
|
||||
driveConfig = &oauth2.Config{
|
||||
Scopes: []string{scopePrefix + "drive"},
|
||||
@@ -109,38 +99,43 @@ func init() {
|
||||
Name: "drive",
|
||||
Description: "Google Drive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
var err error
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||
return
|
||||
}
|
||||
// Fill in the scopes
|
||||
scope := config.FileGet(name, "scope")
|
||||
if scope == "" {
|
||||
scope = defaultScope
|
||||
if opt.Scope == "" {
|
||||
opt.Scope = defaultScope
|
||||
}
|
||||
driveConfig.Scopes = nil
|
||||
for _, scope := range strings.Split(scope, ",") {
|
||||
for _, scope := range strings.Split(opt.Scope, ",") {
|
||||
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
|
||||
// Set the root_folder_id if using drive.appfolder
|
||||
if scope == "drive.appfolder" {
|
||||
config.FileSet(name, "root_folder_id", "appDataFolder")
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
}
|
||||
}
|
||||
if config.FileGet(name, "service_account_file") == "" {
|
||||
err = oauthutil.Config("drive", name, driveConfig)
|
||||
if opt.ServiceAccountFile == "" {
|
||||
err = oauthutil.Config("drive", name, m, driveConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
}
|
||||
err = configTeamDrive(name)
|
||||
err = configTeamDrive(opt, m, name)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure team drive: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
Help: "Google Application Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "scope",
|
||||
Help: "Scope that rclone should use when requesting access from drive.",
|
||||
@@ -162,14 +157,97 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: "ID of the root folder - leave blank normally. Fill in to access \"Computers\" folders. (see docs).",
|
||||
Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path - leave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Team Drive",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "auth_owner_only",
|
||||
Default: false,
|
||||
Help: "Only consider files owned by the authenticated user.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_trash",
|
||||
Default: true,
|
||||
Help: "Send files to the trash instead of deleting permanently.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_gdocs",
|
||||
Default: false,
|
||||
Help: "Skip google documents in all listings.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shared_with_me",
|
||||
Default: false,
|
||||
Help: "Only show files that are shared with me",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "trashed_only",
|
||||
Default: false,
|
||||
Help: "Only show files that are in the trash",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "formats",
|
||||
Default: defaultExtensions,
|
||||
Help: "Comma separated list of preferred formats for downloading Google docs.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_created_date",
|
||||
Default: false,
|
||||
Help: "Use created date instead of modified date.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Default: 1000,
|
||||
Help: "Size of listing chunk 100-1000. 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: "Impersonate this user when using a service account.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
Help: "Use alternate export URLs for google documents export.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Default: defaultChunkSize,
|
||||
Help: "Cutoff for switching to chunked upload",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Default: defaultChunkSize,
|
||||
Help: "Upload chunk size. Must a power of 2 >= 256k.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "acknowledge_abuse",
|
||||
Default: false,
|
||||
Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "keep_revision_forever",
|
||||
Default: false,
|
||||
Help: "Keep new head revision forever.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
Default: false,
|
||||
Help: "Untrash any trashed files - use with --drive-trashed-only.",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
|
||||
|
||||
// Invert mimeTypeToExtension
|
||||
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
|
||||
@@ -178,10 +256,35 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
Scope string `config:"scope"`
RootFolderID string `config:"root_folder_id"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
UseCreatedDate bool `config:"use_created_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
Untrash bool `config:"untrash"`
}

// Fs represents a remote drive server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
svc *drive.Service // the connection to the drive server
client *http.Client // authorized client
@@ -189,7 +292,6 @@ type Fs struct {
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *pacer.Pacer // To pace the API calls
extensions []string // preferred extensions to download docs
teamDriveID string // team drive ID, may be ""
isTeamDrive bool // true if this is a team drive
}
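ChunkSize and UploadCutoff are fs.SizeSuffix values; later in the file NewFs rejects a chunk_size that is not a power of two or is below 256k. A minimal sketch of the power-of-two test (the backend's own helper is named isPowerOfTwo; its exact body is not shown in this diff):

```go
// isPowerOfTwo reports whether i is a positive power of two,
// using the classic i & (i-1) bit trick.
func isPowerOfTwo(i int64) bool {
	return i > 0 && i&(i-1) == 0
}
```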
@@ -271,8 +373,8 @@ type listFn func(*drive.File) bool
|
||||
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
||||
var query []string
|
||||
if !includeAll {
|
||||
q := "trashed=" + strconv.FormatBool(*driveTrashedOnly)
|
||||
if *driveTrashedOnly {
|
||||
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
|
||||
if f.opt.TrashedOnly {
|
||||
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
|
||||
}
|
||||
query = append(query, q)
|
||||
@@ -280,10 +382,10 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||
if *driveSharedWithMe && dirID == f.rootFolderID {
|
||||
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||
query = append(query, "sharedWithMe=true")
|
||||
}
|
||||
if dirID != "" && !(*driveSharedWithMe && dirID == f.rootFolderID) {
|
||||
if dirID != "" && !(f.opt.SharedWithMe && dirID == f.rootFolderID) {
|
||||
query = append(query, fmt.Sprintf("'%s' in parents", dirID))
|
||||
}
|
||||
if title != "" {
|
||||
@@ -292,8 +394,7 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
|
||||
// Convert / to / for search
|
||||
searchTitle = strings.Replace(searchTitle, "/", "/", -1)
|
||||
// use contains to work around #1675
|
||||
query = append(query, fmt.Sprintf("name contains '%s'", searchTitle))
|
||||
query = append(query, fmt.Sprintf("name='%s'", searchTitle))
|
||||
}
|
||||
if directoriesOnly {
|
||||
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
|
||||
@@ -306,11 +407,11 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
list.Q(strings.Join(query, " and "))
|
||||
// fmt.Printf("list Query = %q\n", query)
|
||||
}
|
||||
if *driveListChunk > 0 {
|
||||
list.PageSize(*driveListChunk)
|
||||
if f.opt.ListChunk > 0 {
|
||||
list.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
if f.isTeamDrive {
|
||||
list.TeamDriveId(f.teamDriveID)
|
||||
list.TeamDriveId(f.opt.TeamDriveID)
|
||||
list.SupportsTeamDrives(true)
|
||||
list.IncludeTeamDriveItems(true)
|
||||
list.Corpora("teamDrive")
|
||||
@@ -322,7 +423,7 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
|
||||
var fields = partialFields
|
||||
|
||||
if *driveAuthOwnerOnly {
|
||||
if f.opt.AuthOwnerOnly {
|
||||
fields += ",owners"
|
||||
}
|
||||
|
||||
@@ -341,7 +442,8 @@ OUTER:
|
||||
for _, item := range files.Files {
|
||||
// Convert / to / for listing purposes
|
||||
item.Name = strings.Replace(item.Name, "/", "/", -1)
|
||||
// skip items introduced by workaround (#1675)
|
||||
// Check the case of items is correct since
|
||||
// the `=` operator is case insensitive.
|
||||
if title != "" && title != item.Name {
|
||||
continue
|
||||
}
|
||||
@@ -392,17 +494,16 @@ func (f *Fs) parseExtensions(extensions string) error {
|
||||
}
|
||||
|
||||
// Figure out if the user wants to use a team drive
|
||||
func configTeamDrive(name string) error {
|
||||
teamDrive := config.FileGet(name, "team_drive")
|
||||
if teamDrive == "" {
|
||||
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
|
||||
if opt.TeamDriveID == "" {
|
||||
fmt.Printf("Configure this as a team drive?\n")
|
||||
} else {
|
||||
fmt.Printf("Change current team drive ID %q?\n", teamDrive)
|
||||
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
|
||||
}
|
||||
if !config.Confirm() {
|
||||
if !config.ConfirmWithDefault(false) {
|
||||
return nil
|
||||
}
|
||||
client, err := createOAuthClient(name)
|
||||
client, err := createOAuthClient(opt, name, m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "config team drive failed to create oauth client")
|
||||
}
|
||||
@@ -437,7 +538,8 @@ func configTeamDrive(name string) error {
|
||||
} else {
|
||||
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
||||
}
|
||||
config.FileSet(name, "team_drive", driveID)
|
||||
m.Set("team_drive", driveID)
|
||||
opt.TeamDriveID = driveID
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -446,34 +548,37 @@ func newPacer() *pacer.Pacer {
|
||||
return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
|
||||
}
|
||||
|
||||
func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
|
||||
data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening credentials file")
|
||||
}
|
||||
conf, err := google.JWTConfigFromJSON(data, driveConfig.Scopes...)
|
||||
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
if *driveImpersonate != "" {
|
||||
conf.Subject = *driveImpersonate
|
||||
if opt.Impersonate != "" {
|
||||
conf.Subject = opt.Impersonate
|
||||
}
|
||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
func createOAuthClient(name string) (*http.Client, error) {
|
||||
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if serviceAccountPath != "" {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountPath)
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client from service account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, driveConfig)
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client")
|
||||
}
|
||||
@@ -483,15 +588,21 @@ func createOAuthClient(name string) (*http.Client, error) {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, path string) (fs.Fs, error) {
|
||||
if !isPowerOfTwo(int64(chunkSize)) {
|
||||
return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
|
||||
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chunkSize < 256*1024 {
|
||||
return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
|
||||
if !isPowerOfTwo(int64(opt.ChunkSize)) {
|
||||
return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
|
||||
}
|
||||
if opt.ChunkSize < 256*1024 {
|
||||
return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
|
||||
}
|
||||
|
||||
oAuthClient, err := createOAuthClient(name)
|
||||
oAuthClient, err := createOAuthClient(opt, name, m)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "drive: failed when making oauth client")
|
||||
}
|
||||
@@ -504,10 +615,10 @@ func NewFs(name, path string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: newPacer(),
|
||||
}
|
||||
f.teamDriveID = config.FileGet(name, "team_drive")
|
||||
f.isTeamDrive = f.teamDriveID != ""
|
||||
f.isTeamDrive = opt.TeamDriveID != ""
|
||||
f.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: true,
|
||||
@@ -524,20 +635,20 @@ func NewFs(name, path string) (fs.Fs, error) {
|
||||
|
||||
// set root folder for a team drive or query the user root folder
|
||||
if f.isTeamDrive {
|
||||
f.rootFolderID = f.teamDriveID
|
||||
f.rootFolderID = f.opt.TeamDriveID
|
||||
} else {
|
||||
f.rootFolderID = "root"
|
||||
}
|
||||
|
||||
// override root folder if set in the config
|
||||
if rootID := config.FileGet(name, "root_folder_id"); rootID != "" {
|
||||
f.rootFolderID = rootID
|
||||
if opt.RootFolderID != "" {
|
||||
f.rootFolderID = opt.RootFolderID
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, f.rootFolderID, f)
|
||||
|
||||
// Parse extensions
|
||||
err = f.parseExtensions(*driveExtensions)
|
||||
err = f.parseExtensions(opt.Extensions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -551,24 +662,28 @@ func NewFs(name, path string) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
newF := *f
|
||||
newF.dirCache = dircache.New(newRoot, f.rootFolderID, &newF)
|
||||
newF.root = newRoot
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = newF.dirCache.FindRoot(false)
|
||||
err = tempF.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
entries, err := newF.List("")
|
||||
entries, err := tempF.List("")
|
||||
if err != nil {
|
||||
// unable to list folder so return old f
|
||||
return f, nil
|
||||
}
|
||||
for _, e := range entries {
|
||||
if _, isObject := e.(fs.Object); isObject && e.Remote() == remote {
|
||||
// return an error with an fs which points to the parent
|
||||
return &newF, fs.ErrorIsFile
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
}
|
||||
// File doesn't exist so return old f
|
||||
@@ -710,6 +825,23 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
var iErr error
_, err = f.list(directoryID, "", false, false, false, func(item *drive.File) bool {
remote := path.Join(dir, item.Name)

// Untrash all trashed files if required
if f.opt.Untrash && item.Trashed {
fs.Infof(remote, "Untrashing")
err = f.pacer.Call(func() (bool, error) {
info := drive.File{
Trashed: false,
ForceSendFields: []string{"Trashed"},
}
_, err = f.svc.Files.Update(item.Id, &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
return shouldRetry(err)
})
if err != nil {
fs.Errorf(remote, "Untrashing failed: %v", err)
}
}

switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -717,7 +849,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
entries = append(entries, d)
case *driveAuthOwnerOnly && !isAuthOwned(item):
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
case item.Md5Checksum != "" || item.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
@@ -727,7 +859,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return true
}
entries = append(entries, o)
case *driveSkipGdocs:
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
default:
exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
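One subtlety in the untrash call above: the generated Drive API structs omit zero-value fields from the request body, so `Trashed: false` on its own would serialize to an empty patch. Listing the field in ForceSendFields makes the client send it anyway. A trimmed fragment (svc and fileID are placeholders):

```go
// Force the zero value of Trashed into the PATCH body.
info := drive.File{
	Trashed:         false,
	ForceSendFields: []string{"Trashed"},
}
// Body becomes {"trashed":false} instead of {}.
_, err := svc.Files.Update(fileID, &info).SupportsTeamDrives(true).Do()
```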
@@ -748,6 +880,18 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
obj := o.(*Object)
|
||||
obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
|
||||
if f.opt.AlternateExport {
|
||||
switch item.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.document":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.spreadsheet":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.presentation":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", item.Id, extension)
|
||||
}
|
||||
}
|
||||
obj.isDocument = true
|
||||
obj.mimeType = exportMimeType
|
||||
obj.bytes = -1
|
||||
@@ -830,11 +974,11 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
|
||||
}
|
||||
|
||||
var info *drive.File
|
||||
if size == 0 || size < int64(driveUploadCutoff) {
|
||||
if size == 0 || size < int64(f.opt.UploadCutoff) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
// Don't retry, return a retry error instead
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -881,9 +1025,9 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.rmdir(srcDir.ID(), true)
|
||||
if err != nil {
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
}
|
||||
}
|
||||
@@ -948,7 +1092,7 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
// trash the directory if it had trashed files
|
||||
// in or the user wants to trash, otherwise
|
||||
// delete it.
|
||||
err = f.rmdir(directoryID, trashedFiles || *driveUseTrash)
|
||||
err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -991,7 +1135,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
var info *drive.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1016,7 +1160,7 @@ func (f *Fs) Purge() error {
|
||||
return err
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
if *driveUseTrash {
|
||||
if f.opt.UseTrash {
|
||||
info := drive.File{
|
||||
Trashed: true,
|
||||
}
|
||||
@@ -1046,6 +1190,34 @@ func (f *Fs) CleanUp() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
if f.isTeamDrive {
|
||||
// Teamdrives don't appear to have a usage API so just return empty
|
||||
return &fs.Usage{}, nil
|
||||
}
|
||||
var about *drive.About
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
about, err = f.svc.About.Get().Fields("storageQuota").Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
|
||||
}
|
||||
q := about.StorageQuota
|
||||
usage := &fs.Usage{
|
||||
Used: fs.NewUsageValue(q.UsageInDrive), // bytes in use
|
||||
Trashed: fs.NewUsageValue(q.UsageInDriveTrash), // bytes in trash
|
||||
Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
|
||||
}
|
||||
if q.Limit > 0 {
|
||||
usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
|
||||
usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
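To make the mapping concrete, here is a small sketch with made-up numbers showing how the storageQuota fields above turn into fs.Usage values: Other is whatever the account stores outside Drive (e.g. Gmail), and Free is measured against total account usage, not just Drive usage.

package main

import "fmt"

func main() {
	// Hypothetical storageQuota values for a 15 GiB account.
	var (
		limit        int64 = 15 << 30 // q.Limit
		usage        int64 = 6 << 30  // q.Usage - everything counted against the quota
		usageInDrive int64 = 4 << 30  // q.UsageInDrive
		usageInTrash int64 = 1 << 30  // q.UsageInDriveTrash
	)
	fmt.Println("Used:   ", usageInDrive)       // bytes stored in Drive
	fmt.Println("Trashed:", usageInTrash)       // bytes in the Drive trash
	fmt.Println("Other:  ", usage-usageInDrive) // e.g. Gmail, Photos
	fmt.Println("Total:  ", limit)
	fmt.Println("Free:   ", limit-usage) // space left before hitting the quota
}
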
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
@@ -1091,6 +1263,41 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
||||
id, err := f.dirCache.FindDir(remote, false)
|
||||
if err == nil {
|
||||
fs.Debugf(f, "attempting to share directory '%s'", remote)
|
||||
} else {
|
||||
fs.Debugf(f, "attempting to share single file '%s'", remote)
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if err = o.readMetaData(); err != nil {
|
||||
return
|
||||
}
|
||||
id = o.id
|
||||
}
|
||||
|
||||
permission := &drive.Permission{
|
||||
AllowFileDiscovery: false,
|
||||
Role: "reader",
|
||||
Type: "anyone",
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
|
||||
// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
|
||||
_, err = f.svc.Permissions.Create(id, permission).Fields(googleapi.Field("id")).SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
|
||||
}
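The permission created above is the standard Drive v3 "anyone with the link can read" grant. A sketch of the same call in isolation, again assuming google.golang.org/api/drive/v3 and a hypothetical file ID:

package main

import (
	"context"
	"fmt"
	"log"

	drive "google.golang.org/api/drive/v3"
)

// shareReadOnly makes a file readable by anyone who has the link and
// returns the link in the same format PublicLink uses.
func shareReadOnly(svc *drive.Service, fileID string) (string, error) {
	perm := &drive.Permission{
		Role:               "reader",
		Type:               "anyone",
		AllowFileDiscovery: false, // link holders only, not searchable
	}
	if _, err := svc.Permissions.Create(fileID, perm).Fields("id").Do(); err != nil {
		return "", err
	}
	return fmt.Sprintf("https://drive.google.com/open?id=%s", fileID), nil
}

func main() {
	svc, err := drive.NewService(context.Background()) // application default credentials
	if err != nil {
		log.Fatal(err)
	}
	link, err := shareReadOnly(svc, "FILE-ID") // hypothetical file ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(link)
}
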
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
@@ -1156,10 +1363,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
}
|
||||
|
||||
// Find ID of src parent
|
||||
_, srcDirectoryID, err := srcFs.dirCache.FindPath(srcRemote, false)
|
||||
var srcDirectoryID string
|
||||
if srcRemote == "" {
|
||||
srcDirectoryID, err = srcFs.dirCache.RootParentID()
|
||||
} else {
|
||||
_, srcDirectoryID, err = srcFs.dirCache.FindPath(srcRemote, false)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
||||
if err != nil {
|
||||
@@ -1223,10 +1436,15 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
|
||||
if *driveListChunk > 0 {
|
||||
changesCall = changesCall.PageSize(*driveListChunk)
|
||||
if f.opt.ListChunk > 0 {
|
||||
changesCall.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
changeList, err = changesCall.SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
if f.isTeamDrive {
|
||||
changesCall.TeamDriveId(f.opt.TeamDriveID)
|
||||
changesCall.SupportsTeamDrives(true)
|
||||
changesCall.IncludeTeamDriveItems(true)
|
||||
}
|
||||
changeList, err = changesCall.Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1249,7 +1467,12 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
|
||||
continue
|
||||
}
|
||||
|
||||
if change.File != nil && change.File.MimeType != driveFolderType {
|
||||
if change.File != nil {
|
||||
changeType := fs.EntryDirectory
|
||||
if change.File.MimeType != driveFolderType {
|
||||
changeType = fs.EntryObject
|
||||
}
|
||||
|
||||
// translate the parent dir of this object
|
||||
if len(change.File.Parents) > 0 {
|
||||
if path, ok := f.dirCache.GetInv(change.File.Parents[0]); ok {
|
||||
@@ -1260,10 +1483,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
|
||||
path = change.File.Name
|
||||
}
|
||||
// this will now clear the actual file too
|
||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
|
||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: changeType})
|
||||
}
|
||||
} else { // a true root object that is changed
|
||||
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: fs.EntryObject})
|
||||
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1341,7 +1564,7 @@ func (o *Object) setMetaData(info *drive.File) {
|
||||
o.url = fmt.Sprintf("%sfiles/%s?alt=media", o.fs.svc.BasePath, info.Id)
|
||||
o.md5sum = strings.ToLower(info.Md5Checksum)
|
||||
o.bytes = info.Size
|
||||
if *driveUseCreatedDate {
|
||||
if o.fs.opt.UseCreatedDate {
|
||||
o.modifiedDate = info.CreatedTime
|
||||
} else {
|
||||
o.modifiedDate = info.ModifiedTime
|
||||
@@ -1448,6 +1671,12 @@ func (o *Object) httpResponse(method string, options []fs.OpenOption) (req *http
|
||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.client.Do(req)
|
||||
if err == nil {
|
||||
err = googleapi.CheckResponse(res)
|
||||
if err != nil {
|
||||
_ = res.Body.Close() // ignore error
|
||||
}
|
||||
}
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1491,16 +1720,39 @@ func (file *openFile) Close() (err error) {
|
||||
// Check it satisfies the interfaces
|
||||
var _ io.ReadCloser = &openFile{}
|
||||
|
||||
// Checks to see if err is a googleapi.Error with a reason of what
|
||||
func isGoogleError(err error, what string) bool {
|
||||
if gerr, ok := err.(*googleapi.Error); ok {
|
||||
for _, error := range gerr.Errors {
|
||||
if error.Reason == what {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
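For reference, this is the error shape isGoogleError matches on - a sketch using a hand-built googleapi.Error rather than one returned by the API:

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func main() {
	// A fabricated error of the kind the Drive API returns for flagged files.
	err := &googleapi.Error{
		Code: 403,
		Errors: []googleapi.ErrorItem{
			{Reason: "cannotDownloadAbusiveFile", Message: "This file has been flagged"},
		},
	}
	for _, item := range err.Errors {
		fmt.Println(item.Reason) // cannotDownloadAbusiveFile
	}
}
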
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
req, res, err := o.httpResponse("GET", options)
|
||||
_, res, err := o.httpResponse("GET", options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, isRanging := req.Header["Range"]
|
||||
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
|
||||
_ = res.Body.Close() // ignore error
|
||||
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
|
||||
if isGoogleError(err, "cannotDownloadAbusiveFile") {
|
||||
if o.fs.opt.AcknowledgeAbuse {
|
||||
// Retry acknowledging abuse
|
||||
if strings.ContainsRune(o.url, '?') {
|
||||
o.url += "&"
|
||||
} else {
|
||||
o.url += "?"
|
||||
}
|
||||
o.url += "acknowledgeAbuse=true"
|
||||
_, res, err = o.httpResponse("GET", options)
|
||||
} else {
|
||||
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "open file failed")
|
||||
}
|
||||
}
|
||||
// If it is a document, update the size with what we are
|
||||
// reading as it can change from the HEAD in the listing to
|
||||
@@ -1531,10 +1783,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Make the API request to upload metadata and file data.
|
||||
var err error
|
||||
var info *drive.File
|
||||
if size == 0 || size < int64(driveUploadCutoff) {
|
||||
if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1558,7 +1810,7 @@ func (o *Object) Remove() error {
|
||||
}
|
||||
var err error
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if *driveUseTrash {
|
||||
if o.fs.opt.UseTrash {
|
||||
info := drive.File{
|
||||
Trashed: true,
|
||||
}
|
||||
@@ -1581,6 +1833,11 @@ func (o *Object) MimeType() string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1593,7 +1850,10 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Drive filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package drive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*drive.Object)(nil))
|
||||
fstests.RemoteName = "TestDrive:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDrive:",
|
||||
NilObject: (*drive.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -30,9 +30,6 @@ import (
|
||||
const (
|
||||
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
|
||||
statusResumeIncomplete = 308
|
||||
|
||||
// Number of times to try each chunk
|
||||
maxTries = 10
|
||||
)
|
||||
|
||||
// resumableUpload is used by the generated APIs to provide resumable uploads.
|
||||
@@ -61,6 +58,9 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string,
|
||||
if f.isTeamDrive {
|
||||
params.Set("supportsTeamDrives", "true")
|
||||
}
|
||||
if f.opt.KeepRevisionForever {
|
||||
params.Set("keepRevisionForever", "true")
|
||||
}
|
||||
urls := "https://www.googleapis.com/upload/drive/v3/files"
|
||||
method := "POST"
|
||||
if fileID != "" {
|
||||
@@ -159,7 +159,7 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
|
||||
|
||||
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
|
||||
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
|
||||
_, _ = chunk.Seek(0, 0)
|
||||
_, _ = chunk.Seek(0, io.SeekStart)
|
||||
req := rx.makeRequest(start, chunk, chunkSize)
|
||||
res, err := rx.f.client.Do(req)
|
||||
if err != nil {
|
||||
@@ -192,16 +192,16 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
|
||||
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
|
||||
// It retries each chunk using the pacer and --low-level-retries
|
||||
func (rx *resumableUpload) Upload() (*drive.File, error) {
|
||||
start := int64(0)
|
||||
var StatusCode int
|
||||
var err error
|
||||
buf := make([]byte, int(chunkSize))
|
||||
buf := make([]byte, int(rx.f.opt.ChunkSize))
|
||||
for start < rx.ContentLength {
|
||||
reqSize := rx.ContentLength - start
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= int64(rx.f.opt.ChunkSize) {
|
||||
reqSize = int64(rx.f.opt.ChunkSize)
|
||||
}
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
|
||||
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
// Package dropbox provides an interface to Dropbox object storage
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package dropbox
|
||||
|
||||
// FIXME dropbox for business would be quite easy to add
|
||||
@@ -25,7 +22,6 @@ of path_display and all will be well.
|
||||
*/
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
@@ -35,10 +31,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -56,24 +56,6 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
// Endpoint: oauth2.Endpoint{
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
// Chunks are buffered into memory for retries.
|
||||
//
|
||||
@@ -97,8 +79,26 @@ var (
|
||||
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||
// by default.
|
||||
uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024)
|
||||
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
|
||||
defaultChunkSize = 48 * 1024 * 1024
|
||||
maxChunkSize = 150 * 1024 * 1024
|
||||
)
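A back-of-the-envelope sketch of the tradeoff described in the comments above, using the 48 MiB default chunk and rclone's default of 4 concurrent transfers (figures are assumptions, not measurements):

package main

import "fmt"

func main() {
	const (
		chunkSize = 48 * 1024 * 1024 // dropbox chunk_size default
		transfers = 4                // rclone --transfers default
	)
	fileSize := int64(1 << 30) // a hypothetical 1 GiB upload
	chunks := fileSize / chunkSize
	if fileSize%chunkSize != 0 {
		chunks++
	}
	fmt.Printf("chunks per file: %d\n", chunks)                            // 22
	fmt.Printf("retry buffer memory: %d MiB\n", (chunkSize*transfers)>>20) // 192
}
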
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
// Endpoint: oauth2.Endpoint{
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -107,32 +107,45 @@ func init() {
|
||||
Name: "dropbox",
|
||||
Description: "Dropbox",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Dropbox App Client Id - leave blank normally.",
|
||||
Help: "Dropbox App Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Dropbox App Client Secret - leave blank normally.",
|
||||
Help: "Dropbox App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote dropbox server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
features *fs.Features // optional features
|
||||
srv files.Client // the connection to the dropbox server
|
||||
slashRoot string // root with "/" prefix, lowercase
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv files.Client // the connection to the dropbox server
|
||||
sharing sharing.Client // as above, but for generating sharing links
|
||||
users users.Client // as above, but for accessing user information
|
||||
slashRoot string // root with "/" prefix, lowercase
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
ns string // The namespace we are using or "" for none
|
||||
}
|
||||
|
||||
// Object describes a dropbox object
|
||||
@@ -183,15 +196,22 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadChunkSize > maxUploadChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
// a string, the new one is a JSON blob
|
||||
oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
|
||||
if oldToken != "" && oldToken[0] != '{' {
|
||||
oldToken, ok := m.Get(config.ConfigToken)
|
||||
oldToken = strings.TrimSpace(oldToken)
|
||||
if ok && oldToken != "" && oldToken[0] != '{' {
|
||||
fs.Infof(name, "Converting token to new format")
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
@@ -200,22 +220,24 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure dropbox: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||
}
|
||||
|
||||
config := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
Client: oAuthClient, // maybe???
|
||||
}
|
||||
srv := files.New(config)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
srv: srv,
|
||||
opt: *opt,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
config := dropbox.Config{
|
||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||
Client: oAuthClient, // maybe???
|
||||
HeaderGenerator: f.headerGenerator,
|
||||
}
|
||||
f.srv = files.New(config)
|
||||
f.sharing = sharing.New(config)
|
||||
f.users = users.New(config)
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: true,
|
||||
@@ -223,6 +245,27 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}).Fill(f)
|
||||
f.setRoot(root)
|
||||
|
||||
// If root starts with / then use the actual root
|
||||
if strings.HasPrefix(root, "/") {
|
||||
var acc *users.FullAccount
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
acc, err = f.users.GetCurrentAccount()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "get current account failed")
|
||||
}
|
||||
switch x := acc.RootInfo.(type) {
|
||||
case *common.TeamRootInfo:
|
||||
f.ns = x.RootNamespaceId
|
||||
case *common.UserRootInfo:
|
||||
f.ns = x.RootNamespaceId
|
||||
default:
|
||||
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||
}
|
||||
fs.Debugf(f, "Using root namespace %q", f.ns)
|
||||
}
|
||||
|
||||
// See if the root is actually an object
|
||||
_, err = f.getFileMetadata(f.slashRoot)
|
||||
if err == nil {
|
||||
@@ -237,14 +280,22 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// headerGenerator for dropbox sdk
|
||||
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
|
||||
if f.ns == "" {
|
||||
return map[string]string{}
|
||||
}
|
||||
return map[string]string{
|
||||
"Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
|
||||
}
|
||||
}
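For reference, the header the generator above emits looks like this; building the value with encoding/json shows the expected shape for a hypothetical namespace id:

package main

import (
	"encoding/json"
	"fmt"
)

type pathRoot struct {
	Tag         string `json:".tag"`
	NamespaceID string `json:"namespace_id"`
}

func main() {
	// "1234567890" is a made-up namespace id.
	root, _ := json.Marshal(pathRoot{Tag: "namespace_id", NamespaceID: "1234567890"})
	fmt.Println("Dropbox-API-Path-Root:", string(root))
	// Output: Dropbox-API-Path-Root: {".tag":"namespace_id","namespace_id":"1234567890"}
}
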
// Sets root in f
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = strings.Trim(root, "/")
|
||||
lowerCaseRoot := strings.ToLower(f.root)
|
||||
|
||||
f.slashRoot = "/" + lowerCaseRoot
|
||||
f.slashRoot = "/" + f.root
|
||||
f.slashRootSlash = f.slashRoot
|
||||
if lowerCaseRoot != "" {
|
||||
if f.root != "" {
|
||||
f.slashRootSlash += "/"
|
||||
}
|
||||
}
|
||||
@@ -417,21 +468,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// A read closer which doesn't close the input
|
||||
type readCloser struct {
|
||||
in io.Reader
|
||||
}
|
||||
|
||||
// Read bytes from the object - see io.Reader
|
||||
func (rc *readCloser) Read(p []byte) (n int, err error) {
|
||||
return rc.in.Read(p)
|
||||
}
|
||||
|
||||
// Dummy close function
|
||||
func (rc *readCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
@@ -640,6 +676,52 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(remote string) (link string, err error) {
|
||||
absPath := "/" + path.Join(f.Root(), remote)
|
||||
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
|
||||
createArg := sharing.CreateSharedLinkWithSettingsArg{
|
||||
Path: absPath,
|
||||
}
|
||||
var linkRes sharing.IsSharedLinkMetadata
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
|
||||
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
|
||||
listArg := sharing.ListSharedLinksArg{
|
||||
Path: absPath,
|
||||
DirectOnly: true,
|
||||
}
|
||||
var listRes *sharing.ListSharedLinksResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
listRes, err = f.sharing.ListSharedLinks(&listArg)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(listRes.Links) == 0 {
|
||||
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
|
||||
return
|
||||
}
|
||||
linkRes = listRes.Links[0]
|
||||
}
|
||||
if err == nil {
|
||||
switch res := linkRes.(type) {
|
||||
case *sharing.FileLinkMetadata:
|
||||
link = res.Url
|
||||
case *sharing.FolderLinkMetadata:
|
||||
link = res.Url
|
||||
default:
|
||||
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
@@ -683,6 +765,33 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (usage *fs.Usage, err error) {
|
||||
var q *users.SpaceUsage
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
q, err = f.users.GetSpaceUsage()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "about failed")
|
||||
}
|
||||
var total uint64
|
||||
if q.Allocation != nil {
|
||||
if q.Allocation.Individual != nil {
|
||||
total += q.Allocation.Individual.Allocated
|
||||
}
|
||||
if q.Allocation.Team != nil {
|
||||
total += q.Allocation.Team.Allocated
|
||||
}
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(int64(q.Used)), // bytes in use
|
||||
Free: fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.Dropbox)
|
||||
@@ -758,20 +867,6 @@ func (o *Object) remotePath() string {
|
||||
return o.fs.slashRootSlash + o.remote
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database for a given path
|
||||
func metadataKey(path string) string {
|
||||
// NB File system is case insensitive
|
||||
path = strings.ToLower(path)
|
||||
hash := md5.New()
|
||||
_, _ = hash.Write([]byte(path))
|
||||
return fmt.Sprintf("%x", hash.Sum(nil))
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database
|
||||
func (o *Object) metadataKey() string {
|
||||
return metadataKey(o.remotePath())
|
||||
}
|
||||
|
||||
// readMetaData gets the info if it hasn't already been fetched
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if !o.modTime.IsZero() {
|
||||
@@ -801,7 +896,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
// Dropbox doesn't have a way of doing this so returning this
|
||||
// error will cause the file to be deleted first then
|
||||
// re-uploaded to set the time.
|
||||
return fs.ErrorCantSetModTime
|
||||
return fs.ErrorCantSetModTimeWithoutDelete
|
||||
}
|
||||
|
||||
// Storable returns whether this object is storable
|
||||
@@ -835,7 +930,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
|
||||
// avoidable request to the Dropbox API that does not carry payload.
|
||||
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
|
||||
chunkSize := int64(uploadChunkSize)
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
chunks := 0
|
||||
if size != -1 {
|
||||
chunks = int(size/chunkSize) + 1
|
||||
@@ -859,7 +954,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, 0); err != nil {
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
|
||||
@@ -895,7 +990,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, 0); err != nil {
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
|
||||
@@ -918,7 +1013,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
chunk = readers.NewRepeatableReaderBuffer(in, buf)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(0, 0); err != nil {
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
|
||||
@@ -950,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
size := src.Size()
|
||||
var err error
|
||||
var entry *files.FileMetadata
|
||||
if size > int64(uploadChunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
entry, err = o.uploadChunked(in, commitInfo, size)
|
||||
} else {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
@@ -975,11 +1070,13 @@ func (o *Object) Remove() (err error) {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,78 +1,17 @@
|
||||
// Test Dropbox filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package dropbox_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/dropbox"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*dropbox.Object)(nil))
|
||||
fstests.RemoteName = "TestDropbox:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDropbox:",
|
||||
NilObject: (*dropbox.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
// Build for unsupported platforms to stop go complaining about "no
|
||||
// buildable Go source files "
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package dropbox
|
||||
@@ -4,16 +4,15 @@ package ftp
|
||||
import (
|
||||
"io"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
@@ -30,33 +29,40 @@ func init() {
|
||||
{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ftp.example.com",
|
||||
Help: "Connect to ftp.example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
||||
Optional: true,
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21) ",
|
||||
Optional: true,
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21)",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "FTP password",
|
||||
IsPassword: true,
|
||||
Optional: false,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Port string `config:"port"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
url string
|
||||
user string
|
||||
@@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (ff fs.Fs, err error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// FIXME Convert the old scheme used for the first beta - remove after release
|
||||
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
|
||||
fs.Infof(name, "Converting old configuration")
|
||||
u, err := url.Parse(ftpURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
|
||||
}
|
||||
parts := strings.Split(u.Host, ":")
|
||||
config.FileSet(name, "host", parts[0])
|
||||
if len(parts) > 1 {
|
||||
config.FileSet(name, "port", parts[1])
|
||||
}
|
||||
config.FileSet(name, "host", u.Host)
|
||||
config.FileSet(name, "user", config.FileGet(name, "username"))
|
||||
config.FileSet(name, "pass", config.FileGet(name, "password"))
|
||||
config.FileDeleteKey(name, "username")
|
||||
config.FileDeleteKey(name, "password")
|
||||
config.FileDeleteKey(name, "url")
|
||||
config.SaveConfig()
|
||||
if u.Path != "" && u.Path != "/" {
|
||||
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
|
||||
}
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
host := config.FileGet(name, "host")
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
port := config.FileGet(name, "port")
|
||||
pass, err = obscure.Reveal(pass)
|
||||
pass, err := obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFS decrypt password")
|
||||
}
|
||||
user := opt.User
|
||||
if user == "" {
|
||||
user = os.Getenv("USER")
|
||||
}
|
||||
port := opt.Port
|
||||
if port == "" {
|
||||
port = "21"
|
||||
}
|
||||
|
||||
dialAddr := host + ":" + port
|
||||
dialAddr := opt.Host + ":" + port
|
||||
u := "ftp://" + path.Join(dialAddr+"/", root)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
url: u,
|
||||
user: user,
|
||||
pass: pass,
|
||||
@@ -247,7 +235,7 @@ func translateErrorFile(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable:
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
@@ -259,16 +247,15 @@ func translateErrorDir(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable:
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
|
||||
// findItem finds a directory entry for the name in its parent directory
|
||||
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
fullPath := path.Join(f.root, remote)
|
||||
dir := path.Dir(fullPath)
|
||||
@@ -276,32 +263,58 @@ func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
|
||||
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewObject")
|
||||
return nil, errors.Wrap(err, "findItem")
|
||||
}
|
||||
files, err := c.List(dir)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorFile(err)
|
||||
}
|
||||
for i, file := range files {
|
||||
if file.Type != ftp.EntryTypeFolder && file.Name == base {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: files[i].Size,
|
||||
ModTime: files[i].Time,
|
||||
}
|
||||
o.info = info
|
||||
|
||||
return o, nil
|
||||
for _, file := range files {
|
||||
if file.Name == base {
|
||||
return file, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
entry, err := f.findItem(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil && entry.Type != ftp.EntryTypeFolder {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: entry.Size,
|
||||
ModTime: entry.Time,
|
||||
}
|
||||
o.info = info
|
||||
|
||||
return o, nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// dirExists checks whether the directory pointed to by remote exists or not
|
||||
func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
||||
entry, err := f.findItem(remote)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "dirExists")
|
||||
}
|
||||
if entry != nil && entry.Type == ftp.EntryTypeFolder {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
@@ -322,6 +335,18 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if err != nil {
|
||||
return nil, translateErrorDir(err)
|
||||
}
|
||||
// Annoyingly FTP returns success for a directory which
|
||||
// doesn't exist, so check it really doesn't exist if no
|
||||
// entries found.
|
||||
if len(files) == 0 {
|
||||
exists, err := f.dirExists(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
}
|
||||
if !exists {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
for i := range files {
|
||||
object := files[i]
|
||||
newremote := path.Join(dir, object.Name)
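A sketch of the empty-listing workaround from the hunk above in isolation, assuming github.com/jlaffaye/ftp and a hypothetical server: when LIST comes back empty, list the parent and check that the directory entry itself exists before trusting the result.

package main

import (
	"fmt"
	"log"
	"path"

	"github.com/jlaffaye/ftp"
)

// dirExists lists the parent directory and looks for dir itself as a folder entry.
func dirExists(c *ftp.ServerConn, dir string) (bool, error) {
	entries, err := c.List(path.Dir(dir))
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.Name == path.Base(dir) && e.Type == ftp.EntryTypeFolder {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	c, err := ftp.Dial("ftp.example.com:21") // hypothetical server
	if err != nil {
		log.Fatal(err)
	}
	defer c.Quit()
	if err := c.Login("anonymous", "anonymous"); err != nil {
		log.Fatal(err)
	}
	entries, err := c.List("/maybe-missing") // an empty reply here is ambiguous
	if err == nil && len(entries) == 0 {
		exists, _ := dirExists(c, "/maybe-missing")
		fmt.Println("directory really exists:", exists)
	}
}
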
@@ -438,6 +463,15 @@ func (f *Fs) mkdir(abspath string) error {
|
||||
}
|
||||
err = c.MakeDir(abspath)
|
||||
f.putFtpConnection(&c, err)
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test FTP filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package ftp_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/ftp"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*ftp.Object)(nil))
|
||||
fstests.RemoteName = "TestFTP:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFTP:",
|
||||
NilObject: (*ftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -29,12 +29,15 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
@@ -49,11 +52,10 @@ const (
	timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime     = "mtime" // key to store mtime under in metadata
	listChunks    = 1000    // chunk size to read directory listings
	minSleep      = 10 * time.Millisecond
)

var (
	gcsLocation     = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
	gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes: []string{storage.DevstorageFullControlScope},
@@ -68,29 +70,36 @@ var (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "google cloud storage",
|
||||
Prefix: "gcs",
|
||||
Description: "Google Cloud Storage (this is not Google Drive)",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
if config.FileGet(name, "service_account_file") != "" {
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
saFile, _ := m.Get("service_account_file")
|
||||
saCreds, _ := m.Get("service_account_credentials")
|
||||
if saFile != "" || saCreds != "" {
|
||||
return
|
||||
}
|
||||
err := oauthutil.Config("google cloud storage", name, storageConfig)
|
||||
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
Help: "Google Application Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "project_number",
|
||||
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path - needed only if you want to use SA instead of interactive login.",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "object_acl",
|
||||
Help: "Access Control List for new objects.",
|
||||
@@ -204,21 +213,29 @@ func init() {
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ProjectNumber string `config:"project_number"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
ObjectACL string `config:"object_acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
}
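
The Options struct above is filled from the per-remote configuration via its `config:"..."` struct tags - that is what the configstruct.Set(m, opt) calls later in this diff do. A minimal sketch of the pattern, using configmap.Simple (which also appears in the updated http tests further down); the key/value pairs are illustrative only:

	// Sketch: populate a backend Options struct from a config map.
	m := configmap.Simple{
		"project_number": "12345",      // hypothetical value
		"object_acl":     "publicRead", // hypothetical value
	}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		log.Fatalf("invalid config: %v", err)
	}
	// opt.ProjectNumber == "12345" and opt.ObjectACL == "publicRead"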
|
||||
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
projectNumber string // used for finding buckets
|
||||
objectACL string // used when creating new objects
|
||||
bucketACL string // used when creating new buckets
|
||||
location string // location of new buckets
|
||||
storageClass string // storage class of new buckets
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// Object describes a storage object
|
||||
@@ -262,6 +279,30 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}
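
Every API call converted in this diff follows the same shape: wrap the call in a closure, hand it to the pacer, and let shouldRetry classify the error. As a rough illustration only (not the real pacer, which also rate limits and backs off), the retry loop it drives behaves like this:

	// Illustrative retry loop: keep calling fn while it reports its error
	// as retryable, up to a fixed number of tries.
	func retryLoop(tries int, fn func() (again bool, err error)) (err error) {
		again := true
		for i := 0; i < tries && again; i++ {
			again, err = fn() // e.g. fn returns shouldRetry(err)
			if again {
				time.Sleep(minSleep) // pause between attempts
			}
		}
		return err
	}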
|
||||
|
||||
// Pattern to match a storage path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
@@ -277,12 +318,8 @@ func parsePath(path string) (bucket, directory string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
|
||||
data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening credentials file")
|
||||
}
|
||||
conf, err := google.JWTConfigFromJSON(data, storageConfig.Scopes...)
|
||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
@@ -291,20 +328,39 @@ func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if serviceAccountPath != "" {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountPath)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ObjectACL == "" {
|
||||
opt.ObjectACL = "private"
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = "private"
|
||||
}
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed configuring Google Cloud Storage Service Account: %v", err)
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,32 +370,17 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
projectNumber: config.FileGet(name, "project_number"),
|
||||
objectACL: config.FileGet(name, "object_acl"),
|
||||
bucketACL: config.FileGet(name, "bucket_acl"),
|
||||
location: config.FileGet(name, "location"),
|
||||
storageClass: config.FileGet(name, "storage_class"),
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
opt: *opt,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
if f.objectACL == "" {
|
||||
f.objectACL = "private"
|
||||
}
|
||||
if f.bucketACL == "" {
|
||||
f.bucketACL = "private"
|
||||
}
|
||||
if *gcsLocation != "" {
|
||||
f.location = *gcsLocation
|
||||
}
|
||||
if *gcsStorageClass != "" {
|
||||
f.storageClass = *gcsStorageClass
|
||||
}
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
@@ -351,7 +392,10 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the object exists
|
||||
_, err = f.svc.Objects.Get(bucket, directory).Do()
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.Get(bucket, directory).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.root = path.Dir(directory)
|
||||
if f.root == "." {
|
||||
@@ -399,7 +443,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
|
||||
// dir is the starting directory, "" for root
|
||||
//
|
||||
// Set recurse to read sub directories
|
||||
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
|
||||
root := f.root
|
||||
rootLength := len(root)
|
||||
if dir != "" {
|
||||
@@ -410,7 +454,11 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
list = list.Delimiter("/")
|
||||
}
|
||||
for {
|
||||
objects, err := list.Do()
|
||||
var objects *storage.Objects
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
objects, err = list.Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if gErr, ok := err.(*googleapi.Error); ok {
|
||||
if gErr.Code == http.StatusNotFound {
|
||||
@@ -437,6 +485,17 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
continue
|
||||
}
|
||||
remote := object.Name[rootLength:]
|
||||
// is this a directory marker?
|
||||
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
|
||||
if recurse && remote != "" {
|
||||
// add a directory in if --fast-list since will have no prefixes
|
||||
err = fn(remote[:len(remote)-1], object, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue // skip directory marker
|
||||
}
|
||||
err = fn(remote, object, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -498,12 +557,16 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
if f.projectNumber == "" {
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return nil, errors.New("can't list buckets without project number")
|
||||
}
|
||||
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
|
||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||
for {
|
||||
buckets, err := listBuckets.Do()
|
||||
var buckets *storage.Buckets
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
buckets, err = listBuckets.Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -591,13 +654,19 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
func (f *Fs) Mkdir(dir string) (err error) {
|
||||
f.bucketOKMu.Lock()
|
||||
defer f.bucketOKMu.Unlock()
|
||||
if f.bucketOK {
|
||||
return nil
|
||||
}
|
||||
_, err := f.svc.Buckets.Get(f.bucket).Do()
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
// Bucket already exists
|
||||
f.bucketOK = true
|
||||
@@ -610,16 +679,19 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
}
|
||||
|
||||
if f.projectNumber == "" {
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return errors.New("can't make bucket without project number")
|
||||
}
|
||||
|
||||
bucket := storage.Bucket{
|
||||
Name: f.bucket,
|
||||
Location: f.location,
|
||||
StorageClass: f.storageClass,
|
||||
Location: f.opt.Location,
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.bucketOK = true
|
||||
}
|
||||
@@ -630,13 +702,16 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
//
|
||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||
// to delete was not empty.
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
func (f *Fs) Rmdir(dir string) (err error) {
|
||||
f.bucketOKMu.Lock()
|
||||
defer f.bucketOKMu.Unlock()
|
||||
if f.root != "" || dir != "" {
|
||||
return nil
|
||||
}
|
||||
err := f.svc.Buckets.Delete(f.bucket).Do()
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.svc.Buckets.Delete(f.bucket).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.bucketOK = false
|
||||
}
|
||||
@@ -678,7 +753,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObject := srcObj.fs.root + srcObj.remote
|
||||
dstBucket := f.bucket
|
||||
dstObject := f.root + remote
|
||||
newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
|
||||
var newObject *storage.Object
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -766,7 +845,11 @@ func (o *Object) readMetaData() (err error) {
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
}
|
||||
object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
|
||||
var object *storage.Object
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if gErr, ok := err.(*googleapi.Error); ok {
|
||||
if gErr.Code == http.StatusNotFound {
|
||||
@@ -800,14 +883,18 @@ func metadataFromModTime(modTime time.Time) map[string]string {
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
func (o *Object) SetModTime(modTime time.Time) (err error) {
|
||||
// This only adds metadata so will preserve other metadata
|
||||
object := storage.Object{
|
||||
Bucket: o.fs.bucket,
|
||||
Name: o.fs.root + o.remote,
|
||||
Metadata: metadataFromModTime(modTime),
|
||||
}
|
||||
newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
|
||||
var newObject *storage.Object
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -827,7 +914,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
return nil, err
|
||||
}
|
||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||
res, err := o.fs.client.Do(req)
|
||||
var res *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.client.Do(req)
|
||||
if err == nil {
|
||||
err = googleapi.CheckResponse(res)
|
||||
if err != nil {
|
||||
_ = res.Body.Close() // ignore error
|
||||
}
|
||||
}
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -856,7 +953,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
Updated: modTime.Format(timeFormatOut), // Doesn't get set
|
||||
Metadata: metadataFromModTime(modTime),
|
||||
}
|
||||
newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
|
||||
var newObject *storage.Object
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -866,8 +967,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
|
||||
func (o *Object) Remove() (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test GoogleCloudStorage filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package googlecloudstorage_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/googlecloudstorage"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil))
|
||||
fstests.RemoteName = "TestGoogleCloudStorage:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestGoogleCloudStorage:",
|
||||
NilObject: (*googlecloudstorage.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -2,9 +2,6 @@
|
||||
//
|
||||
// It treats HTML pages served from the endpoint as directory
|
||||
// listings, and includes any links found as files.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
@@ -17,7 +14,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
@@ -38,7 +36,7 @@ func init() {
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
@@ -48,11 +46,17 @@ func init() {
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Endpoint string `config:"url"`
|
||||
}
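
With the url setting now carried in Options, the backend can be constructed straight from a config map, which is what the updated tests at the bottom of this file do. A small sketch; the remote name and URL are placeholders:

	// Sketch: build the http backend with the new NewFs signature and
	// list the root of the endpoint.
	func exampleNewHTTPFs() (fs.DirEntries, error) {
		m := configmap.Simple{"url": "https://example.com/"}
		f, err := NewFs("myhttp", "", m)
		if err != nil {
			return nil, err
		}
		return f.List("")
	}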
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this backend
|
||||
endpoint *url.URL
|
||||
endpointURL string // endpoint as a string
|
||||
httpClient *http.Client
|
||||
@@ -81,14 +85,20 @@ func statusError(res *http.Response, err error) error {
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
endpoint := config.FileGet(name, "url")
|
||||
if !strings.HasSuffix(endpoint, "/") {
|
||||
endpoint += "/"
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(opt.Endpoint, "/") {
|
||||
opt.Endpoint += "/"
|
||||
}
|
||||
|
||||
// Parse the endpoint and stick the root onto it
|
||||
base, err := url.Parse(endpoint)
|
||||
base, err := url.Parse(opt.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -133,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
httpClient: client,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
@@ -192,10 +203,11 @@ func (f *Fs) url(remote string) string {
|
||||
return f.endpointURL + rest.URLPathEscape(remote)
|
||||
}
|
||||
|
||||
func parseInt64(s string) int64 {
|
||||
// parse s into an int64, on failure return def
|
||||
func parseInt64(s string, def int64) int64 {
|
||||
n, e := strconv.ParseInt(s, 10, 64)
|
||||
if e != nil {
|
||||
return 0
|
||||
return def
|
||||
}
|
||||
return n
|
||||
}
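
The extra def parameter matters for the Content-Length handling below: a missing or unparsable header now yields -1, the convention for "size unknown", instead of a misleading 0. For example (res here stands for the *http.Response being inspected):

	// -1 signals unknown size rather than pretending the object is empty.
	size := parseInt64(res.Header.Get("Content-Length"), -1)
	if size < 0 {
		// size unknown - it will only be known once the body has been read
	}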
|
||||
@@ -412,7 +424,7 @@ func (o *Object) stat() error {
|
||||
if err != nil {
|
||||
t = timeUnset
|
||||
}
|
||||
o.size = parseInt64(res.Header.Get("Content-Length"))
|
||||
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
||||
o.modTime = t
|
||||
o.contentType = res.Header.Get("Content-Type")
|
||||
return nil
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -29,7 +30,7 @@ var (
|
||||
)
|
||||
|
||||
// prepareServer prepares the test server and returns a function to tidy it up afterwards
|
||||
func prepareServer(t *testing.T) func() {
|
||||
func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
@@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() {
|
||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||
// fs.Config.DumpHeaders = true
|
||||
// fs.Config.DumpBodies = true
|
||||
config.FileSet(remoteName, "type", "http")
|
||||
config.FileSet(remoteName, "url", ts.URL)
|
||||
// config.FileSet(remoteName, "type", "http")
|
||||
// config.FileSet(remoteName, "url", ts.URL)
|
||||
|
||||
m := configmap.Simple{
|
||||
"type": "http",
|
||||
"url": ts.URL,
|
||||
}
|
||||
|
||||
// return a function to tidy up
|
||||
return ts.Close
|
||||
return m, ts.Close
|
||||
}
|
||||
|
||||
// prepare the test server and return a function to tidy it up afterwards
|
||||
func prepare(t *testing.T) (fs.Fs, func()) {
|
||||
tidy := prepareServer(t)
|
||||
m, tidy := prepareServer(t)
|
||||
|
||||
// Instantiate it
|
||||
f, err := NewFs(remoteName, "")
|
||||
f, err := NewFs(remoteName, "", m)
|
||||
require.NoError(t, err)
|
||||
|
||||
return f, tidy
|
||||
@@ -151,6 +157,7 @@ func TestOpen(t *testing.T) {
|
||||
fd, err := o.Open()
|
||||
require.NoError(t, err)
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
|
||||
@@ -158,6 +165,7 @@ func TestOpen(t *testing.T) {
|
||||
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
|
||||
require.NoError(t, err)
|
||||
data, err = ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "eetro", string(data))
|
||||
}
|
||||
@@ -175,20 +183,20 @@ func TestMimeType(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "one%.txt")
|
||||
f, err := NewFs(remoteName, "one%.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
testListRoot(t, f)
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "three/underthree.txt")
|
||||
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
entries, err := f.List("")
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
// Build for mount for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package http
|
||||
@@ -36,7 +36,7 @@ func (a *auth) Response(resp *http.Response) error {
|
||||
|
||||
// The public storage URL - set Internal to true to read
|
||||
// internal/service net URL
|
||||
func (a *auth) StorageUrl(Internal bool) string {
|
||||
func (a *auth) StorageUrl(Internal bool) string { // nolint
|
||||
return a.f.credentials.Endpoint
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func (a *auth) Token() string {
|
||||
}
|
||||
|
||||
// The CDN url if available
|
||||
func (a *auth) CdnUrl() string {
|
||||
func (a *auth) CdnUrl() string { // nolint
|
||||
return ""
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
@@ -52,18 +54,18 @@ func init() {
|
||||
Name: "hubic",
|
||||
Description: "Hubic",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("hubic", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("hubic", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Hubic Client Id - leave blank normally.",
|
||||
Help: "Hubic Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Hubic Client Secret - leave blank normally.",
|
||||
Help: "Hubic Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(name, oauthConfig)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Hubic")
|
||||
}
|
||||
@@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return nil, errors.Wrap(err, "error authenticating swift connection")
|
||||
}
|
||||
|
||||
// Parse config into swift.Options struct
|
||||
opt := new(swift.Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make inner swift Fs from the connection
|
||||
swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
|
||||
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Hubic filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package hubic_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/hubic"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*hubic.Object)(nil))
|
||||
fstests.RemoteName = "TestHubic:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestHubic:",
|
||||
NilObject: (*hubic.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
29
backend/local/about_unix.go
Normal file
@@ -0,0 +1,29 @@
|
||||
// +build darwin dragonfly freebsd linux
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
var s syscall.Statfs_t
|
||||
err := syscall.Statfs(f.root, &s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read disk usage")
|
||||
}
|
||||
bs := int64(s.Bsize)
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
|
||||
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// check interface
|
||||
var _ fs.Abouter = &Fs{}
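
about_unix.go is one of two new implementations of the optional fs.Abouter interface (the Windows variant follows). Callers can feature-test for it with a type assertion; a rough sketch of consuming the quota information, where f is any fs.Fs and the printing is purely illustrative:

	// Sketch: use About() via the optional fs.Abouter interface.
	if abouter, ok := f.(fs.Abouter); ok {
		usage, err := abouter.About()
		if err == nil && usage.Total != nil && usage.Used != nil && usage.Free != nil {
			fmt.Printf("total=%d used=%d free=%d\n", *usage.Total, *usage.Used, *usage.Free)
		}
	}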
|
||||
36
backend/local/about_windows.go
Normal file
@@ -0,0 +1,36 @@
|
||||
// +build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
var available, total, free int64
|
||||
_, _, e1 := getFreeDiskSpace.Call(
|
||||
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
|
||||
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
|
||||
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
|
||||
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
|
||||
)
|
||||
if e1 != syscall.Errno(0) {
|
||||
return nil, errors.Wrap(e1, "failed to read disk usage")
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(total), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(total - free), // bytes in use
|
||||
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// check interface
|
||||
var _ fs.Abouter = &Fs{}
|
||||
@@ -16,18 +16,11 @@ import (
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/appengine/log"
|
||||
)
|
||||
|
||||
var (
|
||||
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
|
||||
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
|
||||
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -40,29 +33,68 @@ func init() {
|
||||
Description: "Local Disk",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Optional: true,
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: "Don't warn about skipped symlinks.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_unicode_normalization",
|
||||
Help: "Don't apply unicode normalization to paths and filenames",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: "Don't check to see if the files change during upload",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
OneFileSystem bool `config:"one_file_system"`
|
||||
}
|
||||
|
||||
// Fs represents a local filesystem rooted at root
|
||||
type Fs struct {
|
||||
name string // the name of the remote
|
||||
root string // The root directory (OS path)
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
dev uint64 // device number of root node
|
||||
precisionOk sync.Once // Whether we need to read the precision
|
||||
precision time.Duration // precision of local filesystem
|
||||
wmu sync.Mutex // used for locking access to 'warned'.
|
||||
warned map[string]struct{} // whether we have warned about this string
|
||||
nounc bool // Skip UNC conversion on Windows
|
||||
// do os.Lstat or os.Stat
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
dirNames *mapper // directory name mapping
|
||||
@@ -83,18 +115,22 @@ type Object struct {
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
var err error
|
||||
|
||||
if *noUTFNorm {
|
||||
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.NoUTFNorm {
|
||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
}
|
||||
|
||||
nounc := config.FileGet(name, "nounc")
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
warned: make(map[string]struct{}),
|
||||
nounc: nounc == "true",
|
||||
dev: devUnset,
|
||||
lstat: os.Lstat,
|
||||
dirNames: newMapper(),
|
||||
@@ -104,14 +140,14 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if *followSymlinks {
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
}
|
||||
|
||||
// Check to see if this points to a file
|
||||
fi, err := f.lstat(f.root)
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi)
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
if err == nil && fi.Mode().IsRegular() {
|
||||
// It is a file, so use the parent as the root
|
||||
@@ -242,7 +278,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
newRemote := path.Join(remote, name)
|
||||
newPath := filepath.Join(fsDirPath, name)
|
||||
// Follow symlinks if required
|
||||
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
fi, err = os.Stat(newPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -252,7 +288,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if fi.IsDir() {
|
||||
// Ignore directories which are symlinks. These are junction points under windows which
|
||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
|
||||
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
|
||||
entries = append(entries, d)
|
||||
}
|
||||
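
The one_file_system behaviour hinges on device numbers: the root's device is captured when the Fs is created, and readDevice is consulted again for every directory met while listing, as in the hunk above. Conceptually the check reduces to the following (when the option is off, readDevice returns devUnset for everything, so the comparison always matches):

	// Sketch: only descend into a directory that lives on the same device
	// as the root of the remote.
	if f.dev == readDevice(fi, f.opt.OneFileSystem) {
		entries = append(entries, fs.NewDir(newRemote, fi.ModTime()))
	}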
@@ -356,7 +392,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dev = readDevice(fi)
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -421,7 +457,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
|
||||
|
||||
// If it matches - have found the precision
|
||||
// fmt.Println("compare", fi.ModTime(), t)
|
||||
if fi.ModTime() == t {
|
||||
if fi.ModTime().Equal(t) {
|
||||
// fmt.Println("Precision detected as", duration)
|
||||
return duration
|
||||
}
|
||||
@@ -592,7 +628,6 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
|
||||
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
|
||||
hashes = make(map[hash.Type]string)
|
||||
in, err := os.Open(o.path)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to open")
|
||||
@@ -642,13 +677,8 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
}
|
||||
mode := o.mode
|
||||
// On windows a file with os.ModeSymlink represents a file with reparse points
|
||||
if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
|
||||
fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
|
||||
mode &^= os.ModeSymlink
|
||||
}
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if !*skipSymlinks {
|
||||
if !o.fs.opt.SkipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
}
|
||||
return false
|
||||
@@ -673,13 +703,18 @@ type localOpenFile struct {
|
||||
|
||||
// Read bytes from the object - see io.Reader
|
||||
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
||||
// Check if file has the same size and modTime
|
||||
fi, err := file.fd.Stat()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "can't read status of source file while transferring")
|
||||
}
|
||||
if file.o.size != fi.Size() || file.o.modTime != fi.ModTime() {
|
||||
return 0, errors.New("can't copy - source file is being updated")
|
||||
if !file.o.fs.opt.NoCheckUpdated {
|
||||
// Check if file has the same size and modTime
|
||||
fi, err := file.fd.Stat()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "can't read status of source file while transferring")
|
||||
}
|
||||
if file.o.size != fi.Size() {
|
||||
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
|
||||
}
|
||||
if !file.o.modTime.Equal(fi.ModTime()) {
|
||||
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
|
||||
}
|
||||
}
|
||||
|
||||
n, err = file.in.Read(p)
|
||||
@@ -729,7 +764,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
|
||||
if offset != 0 {
|
||||
// seek the object
|
||||
_, err = fd.Seek(offset, 0)
|
||||
_, err = fd.Seek(offset, io.SeekStart)
|
||||
// don't attempt to make checksums
|
||||
return wrappedFd, err
|
||||
}
|
||||
@@ -834,7 +869,7 @@ func (o *Object) lstat() error {
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
return os.Remove(o.path)
|
||||
return remove(o.path)
|
||||
}
|
||||
|
||||
// Return the directory and file from an OS path. Assumes
|
||||
@@ -878,7 +913,7 @@ func (f *Fs) cleanPath(s string) string {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
if !f.nounc {
|
||||
if !f.opt.NoUNC {
|
||||
// Convert to UNC
|
||||
s = uncPath(s)
|
||||
}
|
||||
|
||||
@@ -29,8 +29,10 @@ func TestMapper(t *testing.T) {
|
||||
})
|
||||
assert.Equal(t, "potato", m.Load("potato"))
|
||||
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
|
||||
}
|
||||
|
||||
// Test copy with source file that's updating
|
||||
func TestUpdatingCheck(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
filePath := "sub dir/local test"
|
||||
@@ -42,9 +44,11 @@ func TestMapper(t *testing.T) {
|
||||
}
|
||||
|
||||
fi, err := fd.Stat()
|
||||
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
|
||||
require.NoError(t, err)
|
||||
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
|
||||
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
|
||||
hash, err := hash.NewMultiHasherTypes(hash.Supported)
|
||||
require.NoError(t, err)
|
||||
in := localOpenFile{
|
||||
o: o,
|
||||
in: wrappedFd,
|
||||
@@ -59,4 +63,12 @@ func TestMapper(t *testing.T) {
|
||||
r.WriteFile(filePath, "content updated", time.Now())
|
||||
_, err = in.Read(buf)
|
||||
require.Errorf(t, err, "can't copy - source file is being updated")
|
||||
|
||||
// turn the checking off and try again
|
||||
in.o.fs.opt.NoCheckUpdated = true
|
||||
|
||||
r.WriteFile(filePath, "content updated", time.Now())
|
||||
_, err = in.Read(buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
}
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Local filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package local_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*local.Object)(nil))
|
||||
fstests.RemoteName = ""
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "",
|
||||
NilObject: (*local.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -8,6 +8,6 @@ import "os"
|
||||
|
||||
// readDevice turns a valid os.FileInfo into a device number,
|
||||
// returning devUnset if it fails.
|
||||
func readDevice(fi os.FileInfo) uint64 {
|
||||
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
|
||||
return devUnset
|
||||
}
|
||||
|
||||
@@ -9,17 +9,12 @@ import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
)
|
||||
|
||||
var (
|
||||
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
|
||||
)
|
||||
|
||||
// readDevice turns a valid os.FileInfo into a device number,
|
||||
// returning devUnset if it fails.
|
||||
func readDevice(fi os.FileInfo) uint64 {
|
||||
if !*oneFileSystem {
|
||||
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
|
||||
if !oneFileSystem {
|
||||
return devUnset
|
||||
}
|
||||
statT, ok := fi.Sys().(*syscall.Stat_t)
|
||||
|
||||
10
backend/local/remove_other.go
Normal file
10
backend/local/remove_other.go
Normal file
@@ -0,0 +1,10 @@
|
||||
//+build !windows
|
||||
|
||||
package local
|
||||
|
||||
import "os"
|
||||
|
||||
// Removes name, retrying on a sharing violation
|
||||
func remove(name string) error {
|
||||
return os.Remove(name)
|
||||
}
|
||||
50
backend/local/remove_test.go
Normal file
50
backend/local/remove_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check we can remove an open file
|
||||
func TestRemove(t *testing.T) {
|
||||
fd, err := ioutil.TempFile("", "rclone-remove-test")
|
||||
require.NoError(t, err)
|
||||
name := fd.Name()
|
||||
defer func() {
|
||||
_ = os.Remove(name)
|
||||
}()
|
||||
|
||||
exists := func() bool {
|
||||
_, err := os.Stat(name)
|
||||
if err == nil {
|
||||
return true
|
||||
} else if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
require.NoError(t, err)
|
||||
return false
|
||||
}
|
||||
|
||||
assert.True(t, exists())
|
||||
// close the file in the background
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
require.NoError(t, fd.Close())
|
||||
}()
|
||||
// delete the open file
|
||||
err = remove(name)
|
||||
require.NoError(t, err)
|
||||
// check it no longer exists
|
||||
assert.False(t, exists())
|
||||
// wait for background close
|
||||
wg.Wait()
|
||||
}
|
||||
38
backend/local/remove_windows.go
Normal file
38
backend/local/remove_windows.go
Normal file
@@ -0,0 +1,38 @@
|
||||
//+build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
const (
|
||||
ERROR_SHARING_VIOLATION syscall.Errno = 32
|
||||
)
|
||||
|
||||
// Removes name, retrying on a sharing violation
|
||||
func remove(name string) (err error) {
|
||||
const maxTries = 10
|
||||
var sleepTime = 1 * time.Millisecond
|
||||
for i := 0; i < maxTries; i++ {
|
||||
err = os.Remove(name)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
pathErr, ok := err.(*os.PathError)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if pathErr.Err != ERROR_SHARING_VIOLATION {
|
||||
break
|
||||
}
|
||||
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
|
||||
time.Sleep(sleepTime)
|
||||
sleepTime <<= 1
|
||||
}
|
||||
return err
|
||||
}
|
||||
1156
backend/mega/mega.go
Normal file
1156
backend/mega/mega.go
Normal file
File diff suppressed because it is too large
Load Diff
17
backend/mega/mega_test.go
Normal file
17
backend/mega/mega_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Test Mega filesystem interface
|
||||
package mega_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/mega"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestMega:",
|
||||
NilObject: (*mega.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -2,7 +2,10 @@
|
||||
|
||||
package api
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
@@ -50,10 +53,10 @@ type IdentitySet struct {
|
||||
|
||||
// Quota groups storage space quota-related information on OneDrive into a single structure.
|
||||
type Quota struct {
|
||||
Total int `json:"total"`
|
||||
Used int `json:"used"`
|
||||
Remaining int `json:"remaining"`
|
||||
Deleted int `json:"deleted"`
|
||||
Total int64 `json:"total"`
|
||||
Used int64 `json:"used"`
|
||||
Remaining int64 `json:"remaining"`
|
||||
Deleted int64 `json:"deleted"`
|
||||
State string `json:"state"` // normal | nearing | critical | exceeded
|
||||
}
|
||||
|
||||
@@ -93,6 +96,22 @@ type ItemReference struct {
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
}
|
||||
|
||||
// RemoteItemFacet groups data needed to reference a OneDrive remote item
|
||||
type RemoteItemFacet struct {
|
||||
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
|
||||
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
|
||||
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
|
||||
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
|
||||
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
|
||||
}
|
||||
|
||||
// FolderFacet groups folder-related data on OneDrive into a single structure
|
||||
type FolderFacet struct {
|
||||
ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
|
||||
@@ -100,8 +119,9 @@ type FolderFacet struct {
|
||||
|
||||
// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
|
||||
type HashesType struct {
|
||||
Sha1Hash string `json:"sha1Hash"` // base64 encoded SHA1 hash for the contents of the file (if available)
|
||||
Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
|
||||
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
|
||||
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
|
||||
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
|
||||
}
|
||||
|
||||
// FileFacet groups file-related data on OneDrive into a single structure.
|
||||
@@ -142,6 +162,7 @@ type Item struct {
|
||||
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
|
||||
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
|
||||
@@ -227,3 +248,112 @@ type AsyncOperationStatus struct {
|
||||
PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete.
|
||||
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
|
||||
}
|
||||
|
||||
// GetID returns a normalized ID of the item
|
||||
// If DriveID is known it will be prefixed to the ID with # seperator
|
||||
func (i *Item) GetID() string {
|
||||
if i.IsRemote() && i.RemoteItem.ID != "" {
|
||||
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
|
||||
} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
|
||||
return i.ParentReference.DriveID + "#" + i.ID
|
||||
}
|
||||
return i.ID
|
||||
}
|
||||
|
||||
// GetDriveID returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetDriveID() string {
|
||||
return i.GetParentReferance().DriveID
|
||||
}
|
||||
|
||||
// GetName returns a normalized Name of the item
|
||||
func (i *Item) GetName() string {
|
||||
if i.IsRemote() && i.RemoteItem.Name != "" {
|
||||
return i.RemoteItem.Name
|
||||
}
|
||||
return i.Name
|
||||
}
|
||||
|
||||
// GetFolder returns a normalized Folder of the item
|
||||
func (i *Item) GetFolder() *FolderFacet {
|
||||
if i.IsRemote() && i.RemoteItem.Folder != nil {
|
||||
return i.RemoteItem.Folder
|
||||
}
|
||||
return i.Folder
|
||||
}
|
||||
|
||||
// GetFile returns a normalized File of the item
|
||||
func (i *Item) GetFile() *FileFacet {
|
||||
if i.IsRemote() && i.RemoteItem.File != nil {
|
||||
return i.RemoteItem.File
|
||||
}
|
||||
return i.File
|
||||
}
|
||||
|
||||
// GetFileSystemInfo returns a normalized FileSystemInfo of the item
|
||||
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
|
||||
if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
|
||||
return i.RemoteItem.FileSystemInfo
|
||||
}
|
||||
return i.FileSystemInfo
|
||||
}
|
||||
|
||||
// GetSize returns a normalized Size of the item
|
||||
func (i *Item) GetSize() int64 {
|
||||
if i.IsRemote() && i.RemoteItem.Size != 0 {
|
||||
return i.RemoteItem.Size
|
||||
}
|
||||
return i.Size
|
||||
}
|
||||
|
||||
// GetWebURL returns a normalized WebURL of the item
|
||||
func (i *Item) GetWebURL() string {
|
||||
if i.IsRemote() && i.RemoteItem.WebURL != "" {
|
||||
return i.RemoteItem.WebURL
|
||||
}
|
||||
return i.WebURL
|
||||
}
|
||||
|
||||
// GetCreatedBy returns a normalized CreatedBy of the item
|
||||
func (i *Item) GetCreatedBy() IdentitySet {
|
||||
if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
|
||||
return i.RemoteItem.CreatedBy
|
||||
}
|
||||
return i.CreatedBy
|
||||
}
|
||||
|
||||
// GetLastModifiedBy returns a normalized LastModifiedBy of the item
|
||||
func (i *Item) GetLastModifiedBy() IdentitySet {
|
||||
if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
|
||||
return i.RemoteItem.LastModifiedBy
|
||||
}
|
||||
return i.LastModifiedBy
|
||||
}
|
||||
|
||||
// GetCreatedDateTime returns a normalized CreatedDateTime of the item
|
||||
func (i *Item) GetCreatedDateTime() Timestamp {
|
||||
if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
|
||||
return i.RemoteItem.CreatedDateTime
|
||||
}
|
||||
return i.CreatedDateTime
|
||||
}
|
||||
|
||||
// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
|
||||
func (i *Item) GetLastModifiedDateTime() Timestamp {
|
||||
if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
|
||||
return i.RemoteItem.LastModifiedDateTime
|
||||
}
|
||||
return i.LastModifiedDateTime
|
||||
}
|
||||
|
||||
// GetParentReferance returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetParentReferance() *ItemReference {
|
||||
if i.IsRemote() && i.ParentReference == nil {
|
||||
return i.RemoteItem.ParentReference
|
||||
}
|
||||
return i.ParentReference
|
||||
}
|
||||
|
||||
// IsRemote checks if item is a remote item
|
||||
func (i *Item) IsRemote() bool {
|
||||
return i.RemoteItem != nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -10,14 +12,14 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/onedrive/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -72,8 +74,7 @@ var (
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
|
||||
|
||||
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
|
||||
sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -82,7 +83,7 @@ func init() {
|
||||
Name: "onedrive",
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
// choose account type
|
||||
fmt.Printf("Choose OneDrive account type?\n")
|
||||
fmt.Printf(" * Say b for a OneDrive business account\n")
|
||||
@@ -91,19 +92,27 @@ func init() {
|
||||
|
||||
if isPersonal {
|
||||
// for personal accounts we don't safe a field about the account
|
||||
err := oauthutil.Config("onedrive", name, oauthPersonalConfig)
|
||||
err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
} else {
|
||||
err := oauthutil.Config("onedrive", name, oauthBusinessConfig, oauthBusinessResource)
|
||||
err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
|
||||
var resp oauthutil.AuthError
|
||||
|
||||
resp.Name = req.URL.Query().Get("error")
|
||||
resp.Code = strings.Split(req.URL.Query().Get("error_description"), ":")[0] // error_description begins with XXXXXXXXXXXX:
|
||||
resp.Description = strings.Join(strings.Split(req.URL.Query().Get("error_description"), ":")[1:], ":")
|
||||
resp.HelpURL = "https://rclone.org/onedrive/#troubleshooting"
|
||||
return resp
|
||||
}, oauthBusinessConfig, oauthBusinessResource)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Are we running headless?
|
||||
if config.FileGet(name, config.ConfigAutomatic) != "" {
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
return
|
||||
}
|
||||
@@ -117,7 +126,7 @@ func init() {
|
||||
Services []serviceResource `json:"value"`
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, oauthBusinessConfig)
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
return
|
||||
@@ -162,13 +171,13 @@ func init() {
|
||||
foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
|
||||
}
|
||||
|
||||
config.FileSet(name, configResourceURL, foundService)
|
||||
m.Set(configResourceURL, foundService)
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
|
||||
|
||||
// get the token from the inital config
|
||||
// we need to update the token with a resource
|
||||
// specific token we will query now
|
||||
token, err := oauthutil.GetToken(name)
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while getting token: %s", err)
|
||||
return
|
||||
@@ -211,7 +220,7 @@ func init() {
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, token, true)
|
||||
err = oauthutil.PutToken(name, m, token, true)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while setting token: %s", err)
|
||||
}
|
||||
@@ -219,20 +228,30 @@ func init() {
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Microsoft App Client Id - leave blank normally.",
|
||||
Help: "Microsoft App Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Microsoft App Client Secret - leave blank normally.",
|
||||
Help: "Microsoft App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to upload files with - must be multiple of 320k.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ResourceURL string `config:"resource_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote one drive
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -245,14 +264,15 @@ type Fs struct {
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
hasMetaData bool // whether info below has been set
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
hasMetaData bool // whether info below has been set
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -277,9 +297,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a one drive path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an one drive 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -318,6 +335,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
return info, resp, err
|
||||
}
|
||||
|
||||
@@ -336,26 +354,35 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
// get the resource URL from the config file0
|
||||
resourceURL := config.FileGet(name, configResourceURL, "")
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
|
||||
}
|
||||
// if we have a resource URL it's a business account otherwise a personal one
|
||||
isBusiness := opt.ResourceURL != ""
|
||||
var rootURL string
|
||||
var oauthConfig *oauth2.Config
|
||||
if resourceURL == "" {
|
||||
if !isBusiness {
|
||||
// personal account setup
|
||||
oauthConfig = oauthPersonalConfig
|
||||
rootURL = rootURLPersonal
|
||||
} else {
|
||||
// business account setup
|
||||
oauthConfig = oauthBusinessConfig
|
||||
rootURL = resourceURL + "_api/v2.0/drives/me"
|
||||
rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
|
||||
sharedURL = opt.ResourceURL + "_api/v2.0/drives"
|
||||
|
||||
// update the URL in the AuthOptions
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", resourceURL)
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
|
||||
}
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
}
|
||||
@@ -363,9 +390,10 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
isBusiness: resourceURL != "",
|
||||
isBusiness: isBusiness,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -475,21 +503,18 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if info.Folder == nil {
|
||||
if info.GetFolder() == nil {
|
||||
return "", false, errors.New("found file when looking for folder")
|
||||
}
|
||||
return info.ID, true, nil
|
||||
return info.GetID(), true, nil
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
||||
func (f *Fs) CreateDir(dirID, leaf string) (newID string, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
|
||||
var resp *http.Response
|
||||
var info *api.Item
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/items/" + pathID + "/children",
|
||||
}
|
||||
opts := newOptsCall(dirID, "POST", "/children")
|
||||
mkdir := api.CreateItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ConflictBehavior: "fail",
|
||||
@@ -502,8 +527,9 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
//fmt.Printf("...Id %q\n", *info.Id)
|
||||
return info.ID, nil
|
||||
return info.GetID(), nil
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
@@ -520,10 +546,8 @@ type listAllFn func(*api.Item) bool
|
||||
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
// Top parameter asks for bigger pages of data
|
||||
// https://dev.onedrive.com/odata/optional-query-parameters.htm
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/items/" + dirID + "/children?top=1000",
|
||||
}
|
||||
opts := newOptsCall(dirID, "GET", "/children?top=1000")
|
||||
|
||||
OUTER:
|
||||
for {
|
||||
var result api.ListChildrenResponse
|
||||
@@ -540,7 +564,7 @@ OUTER:
|
||||
}
|
||||
for i := range result.Value {
|
||||
item := &result.Value[i]
|
||||
isFolder := item.Folder != nil
|
||||
isFolder := item.GetFolder() != nil
|
||||
if isFolder {
|
||||
if filesOnly {
|
||||
continue
|
||||
@@ -553,7 +577,7 @@ OUTER:
|
||||
if item.Deleted != nil {
|
||||
continue
|
||||
}
|
||||
item.Name = restoreReservedChars(item.Name)
|
||||
item.Name = restoreReservedChars(item.GetName())
|
||||
if fn(item) {
|
||||
found = true
|
||||
break OUTER
|
||||
@@ -588,13 +612,15 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
|
||||
remote := path.Join(dir, info.Name)
|
||||
if info.Folder != nil {
|
||||
remote := path.Join(dir, info.GetName())
|
||||
folder := info.GetFolder()
|
||||
if folder != nil {
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, time.Time(info.LastModifiedDateTime)).SetID(info.ID)
|
||||
if info.Folder != nil {
|
||||
d.SetItems(info.Folder.ChildCount)
|
||||
id := info.GetID()
|
||||
f.dirCache.Put(remote, id)
|
||||
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
|
||||
if folder != nil {
|
||||
d.SetItems(folder.ChildCount)
|
||||
}
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
@@ -667,11 +693,9 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
|
||||
// deleteObject removes an object by ID
|
||||
func (f *Fs) deleteObject(id string) error {
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/items/" + id,
|
||||
NoResponse: true,
|
||||
}
|
||||
opts := newOptsCall(id, "DELETE", "")
|
||||
opts.NoResponse = true
|
||||
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -807,17 +831,17 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/items/" + srcObj.id + "/action.copy",
|
||||
ExtraHeaders: map[string]string{"Prefer": "respond-async"},
|
||||
NoResponse: true,
|
||||
}
|
||||
opts := newOptsCall(srcObj.id, "POST", "/action.copy")
|
||||
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
|
||||
opts.NoResponse = true
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
replacedLeaf := replaceReservedChars(leaf)
|
||||
copy := api.CopyItemRequest{
|
||||
Name: &replacedLeaf,
|
||||
ParentReference: api.ItemReference{
|
||||
ID: directoryID,
|
||||
ID: id,
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
@@ -884,14 +908,14 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Move the object
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/items/" + srcObj.id,
|
||||
}
|
||||
opts := newOptsCall(srcObj.id, "PATCH", "")
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
move := api.MoveItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
ID: directoryID,
|
||||
ID: id,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
@@ -922,8 +946,36 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (usage *fs.Usage, err error) {
|
||||
var drive api.Drive
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &drive)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "about failed")
|
||||
}
|
||||
q := drive.Quota
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(q.Used), // bytes in use
|
||||
Trashed: fs.NewUsageValue(q.Deleted), // bytes in trash
|
||||
Free: fs.NewUsageValue(q.Remaining), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
if f.isBusiness {
|
||||
return hash.Set(hash.QuickXorHash)
|
||||
}
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
|
||||
@@ -954,6 +1006,12 @@ func (o *Object) srvPath() string {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if o.fs.isBusiness {
|
||||
if t != hash.QuickXorHash {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
if t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -972,31 +1030,37 @@ func (o *Object) Size() int64 {
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.Folder != nil {
|
||||
if info.GetFolder() != nil {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.size = info.GetSize()
|
||||
|
||||
// Docs: https://dev.onedrive.com/facets/hashes_facet.htm
|
||||
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
|
||||
//
|
||||
// The docs state both that the hashes are returned as hex
|
||||
// strings, and as base64 strings. Testing reveals they are in
|
||||
// fact uppercase hex strings.
|
||||
//
|
||||
// In OneDrive for Business, SHA1 and CRC32 hash values are not returned for files.
|
||||
if info.File != nil {
|
||||
o.mimeType = info.File.MimeType
|
||||
if info.File.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(info.File.Hashes.Sha1Hash)
|
||||
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
|
||||
file := info.GetFile()
|
||||
if file != nil {
|
||||
o.mimeType = file.MimeType
|
||||
if file.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
}
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.quickxorhash = hex.EncodeToString(h)
|
||||
}
|
||||
}
|
||||
}
|
||||
if info.FileSystemInfo != nil {
|
||||
o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
if fileSystemInfo != nil {
|
||||
o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
|
||||
} else {
|
||||
o.modTime = time.Time(info.LastModifiedDateTime)
|
||||
o.modTime = time.Time(info.GetLastModifiedDateTime())
|
||||
}
|
||||
o.id = info.ID
|
||||
o.id = info.GetID()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1035,9 +1099,20 @@ func (o *Object) ModTime() time.Time {
|
||||
|
||||
// setModTime sets the modification time of the local fs object
|
||||
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
}
|
||||
}
|
||||
update := api.SetFileSystemInfo{
|
||||
FileSystemInfo: api.FileSystemInfoFacet{
|
||||
@@ -1074,11 +1149,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/items/" + o.id + "/content",
|
||||
Options: options,
|
||||
}
|
||||
opts := newOptsCall(o.id, "GET", "/content")
|
||||
opts.Options = options
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -1096,9 +1169,20 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
id, drive, rootURL := parseDirID(directoryID)
|
||||
var opts rest.Opts
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/upload.createSession",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
|
||||
}
|
||||
}
|
||||
createRequest := api.CreateUploadRequest{}
|
||||
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
|
||||
@@ -1123,14 +1207,16 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
|
||||
// var response api.UploadFragmentResponse
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, _ = chunk.Seek(0, 0)
|
||||
_, _ = chunk.Seek(0, io.SeekStart)
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
if resp != nil {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
}
|
||||
retry, err := shouldRetry(resp, err)
|
||||
if !retry && resp != nil {
|
||||
if resp.StatusCode == 200 || resp.StatusCode == 201 {
|
||||
// we are done :)
|
||||
// read the item
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
info = &api.Item{}
|
||||
return false, json.NewDecoder(resp.Body).Decode(info)
|
||||
}
|
||||
@@ -1157,10 +1243,6 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if chunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
fs.Debugf(o, "Starting multipart upload")
|
||||
session, err := o.createUploadSession(modTime)
|
||||
@@ -1184,7 +1266,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
remaining := size
|
||||
position := int64(0)
|
||||
for remaining > 0 {
|
||||
n := int64(chunkSize)
|
||||
n := int64(o.fs.opt.ChunkSize)
|
||||
if remaining < n {
|
||||
n = remaining
|
||||
}
|
||||
@@ -1204,11 +1286,24 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
// uploadSinglepart uploads a file as a single part
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
}
|
||||
// for go1.8 (see release notes) we must nil the Body if we want a
|
||||
// "Content-Length: 0" header which onedrive requires for all files.
|
||||
@@ -1222,6 +1317,7 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = o.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1263,6 +1359,35 @@ func (o *Object) MimeType() string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func newOptsCall(id string, method string, route string) (opts rest.Opts) {
|
||||
id, drive, rootURL := parseDirID(id)
|
||||
|
||||
if drive != "" {
|
||||
return rest.Opts{
|
||||
Method: method,
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + route,
|
||||
}
|
||||
}
|
||||
return rest.Opts{
|
||||
Method: method,
|
||||
Path: "/items/" + id + route,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDirID(ID string) (string, string, string) {
|
||||
if strings.Index(ID, "#") >= 0 {
|
||||
s := strings.Split(ID, "#")
|
||||
return s[1], s[0], sharedURL
|
||||
}
|
||||
return ID, "", ""
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1271,6 +1396,8 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
// _ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test OneDrive filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package onedrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/onedrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*onedrive.Object)(nil))
|
||||
fstests.RemoteName = "TestOneDrive:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOneDrive:",
|
||||
NilObject: (*onedrive.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
202
backend/onedrive/quickxorhash/quickxorhash.go
Normal file
202
backend/onedrive/quickxorhash/quickxorhash.go
Normal file
@@ -0,0 +1,202 @@
|
||||
// Package quickxorhash provides the quickXorHash algorithm which is a
|
||||
// quick, simple non-cryptographic hash algorithm that works by XORing
|
||||
// the bytes in a circular-shifting fashion.
|
||||
//
|
||||
// It is used by Microsoft Onedrive for Business to hash data.
|
||||
//
|
||||
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
package quickxorhash
|
||||
|
||||
// This code was ported from the code snippet linked from
|
||||
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
// Which has the copyright
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Copyright (c) 2016 Microsoft Corporation
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
// ------------------------------------------------------------------------------
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize is the preferred size for hashing
|
||||
BlockSize = 64
|
||||
// Size of the output checksum
|
||||
Size = 20
|
||||
bitsInLastCell = 32
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = (widthInBits-1)/64 + 1
|
||||
)
|
||||
|
||||
type quickXorHash struct {
|
||||
data [dataSize]uint64
|
||||
lengthSoFar uint64
|
||||
shiftSoFar int
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the quickXorHash checksum.
|
||||
func New() hash.Hash {
|
||||
return &quickXorHash{}
|
||||
}
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
//
|
||||
// Write writes len(p) bytes from p to the underlying data stream. It returns
|
||||
// the number of bytes written from p (0 <= n <= len(p)) and any error
|
||||
// encountered that caused the write to stop early. Write must return a non-nil
|
||||
// error if it returns n < len(p). Write must not modify the slice data, even
|
||||
// temporarily.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (q *quickXorHash) Write(p []byte) (n int, err error) {
|
||||
currentshift := q.shiftSoFar
|
||||
|
||||
// The bitvector where we'll start xoring
|
||||
vectorArrayIndex := currentshift / 64
|
||||
|
||||
// The position within the bit vector at which we begin xoring
|
||||
vectorOffset := currentshift % 64
|
||||
iterations := len(p)
|
||||
if iterations > widthInBits {
|
||||
iterations = widthInBits
|
||||
}
|
||||
|
||||
for i := 0; i < iterations; i++ {
|
||||
isLastCell := vectorArrayIndex == len(q.data)-1
|
||||
var bitsInVectorCell int
|
||||
if isLastCell {
|
||||
bitsInVectorCell = bitsInLastCell
|
||||
} else {
|
||||
bitsInVectorCell = 64
|
||||
}
|
||||
|
||||
// There's at least 2 bitvectors before we reach the end of the array
|
||||
if vectorOffset <= bitsInVectorCell-8 {
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
|
||||
}
|
||||
} else {
|
||||
index1 := vectorArrayIndex
|
||||
var index2 int
|
||||
if isLastCell {
|
||||
index2 = 0
|
||||
} else {
|
||||
index2 = vectorArrayIndex + 1
|
||||
}
|
||||
low := byte(bitsInVectorCell - vectorOffset)
|
||||
|
||||
xoredByte := byte(0)
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
xoredByte ^= p[j]
|
||||
}
|
||||
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
|
||||
q.data[index2] ^= uint64(xoredByte) >> low
|
||||
}
|
||||
vectorOffset += shift
|
||||
for vectorOffset >= bitsInVectorCell {
|
||||
if isLastCell {
|
||||
vectorArrayIndex = 0
|
||||
} else {
|
||||
vectorArrayIndex = vectorArrayIndex + 1
|
||||
}
|
||||
vectorOffset -= bitsInVectorCell
|
||||
}
|
||||
}
|
||||
|
||||
// Update the starting position in a circular shift pattern
|
||||
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
|
||||
|
||||
q.lengthSoFar += uint64(len(p))
|
||||
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Calculate the current checksum
|
||||
func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// Output the data as little endian bytes
|
||||
ph := 0
|
||||
for _, d := range q.data[:len(q.data)-1] {
|
||||
_ = h[ph+7] // bounds check
|
||||
h[ph+0] = byte(d >> (8 * 0))
|
||||
h[ph+1] = byte(d >> (8 * 1))
|
||||
h[ph+2] = byte(d >> (8 * 2))
|
||||
h[ph+3] = byte(d >> (8 * 3))
|
||||
h[ph+4] = byte(d >> (8 * 4))
|
||||
h[ph+5] = byte(d >> (8 * 5))
|
||||
h[ph+6] = byte(d >> (8 * 6))
|
||||
h[ph+7] = byte(d >> (8 * 7))
|
||||
ph += 8
|
||||
}
|
||||
// remaining 32 bits
|
||||
d := q.data[len(q.data)-1]
|
||||
h[Size-4] = byte(d >> (8 * 0))
|
||||
h[Size-3] = byte(d >> (8 * 1))
|
||||
h[Size-2] = byte(d >> (8 * 2))
|
||||
h[Size-1] = byte(d >> (8 * 3))
|
||||
|
||||
// XOR the file length with the least significant bits in little endian format
|
||||
d = q.lengthSoFar
|
||||
h[Size-8] ^= byte(d >> (8 * 0))
|
||||
h[Size-7] ^= byte(d >> (8 * 1))
|
||||
h[Size-6] ^= byte(d >> (8 * 2))
|
||||
h[Size-5] ^= byte(d >> (8 * 3))
|
||||
h[Size-4] ^= byte(d >> (8 * 4))
|
||||
h[Size-3] ^= byte(d >> (8 * 5))
|
||||
h[Size-2] ^= byte(d >> (8 * 6))
|
||||
h[Size-1] ^= byte(d >> (8 * 7))
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (q *quickXorHash) Sum(b []byte) []byte {
|
||||
hash := q.checkSum()
|
||||
return append(b, hash[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (q *quickXorHash) Reset() {
|
||||
*q = quickXorHash{}
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (q *quickXorHash) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (q *quickXorHash) BlockSize() int {
|
||||
return BlockSize
|
||||
}
|
||||
|
||||
// Sum returns the quickXorHash checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
var d quickXorHash
|
||||
_, _ = d.Write(data)
|
||||
return d.checkSum()
|
||||
}
|
||||
168
backend/onedrive/quickxorhash/quickxorhash_test.go
Normal file
168
backend/onedrive/quickxorhash/quickxorhash_test.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package quickxorhash
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var testVectors = []struct {
|
||||
size int
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{0, ``, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="},
|
||||
{1, `Sg==`, "SgAAAAAAAAAAAAAAAQAAAAAAAAA="},
|
||||
{2, `tbQ=`, "taAFAAAAAAAAAAAAAgAAAAAAAAA="},
|
||||
{3, `0pZP`, "0rDEEwAAAAAAAAAAAwAAAAAAAAA="},
|
||||
{4, `jRRDVA==`, "jaDAEKgAAAAAAAAABAAAAAAAAAA="},
|
||||
{5, `eAV52qE=`, "eChAHrQRCgAAAAAABQAAAAAAAAA="},
|
||||
{6, `luBZlaT6`, "lgBHFipBCn0AAAAABgAAAAAAAAA="},
|
||||
{7, `qaApEj66lw==`, "qQBFCiTgA11cAgAABwAAAAAAAAA="},
|
||||
{8, `/aNzzCFPS/A=`, "/RjFHJgRgicsAR4ACAAAAAAAAAA="},
|
||||
{9, `n6Neh7p6fFgm`, "nxiFFw6hCz3wAQsmCQAAAAAAAAA="},
|
||||
{10, `J9iPGCbfZSTNyw==`, "J8DGIzBggm+UgQTNUgYAAAAAAAA="},
|
||||
{11, `i+UZyUGJKh+ISbk=`, "iyhHBpIRhESo4AOIQ0IuAAAAAAA="},
|
||||
{12, `h490d57Pqz5q2rtT`, "h3gEHe7giWeswgdq3MYupgAAAAA="},
|
||||
{13, `vPgoDjOfO6fm71RxLw==`, "vMAHChwwg0/s4BTmdQcV4vACAAA="},
|
||||
{14, `XoJ1AsoR4fDYJrDqYs4=`, "XhBEHQSgjAiEAx7YPgEs1CEGZwA="},
|
||||
{15, `gQaybEqS/4UlDc8e4IJm`, "gDCALNigBEn8oxAlZ8AzPAAOQZg="},
|
||||
{16, `2fuxhBJXtpWFe8dOfdGeHw==`, "O9tHLAghgSvYohKFyMMxnNCHaHg="},
|
||||
{17, `XBV6YKU9V7yMakZnFIxIkuU=`, "HbplHsBQih5cgReMQYMRzkABRiA="},
|
||||
{18, `XJZSOiNO2bmfKnTKD7fztcQX`, "/6ZArHQwAidkIxefQgEdlPGAW8w="},
|
||||
{19, `g8VtAh+2Kf4k0kY5tzji2i2zmA==`, "wDNrgwHWAVukwB8kg4YRcnALHIg="},
|
||||
{20, `T6LYJIfDh81JrAK309H2JMJTXis=`, "zBTHrspn3mEcohlJdIUAbjGNaNg="},
|
||||
{21, `DWAAX5/CIfrmErgZa8ot6ZraeSbu`, "LR2Z0PjuRYGKQB/mhQAuMrAGZbQ="},
|
||||
{22, `N9abi3qy/mC1THZuVLHPpx7SgwtLOA==`, "1KTYttCBEen8Hwy1doId3ECFWDw="},
|
||||
{23, `LlUe7wHerLqEtbSZLZgZa9u0m7hbiFs=`, "TqVZpxs3cN61BnuFvwUtMtECTGQ="},
|
||||
{24, `bU2j/0XYdgfPFD4691jV0AOUEUPR4Z5E`, "bnLBiLpVgnxVkXhNsIAPdHAPLFQ="},
|
||||
{25, `lScPwPsyUsH2T1Qsr31wXtP55Wqbe47Uyg==`, "VDMSy8eI26nBHCB0e8gVWPCKPsA="},
|
||||
{26, `rJaKh1dLR1k+4hynliTZMGf8Nd4qKKoZiAM=`, "r7bjwkl8OYQeNaMcCY8fTmEJEmQ="},
|
||||
{27, `pPsT0CPmHrd3Frsnva1pB/z1ytARLeHEYRCo`, "Rdg7rCcDomL59pL0s6GuTvqLVqQ="},
|
||||
{28, `wSRChaqmrsnMrfB2yqI43eRWbro+f9kBvh+01w==`, "YTtloIi6frI7HX3vdLvE7I2iUOA="},
|
||||
{29, `apL67KMIRxQeE9k1/RuW09ppPjbF1WeQpTjSWtI=`, "CIpedls+ZlSQ654fl+X26+Q7LVU="},
|
||||
{30, `53yx0/QgMTVb7OOzHRHbkS7ghyRc+sIXxi7XHKgT`, "zfJtLGFgR9DB3Q64fAFIp+S5iOY="},
|
||||
{31, `PwXNnutoLLmxD8TTog52k8cQkukmT87TTnDipKLHQw==`, "PTaGs7yV3FUyBy/SfU6xJRlCJlI="},
|
||||
{32, `NbYXsp5/K6mR+NmHwExjvWeWDJFnXTKWVlzYHoesp2E=`, "wjuAuWDiq04qDt1R8hHWDDcwVoQ="},
|
||||
{33, `qQ70RB++JAR5ljNv3lJt1PpqETPsckopfonItu18Cr3E`, "FkJaeg/0Z5+euShYlLpE2tJh+Lo="},
|
||||
{34, `RhzSatQTQ9/RFvpHyQa1WLdkr3nIk6MjJUma998YRtp44A==`, "SPN2D29reImAqJezlqV2DLbi8tk="},
|
||||
{35, `DND1u1uZ5SqZVpRUk6NxSUdVo7IjjL9zs4A1evDNCDLcXWc=`, "S6lBk2hxI2SWBfn7nbEl7D19UUs="},
|
||||
{36, `jEi62utFz69JMYHjg1iXy7oO6ZpZSLcVd2B+pjm6BGsv/CWi`, "s0lYU9tr/bp9xsnrrjYgRS5EvV8="},
|
||||
{37, `hfS3DZZnhy0hv7nJdXLv/oJOtIgAuP9SInt/v8KeuO4/IvVh4A==`, "CV+HQCdd2A/e/vdi12f2UU55GLA="},
|
||||
{38, `EkPQAC6ymuRrYjIXD/LT/4Vb+7aTjYVZOHzC8GPCEtYDP0+T3Nc=`, "kE9H9sEmr3vHBYUiPbvsrcDgSEo="},
|
||||
{39, `vtBOGIENG7yQ/N7xNWPNIgy66Gk/I2Ur/ZhdFNUK9/1FCZuu/KeS`, "+Fgp3HBimtCzUAyiinj3pkarYTk="},
|
||||
{40, `YnF4smoy9hox2jBlJ3VUa4qyCRhOZbWcmFGIiszTT4zAdYHsqJazyg==`, "arkIn+ELddmE8N34J9ydyFKW+9w="},
|
||||
{41, `0n7nl3YJtipy6yeUbVPWtc2h45WbF9u8hTz5tNwj3dZZwfXWkk+GN3g=`, "YJLNK7JR64j9aODWfqDvEe/u6NU="},
|
||||
{42, `FnIIPHayc1pHkY4Lh8+zhWwG8xk6Knk/D3cZU1/fOUmRAoJ6CeztvMOL`, "22RPOylMtdk7xO/QEQiMli4ql0k="},
|
||||
{43, `J82VT7ND0Eg1MorSfJMUhn+qocF7PsUpdQAMrDiHJ2JcPZAHZ2nyuwjoKg==`, "pOR5eYfwCLRJbJsidpc1rIJYwtM="},
|
||||
{44, `Zbu+78+e35ZIymV5KTDdub5McyI3FEO8fDxs62uWHQ9U3Oh3ZqgaZ30SnmQ=`, "DbvbTkgNTgWRqRidA9r1jhtUjro="},
|
||||
{45, `lgybK3Da7LEeY5aeeNrqcdHvv6mD1W4cuQ3/rUj2C/CNcSI0cAMw6vtpVY3y`, "700RQByn1lRQSSme9npQB/Ye+bY="},
|
||||
{46, `jStZgKHv4QyJLvF2bYbIUZi/FscHALfKHAssTXkrV1byVR9eACwW9DNZQRHQwg==`, "uwN55He8xgE4g93dH9163xPew4U="},
|
||||
{47, `V1PSud3giF5WW72JB/bgtltsWtEB5V+a+wUALOJOGuqztzVXUZYrvoP3XV++gM0=`, "U+3ZfUF/6mwOoHJcSHkQkckfTDA="},
|
||||
{48, `VXs4t4tfXGiWAL6dlhEMm0YQF0f2w9rzX0CvIVeuW56o6/ec2auMpKeU2VeteEK5`, "sq24lSf7wXLH8eigHl07X+qPTps="},
|
||||
{49, `bLUn3jLH+HFUsG3ptWTHgNvtr3eEv9lfKBf0jm6uhpqhRwtbEQ7Ovj/hYQf42zfdtQ==`, "uC8xrnopGiHebGuwgq607WRQyxQ="},
|
||||
{50, `4SVmjtXIL8BB8SfkbR5Cpaljm2jpyUfAhIBf65XmKxHlz9dy5XixgiE/q1lv+esZW/E=`, "wxZ0rxkMQEnRNAp8ZgEZLT4RdLM="},
|
||||
{51, `pMljctlXeFUqbG3BppyiNbojQO3ygg6nZPeUZaQcVyJ+Clgiw3Q8ntLe8+02ZSfyCc39`, "aZEPmNvOXnTt7z7wt+ewV7QGMlg="},
|
||||
{52, `C16uQlxsHxMWnV2gJhFPuJ2/guZ4N1YgmNvAwL1yrouGQtwieGx8WvZsmYRnX72JnbVtTw==`, "QtlSNqXhVij64MMhKJ3EsDFB/z8="},
|
||||
{53, `7ZVDOywvrl3L0GyKjjcNg2CcTI81n2CeUbzdYWcZOSCEnA/xrNHpiK01HOcGh3BbxuS4S6g=`, "4NznNJc4nmXeApfiCFTq/H5LbHw="},
|
||||
{54, `JXm2tTVqpYuuz2Cc+ZnPusUb8vccPGrzWK2oVwLLl/FjpFoxO9FxGlhnB08iu8Q/XQSdzHn+`, "IwE5+2pKNcK366I2k2BzZYPibSI="},
|
||||
{55, `TiiU1mxzYBSGZuE+TX0l9USWBilQ7dEml5lLrzNPh75xmhjIK8SGqVAkvIMgAmcMB+raXdMPZg==`, "yECGHtgR128ScP4XlvF96eLbIBE="},
|
||||
{56, `zz+Q4zi6wh0fCJUFU9yUOqEVxlIA93gybXHOtXIPwQQ44pW4fyh6BRgc1bOneRuSWp85hwlTJl8=`, "+3Ef4D6yuoC8J+rbFqU1cegverE="},
|
||||
{57, `sa6SHK9z/G505bysK5KgRO2z2cTksDkLoFc7sv0tWBmf2G2mCiozf2Ce6EIO+W1fRsrrtn/eeOAV`, "xZg1CwMNAjN0AIXw2yh4+1N3oos="},
|
||||
{58, `0qx0xdyTHhnKJ22IeTlAjRpWw6y2sOOWFP75XJ7cleGJQiV2kyrmQOST4DGHIL0qqA7sMOdzKyTV
|
||||
iw==`, "bS0tRYPkP1Gfc+ZsBm9PMzPunG8="},
|
||||
{59, `QuzaF0+5ooig6OLEWeibZUENl8EaiXAQvK9UjBEauMeuFFDCtNcGs25BDtJGGbX90gH4VZvCCDNC
|
||||
q4s=`, "rggokuJq1OGNOfB6aDp2g4rdPgw="},
|
||||
{60, `+wg2x23GZQmMLkdv9MeAdettIWDmyK6Wr+ba23XD+Pvvq1lIMn9QIQT4Z7QHJE3iC/ZMFgaId9VA
|
||||
yY3d`, "ahQbTmOdiKUNdhYRHgv5/Ky+Y6k="},
|
||||
{61, `y0ydRgreRQwP95vpNP92ioI+7wFiyldHRbr1SfoPNdbKGFA0lBREaBEGNhf9yixmfE+Azo2AuROx
|
||||
b7Yc7g==`, "cJKFc0dXfiN4hMg1lcMf5E4gqvo="},
|
||||
{62, `LxlVvGXSQlSubK8r0pGf9zf7s/3RHe75a2WlSXQf3gZFR/BtRnR7fCIcaG//CbGfodBFp06DBx/S
|
||||
9hUV8Bk=`, "NwuwhhRWX8QZ/vhWKWgQ1+rNomI="},
|
||||
{63, `L+LSB8kmGMnHaWVA5P/+qFnfQliXvgJW7d2JGAgT6+koi5NQujFW1bwQVoXrBVyob/gBxGizUoJM
|
||||
gid5gGNo`, "ndX/KZBtFoeO3xKeo1ajO/Jy+rY="},
|
||||
{64, `Mb7EGva2rEE5fENDL85P+BsapHEEjv2/siVhKjvAQe02feExVOQSkfmuYzU/kTF1MaKjPmKF/w+c
|
||||
bvwfdWL8aQ==`, "n1anP5NfvD4XDYWIeRPW3ZkPv1Y="},
|
||||
{111, `jyibxJSzO6ZiZ0O1qe3tG/bvIAYssvukh9suIT5wEy1JBINVgPiqdsTW0cOpP0aUfP7mgqLfADkz
|
||||
I/m/GgCuVhr8oFLrOCoTx1/psBOWwhltCbhUx51Icm9aH8tY4Z3ccU+6BKpYQkLCy0B/A9Zc`, "hZfLIilSITC6N3e3tQ/iSgEzkto="},
|
||||
{128, `ikwCorI7PKWz17EI50jZCGbV9JU2E8bXVfxNMg5zdmqSZ2NlsQPp0kqYIPjzwTg1MBtfWPg53k0h
|
||||
0P2naJNEVgrqpoHTfV2b3pJ4m0zYPTJmUX4Bg/lOxcnCxAYKU29Y5F0U8Quz7ZXFBEweftXxJ7RS
|
||||
4r6N7BzJrPsLhY7hgck=`, "imAoFvCWlDn4yVw3/oq1PDbbm6U="},
|
||||
{222, `PfxMcUd0vIW6VbHG/uj/Y0W6qEoKmyBD0nYebEKazKaKG+UaDqBEcmQjbfQeVnVLuodMoPp7P7TR
|
||||
1htX5n2VnkHh22xDyoJ8C/ZQKiSNqQfXvh83judf4RVr9exJCud8Uvgip6aVZTaPrJHVjQhMCp/d
|
||||
EnGvqg0oN5OVkM2qqAXvA0teKUDhgNM71sDBVBCGXxNOR2bpbD1iM4dnuT0ey4L+loXEHTL0fqMe
|
||||
UcEi2asgImnlNakwenDzz0x57aBwyq3AspCFGB1ncX4yYCr/OaCcS5OKi/00WH+wNQU3`, "QX/YEpG0gDsmhEpCdWhsxDzsfVE="},
|
||||
{256, `qwGf2ESubE5jOUHHyc94ORczFYYbc2OmEzo+hBIyzJiNwAzC8PvJqtTzwkWkSslgHFGWQZR2BV5+
|
||||
uYTrYT7HVwRM40vqfj0dBgeDENyTenIOL1LHkjtDKoXEnQ0mXAHoJ8PjbNC93zi5TovVRXTNzfGE
|
||||
s5dpWVqxUzb5lc7dwkyvOluBw482mQ4xrzYyIY1t+//OrNi1ObGXuUw2jBQOFfJVj2Y6BOyYmfB1
|
||||
y36eBxi3zxeG5d5NYjm2GSh6e08QMAwu3zrINcqIzLOuNIiGXBtl7DjKt7b5wqi4oFiRpZsCyx2s
|
||||
mhSrdrtK/CkdU6nDN+34vSR/M8rZpWQdBE7a8g==`, "WYT9JY3JIo/pEBp+tIM6Gt2nyTM="},
|
||||
{333, `w0LGhqU1WXFbdavqDE4kAjEzWLGGzmTNikzqnsiXHx2KRReKVTxkv27u3UcEz9+lbMvYl4xFf2Z4
|
||||
aE1xRBBNd1Ke5C0zToSaYw5o4B/7X99nKK2/XaUX1byLow2aju2XJl2OpKpJg+tSJ2fmjIJTkfuY
|
||||
Uz574dFX6/VXxSxwGH/xQEAKS5TCsBK3CwnuG1p5SAsQq3gGVozDWyjEBcWDMdy8/AIFrj/y03Lf
|
||||
c/RNRCQTAfZbnf2QwV7sluw4fH3XJr07UoD0YqN+7XZzidtrwqMY26fpLZnyZjnBEt1FAZWO7RnK
|
||||
G5asg8xRk9YaDdedXdQSJAOy6bWEWlABj+tVAigBxavaluUH8LOj+yfCFldJjNLdi90fVHkUD/m4
|
||||
Mr5OtmupNMXPwuG3EQlqWUVpQoYpUYKLsk7a5Mvg6UFkiH596y5IbJEVCI1Kb3D1`, "e3+wo77iKcILiZegnzyUNcjCdoQ="},
|
||||
}
|
||||
|
||||
func TestQuickXorHash(t *testing.T) {
|
||||
for _, test := range testVectors {
|
||||
what := fmt.Sprintf("test size %d", test.size)
|
||||
in, err := base64.StdEncoding.DecodeString(test.in)
|
||||
require.NoError(t, err, what)
|
||||
got := Sum(in)
|
||||
want, err := base64.StdEncoding.DecodeString(test.out)
|
||||
require.NoError(t, err, what)
|
||||
assert.Equal(t, want, got[:], what)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuickXorHashByBlock(t *testing.T) {
|
||||
for _, blockSize := range []int{1, 2, 4, 7, 8, 16, 32, 64, 128, 256, 512} {
|
||||
for _, test := range testVectors {
|
||||
what := fmt.Sprintf("test size %d blockSize %d", test.size, blockSize)
|
||||
in, err := base64.StdEncoding.DecodeString(test.in)
|
||||
require.NoError(t, err, what)
|
||||
h := New()
|
||||
for i := 0; i < len(in); i += blockSize {
|
||||
end := i + blockSize
|
||||
if end > len(in) {
|
||||
end = len(in)
|
||||
}
|
||||
n, err := h.Write(in[i:end])
|
||||
require.Equal(t, end-i, n, what)
|
||||
require.NoError(t, err, what)
|
||||
}
|
||||
got := h.Sum(nil)
|
||||
want, err := base64.StdEncoding.DecodeString(test.out)
|
||||
require.NoError(t, err, what)
|
||||
assert.Equal(t, want, got, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
d := New()
|
||||
assert.Equal(t, 20, d.Size())
|
||||
}
|
||||
|
||||
func TestBlockSize(t *testing.T) {
|
||||
d := New()
|
||||
assert.Equal(t, 64, d.BlockSize())
|
||||
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
d := New()
|
||||
zeroHash := d.Sum(nil)
|
||||
_, _ = d.Write([]byte{1})
|
||||
assert.NotEqual(t, zeroHash, d.Sum(nil))
|
||||
d.Reset()
|
||||
assert.Equal(t, zeroHash, d.Sum(nil))
|
||||
}
|
||||
|
||||
// check interface
|
||||
var _ hash.Hash = (*quickXorHash)(nil)
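As an aside for readers of these tests, here is a minimal, hypothetical usage sketch of the quickxorhash package they exercise. The import path is assumed from the usual backend layout and is not confirmed by this diff; New, Write and Sum are used exactly as TestQuickXorHashByBlock does above.

package main

import (
	"encoding/base64"
	"fmt"

	// Assumed import path for the package under test; adjust to its real location.
	"github.com/ncw/rclone/backend/onedrive/quickxorhash"
)

func main() {
	// New returns a hash.Hash, so data may be written in arbitrary chunks,
	// just as TestQuickXorHashByBlock does with varying block sizes.
	h := quickxorhash.New()
	_, _ = h.Write([]byte("hello "))
	_, _ = h.Write([]byte("world"))
	// OneDrive exchanges QuickXorHash values as base64, matching the test vectors.
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}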
|
||||
1117 backend/opendrive/opendrive.go (new file)
File diff suppressed because it is too large
17 backend/opendrive/opendrive_test.go (new file)
@@ -0,0 +1,17 @@
|
||||
// Test Opendrive filesystem interface
|
||||
package opendrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/opendrive"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOpenDrive:",
|
||||
NilObject: (*opendrive.Object)(nil),
|
||||
})
|
||||
}
|
||||
78 backend/opendrive/replace.go (new file)
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
Translate file names for OpenDrive
|
||||
|
||||
OpenDrive reserved characters
|
||||
|
||||
The following characters are OpenDrive reserved characters, and can't
|
||||
be used in OpenDrive folder and file names.
|
||||
|
||||
\ / : * ? " < > |
|
||||
|
||||
OpenDrive files and folders also can't have leading or trailing spaces.
|
||||
|
||||
*/
|
||||
|
||||
package opendrive
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// charMap holds replacements for characters
|
||||
//
|
||||
// OpenDrive has a restricted set of characters compared to other cloud
|
||||
// storage systems, so we map these to the FULLWIDTH unicode
|
||||
// equivalents
|
||||
//
|
||||
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
|
||||
var (
|
||||
charMap = map[rune]rune{
|
||||
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
|
||||
':': '：', // FULLWIDTH COLON
|
||||
'*': '＊', // FULLWIDTH ASTERISK
|
||||
'?': '？', // FULLWIDTH QUESTION MARK
|
||||
'"': '"', // FULLWIDTH QUOTATION MARK
|
||||
'<': '＜', // FULLWIDTH LESS-THAN SIGN
|
||||
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
|
||||
'|': '｜', // FULLWIDTH VERTICAL LINE
|
||||
' ': '␠', // SYMBOL FOR SPACE
|
||||
}
|
||||
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
|
||||
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
|
||||
invCharMap map[rune]rune
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Create inverse charMap
|
||||
invCharMap = make(map[rune]rune, len(charMap))
|
||||
for k, v := range charMap {
|
||||
invCharMap[v] = k
|
||||
}
|
||||
}
|
||||
|
||||
// replaceReservedChars takes a path and substitutes any reserved
|
||||
// characters in it
|
||||
func replaceReservedChars(in string) string {
|
||||
// Filenames can't start with space
|
||||
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
|
||||
// Filenames can't end with space
|
||||
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := charMap[c]; ok && c != ' ' {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
|
||||
|
||||
// restoreReservedChars takes a path and undoes any substitutions
|
||||
// made by replaceReservedChars
|
||||
func restoreReservedChars(in string) string {
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := invCharMap[c]; ok {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
|
||||
28 backend/opendrive/replace_test.go (new file)
@@ -0,0 +1,28 @@
|
||||
package opendrive
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\*<>?:|#%".~`, `\*<>?:|#%".~`},
|
||||
{`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`},
|
||||
{" leading space", "␠leading space"},
|
||||
{" path/ leading spaces", "␠path/␠ leading spaces"},
|
||||
{"trailing space ", "trailing space␠"},
|
||||
{"trailing spaces /path ", "trailing spaces ␠/path␠"},
|
||||
} {
|
||||
got := replaceReservedChars(test.in)
|
||||
if got != test.out {
|
||||
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
|
||||
}
|
||||
got2 := restoreReservedChars(got)
|
||||
if got2 != test.in {
|
||||
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
|
||||
}
|
||||
}
|
||||
}
|
||||
214 backend/opendrive/types.go (new file)
@@ -0,0 +1,214 @@
|
||||
package opendrive
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error describes an openDRIVE error response
|
||||
type Error struct {
|
||||
Info struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
|
||||
}
|
||||
|
||||
// Account describes an OpenDRIVE account
|
||||
type Account struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"passwd"`
|
||||
}
|
||||
|
||||
// UserSessionInfo describes an OpenDRIVE session
|
||||
type UserSessionInfo struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"passwd"`
|
||||
|
||||
SessionID string `json:"SessionID"`
|
||||
UserName string `json:"UserName"`
|
||||
UserFirstName string `json:"UserFirstName"`
|
||||
UserLastName string `json:"UserLastName"`
|
||||
AccType string `json:"AccType"`
|
||||
UserLang string `json:"UserLang"`
|
||||
UserID string `json:"UserID"`
|
||||
IsAccountUser json.RawMessage `json:"IsAccountUser"`
|
||||
DriveName string `json:"DriveName"`
|
||||
UserLevel string `json:"UserLevel"`
|
||||
UserPlan string `json:"UserPlan"`
|
||||
FVersioning string `json:"FVersioning"`
|
||||
UserDomain string `json:"UserDomain"`
|
||||
PartnerUsersDomain string `json:"PartnerUsersDomain"`
|
||||
}
|
||||
|
||||
// FolderList describes an OpenDRIVE listing
|
||||
type FolderList struct {
|
||||
// DirUpdateTime string `json:"DirUpdateTime,string"`
|
||||
Name string `json:"Name"`
|
||||
ParentFolderID string `json:"ParentFolderID"`
|
||||
DirectFolderLink string `json:"DirectFolderLink"`
|
||||
ResponseType int `json:"ResponseType"`
|
||||
Folders []Folder `json:"Folders"`
|
||||
Files []File `json:"Files"`
|
||||
}
|
||||
|
||||
// Folder describes an OpenDRIVE folder
|
||||
type Folder struct {
|
||||
FolderID string `json:"FolderID"`
|
||||
Name string `json:"Name"`
|
||||
DateCreated int `json:"DateCreated"`
|
||||
DirUpdateTime int `json:"DirUpdateTime"`
|
||||
Access int `json:"Access"`
|
||||
DateModified int64 `json:"DateModified"`
|
||||
Shared string `json:"Shared"`
|
||||
ChildFolders int `json:"ChildFolders"`
|
||||
Link string `json:"Link"`
|
||||
Encrypted string `json:"Encrypted"`
|
||||
}
|
||||
|
||||
type createFolder struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderName string `json:"folder_name"`
|
||||
FolderSubParent string `json:"folder_sub_parent"`
|
||||
FolderIsPublic int64 `json:"folder_is_public"` // (0 = private, 1 = public, 2 = hidden)
|
||||
FolderPublicUpl int64 `json:"folder_public_upl"` // (0 = disabled, 1 = enabled)
|
||||
FolderPublicDisplay int64 `json:"folder_public_display"` // (0 = disabled, 1 = enabled)
|
||||
FolderPublicDnl int64 `json:"folder_public_dnl"` // (0 = disabled, 1 = enabled).
|
||||
}
|
||||
|
||||
type createFolderResponse struct {
|
||||
FolderID string `json:"FolderID"`
|
||||
Name string `json:"Name"`
|
||||
DateCreated int `json:"DateCreated"`
|
||||
DirUpdateTime int `json:"DirUpdateTime"`
|
||||
Access int `json:"Access"`
|
||||
DateModified int `json:"DateModified"`
|
||||
Shared string `json:"Shared"`
|
||||
Description string `json:"Description"`
|
||||
Link string `json:"Link"`
|
||||
}
|
||||
|
||||
type moveCopyFolder struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderID string `json:"folder_id"`
|
||||
DstFolderID string `json:"dst_folder_id"`
|
||||
Move string `json:"move"`
|
||||
NewFolderName string `json:"new_folder_name"` // New name for destination folder.
|
||||
}
|
||||
|
||||
type moveCopyFolderResponse struct {
|
||||
FolderID string `json:"FolderID"`
|
||||
}
|
||||
|
||||
type removeFolder struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderID string `json:"folder_id"`
|
||||
}
|
||||
|
||||
// File describes an OpenDRIVE file
|
||||
type File struct {
|
||||
FileID string `json:"FileId"`
|
||||
FileHash string `json:"FileHash"`
|
||||
Name string `json:"Name"`
|
||||
GroupID int `json:"GroupID"`
|
||||
Extension string `json:"Extension"`
|
||||
Size int64 `json:"Size,string"`
|
||||
Views string `json:"Views"`
|
||||
Version string `json:"Version"`
|
||||
Downloads string `json:"Downloads"`
|
||||
DateModified int64 `json:"DateModified,string"`
|
||||
Access string `json:"Access"`
|
||||
Link string `json:"Link"`
|
||||
DownloadLink string `json:"DownloadLink"`
|
||||
StreamingLink string `json:"StreamingLink"`
|
||||
TempStreamingLink string `json:"TempStreamingLink"`
|
||||
EditLink string `json:"EditLink"`
|
||||
ThumbLink string `json:"ThumbLink"`
|
||||
Password string `json:"Password"`
|
||||
EditOnline int `json:"EditOnline"`
|
||||
}
|
||||
|
||||
type moveCopyFile struct {
|
||||
SessionID string `json:"session_id"`
|
||||
SrcFileID string `json:"src_file_id"`
|
||||
DstFolderID string `json:"dst_folder_id"`
|
||||
Move string `json:"move"`
|
||||
OverwriteIfExists string `json:"overwrite_if_exists"`
|
||||
NewFileName string `json:"new_file_name"` // New name for destination file.
|
||||
}
|
||||
|
||||
type moveCopyFileResponse struct {
|
||||
FileID string `json:"FileID"`
|
||||
Size string `json:"Size"`
|
||||
}
|
||||
|
||||
type createFile struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderID string `json:"folder_id"`
|
||||
Name string `json:"file_name"`
|
||||
}
|
||||
|
||||
type createFileResponse struct {
|
||||
FileID string `json:"FileId"`
|
||||
Name string `json:"Name"`
|
||||
GroupID int `json:"GroupID"`
|
||||
Extension string `json:"Extension"`
|
||||
Size string `json:"Size"`
|
||||
Views string `json:"Views"`
|
||||
Downloads string `json:"Downloads"`
|
||||
DateModified string `json:"DateModified"`
|
||||
Access string `json:"Access"`
|
||||
Link string `json:"Link"`
|
||||
DownloadLink string `json:"DownloadLink"`
|
||||
StreamingLink string `json:"StreamingLink"`
|
||||
TempStreamingLink string `json:"TempStreamingLink"`
|
||||
DirUpdateTime int `json:"DirUpdateTime"`
|
||||
TempLocation string `json:"TempLocation"`
|
||||
SpeedLimit int `json:"SpeedLimit"`
|
||||
RequireCompression int `json:"RequireCompression"`
|
||||
RequireHash int `json:"RequireHash"`
|
||||
RequireHashOnly int `json:"RequireHashOnly"`
|
||||
}
|
||||
|
||||
type modTimeFile struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FileID string `json:"file_id"`
|
||||
FileModificationTime string `json:"file_modification_time"`
|
||||
}
|
||||
|
||||
type openUpload struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FileID string `json:"file_id"`
|
||||
Size int64 `json:"file_size"`
|
||||
}
|
||||
|
||||
type openUploadResponse struct {
|
||||
TempLocation string `json:"TempLocation"`
|
||||
RequireCompression bool `json:"RequireCompression"`
|
||||
RequireHash bool `json:"RequireHash"`
|
||||
RequireHashOnly bool `json:"RequireHashOnly"`
|
||||
SpeedLimit int `json:"SpeedLimit"`
|
||||
}
|
||||
|
||||
type closeUpload struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FileID string `json:"file_id"`
|
||||
Size int64 `json:"file_size"`
|
||||
TempLocation string `json:"temp_location"`
|
||||
}
|
||||
|
||||
type closeUploadResponse struct {
|
||||
FileID string `json:"FileID"`
|
||||
FileHash string `json:"FileHash"`
|
||||
Size int64 `json:"Size"`
|
||||
}
|
||||
|
||||
type permissions struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FileID string `json:"file_id"`
|
||||
FileIsPublic int64 `json:"file_ispublic"`
|
||||
}
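Purely as an illustration (not part of this commit): the Error type defined at the top of this file is typically consumed like the hypothetical helper below, relying on the encoding/json import already present in types.go.

// decodeError is a hypothetical helper showing how the Error type above
// surfaces API failures: unmarshal the JSON error body and return it as an
// error value, whose Error() method renders "<message> (Error <code>)".
func decodeError(body []byte) error {
	apiErr := new(Error)
	if err := json.Unmarshal(body, apiErr); err != nil {
		return err
	}
	return apiErr
}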
|
||||
@@ -151,3 +151,35 @@ type ChecksumFileResult struct {
|
||||
Hashes
|
||||
Metadata Item `json:"metadata"`
|
||||
}
|
||||
|
||||
// UserInfo is returned from /userinfo
|
||||
type UserInfo struct {
|
||||
Error
|
||||
Cryptosetup bool `json:"cryptosetup"`
|
||||
Plan int `json:"plan"`
|
||||
CryptoSubscription bool `json:"cryptosubscription"`
|
||||
PublicLinkQuota int64 `json:"publiclinkquota"`
|
||||
Email string `json:"email"`
|
||||
UserID int `json:"userid"`
|
||||
Result int `json:"result"`
|
||||
Quota int64 `json:"quota"`
|
||||
TrashRevretentionDays int `json:"trashrevretentiondays"`
|
||||
Premium bool `json:"premium"`
|
||||
PremiumLifetime bool `json:"premiumlifetime"`
|
||||
EmailVerified bool `json:"emailverified"`
|
||||
UsedQuota int64 `json:"usedquota"`
|
||||
Language string `json:"language"`
|
||||
Business bool `json:"business"`
|
||||
CryptoLifetime bool `json:"cryptolifetime"`
|
||||
Registered string `json:"registered"`
|
||||
Journey struct {
|
||||
Claimed bool `json:"claimed"`
|
||||
Steps struct {
|
||||
VerifyMail bool `json:"verifymail"`
|
||||
UploadFile bool `json:"uploadfile"`
|
||||
AutoUpload bool `json:"autoupload"`
|
||||
DownloadApp bool `json:"downloadapp"`
|
||||
DownloadDrive bool `json:"downloaddrive"`
|
||||
} `json:"steps"`
|
||||
} `json:"journey"`
|
||||
}
|
||||
|
||||
@@ -17,13 +17,14 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/pcloud/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -66,26 +67,31 @@ func init() {
|
||||
Name: "pcloud",
|
||||
Description: "Pcloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("pcloud", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("pcloud", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Pcloud App Client Id - leave blank normally.",
|
||||
Help: "Pcloud App Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Pcloud App Client Secret - leave blank normally.",
|
||||
Help: "Pcloud App Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
}
|
||||
|
||||
// Fs represents a remote pcloud
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -130,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a pcloud path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an pcloud 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -233,9 +236,15 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Pcloud: %v", err)
|
||||
}
|
||||
@@ -243,6 +252,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
@@ -806,6 +816,30 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About() (usage *fs.Usage, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/userinfo",
|
||||
}
|
||||
var resp *http.Response
|
||||
var q api.UserInfo
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &q)
|
||||
err = q.Error.Update(err)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "about failed")
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
|
||||
Free: fs.NewUsageValue(q.Quota - q.UsedQuota), // bytes which can be uploaded before reaching the quota
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5 | hash.SHA1)
|
||||
@@ -1098,6 +1132,11 @@ func (o *Object) Remove() error {
|
||||
})
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1107,5 +1146,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1,75 +1,17 @@
|
||||
// Test Pcloud filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package pcloud_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/pcloud"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*pcloud.Object)(nil))
|
||||
fstests.RemoteName = "TestPcloud:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestPcloud:",
|
||||
NilObject: (*pcloud.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Package qingstor provides an interface to QingStor object storage
|
||||
// Home: https://www.qingcloud.com/
|
||||
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -17,7 +17,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -34,49 +35,43 @@ func init() {
|
||||
Description: "QingCloud Object Storage",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
Help: "Enter QingStor credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM)",
|
||||
},
|
||||
},
|
||||
Name: "env_auth",
|
||||
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter QingStor credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM)",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
|
||||
}, {
|
||||
Name: "zone",
|
||||
Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "pek3a",
|
||||
|
||||
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
|
||||
},
|
||||
{
|
||||
Value: "sh1a",
|
||||
|
||||
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
|
||||
},
|
||||
{
|
||||
Value: "gd2a",
|
||||
|
||||
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
|
||||
},
|
||||
},
|
||||
Help: "Zone to connect to.\nDefault is \"pek3a\".",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "pek3a",
|
||||
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
|
||||
}, {
|
||||
Value: "sh1a",
|
||||
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
|
||||
}, {
|
||||
Value: "gd2a",
|
||||
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
|
||||
}},
|
||||
}, {
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retries.",
|
||||
Default: 3,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -95,17 +90,28 @@ func timestampToTime(tp int64) time.Time {
|
||||
return tm.UTC()
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
}
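The Options struct above is filled by configstruct.Set from the backend's configmap.Mapper, the pattern this changeset rolls out across backends. A minimal, self-contained sketch of that flow follows; the option names here are illustrative, not the full QingStor set.

package example

import (
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// exampleOptions mirrors the tagged-struct style used above: each field's
// `config` tag names the configuration key it is read from.
type exampleOptions struct {
	Endpoint          string `config:"endpoint"`
	ConnectionRetries int    `config:"connection_retries"`
}

// parseOptions shows the NewFs-style call: configstruct.Set copies values
// out of the configmap.Mapper into the tagged struct.
func parseOptions(m configmap.Mapper) (*exampleOptions, error) {
	opt := new(exampleOptions)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	return opt, nil
}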
|
||||
|
||||
// Fs represents a remote qingstor server
|
||||
type Fs struct {
|
||||
name string // The name of the remote
|
||||
root string // The root is a subdir, is a special object
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *qs.Service // The connection to the qingstor server
|
||||
zone string // The zone we are working on
|
||||
bucket string // The bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
root string // The root is a subdir, is a special object
|
||||
features *fs.Features // optional features
|
||||
svc *qs.Service // The connection to the qingstor server
|
||||
}
|
||||
|
||||
// Object describes a qingstor object
|
||||
@@ -165,12 +171,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
|
||||
}
|
||||
|
||||
// qsConnection makes a connection to qingstor
|
||||
func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
accessKeyID := config.FileGet(name, "access_key_id")
|
||||
secretAccessKey := config.FileGet(name, "secret_access_key")
|
||||
func qsServiceConnection(opt *Options) (*qs.Service, error) {
|
||||
accessKeyID := opt.AccessKeyID
|
||||
secretAccessKey := opt.SecretAccessKey
|
||||
|
||||
switch {
|
||||
case config.FileGetBool(name, "env_auth", false):
|
||||
case opt.EnvAuth:
|
||||
// No need for empty checks if "env_auth" is true
|
||||
case accessKeyID == "" && secretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
@@ -184,7 +190,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
host := "qingstor.com"
|
||||
port := 443
|
||||
|
||||
endpoint := config.FileGet(name, "endpoint", "")
|
||||
endpoint := opt.Endpoint
|
||||
if endpoint != "" {
|
||||
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
|
||||
|
||||
@@ -204,48 +210,49 @@ func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
|
||||
}
|
||||
|
||||
connectionRetries := 3
|
||||
retries := config.FileGet(name, "connection_retries", "")
|
||||
if retries != "" {
|
||||
connectionRetries, _ = strconv.Atoi(retries)
|
||||
}
|
||||
|
||||
cf, err := qsConfig.NewDefault()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cf.AccessKeyID = accessKeyID
|
||||
cf.SecretAccessKey = secretAccessKey
|
||||
cf.Protocol = protocol
|
||||
cf.Host = host
|
||||
cf.Port = port
|
||||
cf.ConnectionRetries = connectionRetries
|
||||
cf.ConnectionRetries = opt.ConnectionRetries
|
||||
cf.Connection = fshttp.NewClient(fs.Config)
|
||||
|
||||
svc, _ := qs.Init(cf)
|
||||
|
||||
return svc, err
|
||||
return qs.Init(cf)
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bucket, key, err := qsParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svc, err := qsServiceConnection(name)
|
||||
svc, err := qsServiceConnection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zone := config.FileGet(name, "zone")
|
||||
if zone == "" {
|
||||
zone = "pek3a"
|
||||
if opt.Zone == "" {
|
||||
opt.Zone = "pek3a"
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
zone: zone,
|
||||
root: key,
|
||||
bucket: bucket,
|
||||
opt: *opt,
|
||||
svc: svc,
|
||||
zone: opt.Zone,
|
||||
bucket: bucket,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -258,7 +265,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f.root += "/"
|
||||
}
|
||||
//Check to see if the object exists
|
||||
bucketInit, err := svc.Bucket(bucket, zone)
|
||||
bucketInit, err := svc.Bucket(bucket, opt.Zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
// Test QingStor filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package qingstor_test
|
||||
|
||||
@@ -11,68 +8,13 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/qingstor"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*qingstor.Object)(nil))
|
||||
fstests.RemoteName = "TestQingStor:"
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestQingStor:",
|
||||
NilObject: (*qingstor.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
|
||||
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
|
||||
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Build for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build plan9 !go1.7
|
||||
// +build plan9
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Upload object to QingStor
|
||||
|
||||
// +build !plan9,go1.7
|
||||
// +build !plan9
|
||||
|
||||
package qingstor
|
||||
|
||||
@@ -19,10 +19,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
|
||||
maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
|
||||
minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
|
||||
maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload
|
||||
// maxSinglePartSize = 1024 * 1024 * 1024 * 5 // The maximum allowed size when uploading a single object to QingStor
|
||||
// maxMultiPartSize = 1024 * 1024 * 1024 * 1 // The maximum allowed part size when uploading a part to QingStor
|
||||
minMultiPartSize = 1024 * 1024 * 4 // The minimum allowed part size when uploading a part to QingStor
|
||||
maxMultiParts = 10000 // The maximum allowed number of parts in a multi-part upload
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -130,12 +130,12 @@ func (u *uploader) init() {
|
||||
u.totalSize = -1
|
||||
switch r := u.cfg.body.(type) {
|
||||
case io.Seeker:
|
||||
pos, _ := r.Seek(0, 1)
|
||||
pos, _ := r.Seek(0, io.SeekCurrent)
|
||||
defer func() {
|
||||
_, _ = r.Seek(pos, 0)
|
||||
_, _ = r.Seek(pos, io.SeekStart)
|
||||
}()
|
||||
|
||||
n, err := r.Seek(0, 2)
|
||||
n, err := r.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return
|
||||
}
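Side note on the pattern above (remember the position, seek to the end, restore): it is the standard way to size an io.Seeker without consuming it. A small illustrative helper follows, not rclone code, assuming the surrounding file's io import.

// remainingSize reports how many bytes remain between r's current position
// and its end, restoring the original position afterwards. It mirrors the
// uploader.init logic shown above.
func remainingSize(r io.Seeker) (int64, error) {
	pos, err := r.Seek(0, io.SeekCurrent)
	if err != nil {
		return -1, err
	}
	end, err := r.Seek(0, io.SeekEnd)
	if err != nil {
		return -1, err
	}
	_, err = r.Seek(pos, io.SeekStart)
	return end - pos, err
}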
|
||||
|
||||
557 backend/s3/s3.go
@@ -37,8 +37,8 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -51,30 +51,57 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 (also Dreamhost, Ceph, Minio, IBM COS)",
|
||||
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
|
||||
NewFs: NewFs,
|
||||
// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
Help: "Enter AWS credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM)",
|
||||
},
|
||||
},
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your S3 provider.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "AWS",
|
||||
Help: "Amazon Web Services (AWS) S3",
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
}, {
|
||||
Value: "DigitalOcean",
|
||||
Help: "Digital Ocean Spaces",
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
}, {
|
||||
Value: "IBMCOS",
|
||||
Help: "IBM COS S3",
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
}, {
|
||||
Value: "Other",
|
||||
Help: "Any other S3 compatible provider",
|
||||
}},
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter AWS credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM)",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-east-1",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
|
||||
@@ -117,16 +144,158 @@ func init() {
|
||||
}, {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
|
||||
}, {
|
||||
Value: "other-v2-signature",
|
||||
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.\nSet this and make sure you set the endpoint.",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
|
||||
}, {
|
||||
Value: "other-v2-signature",
|
||||
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
|
||||
Provider: "IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3-api.us-geo.objectstorage.softlayer.net",
|
||||
Help: "US Cross Region Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
|
||||
Help: "US Cross Region Dallas Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
|
||||
Help: "US Cross Region Washington DC Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
|
||||
Help: "US Cross Region San Jose Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
|
||||
Help: "US Cross Region Private Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
|
||||
Help: "US Cross Region Dallas Private Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
|
||||
Help: "US Cross Region Washington DC Private Endpoint",
|
||||
}, {
|
||||
Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
|
||||
Help: "US Cross Region San Jose Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.us-east.objectstorage.softlayer.net",
|
||||
Help: "US Region East Endpoint",
|
||||
}, {
|
||||
Value: "s3.us-east.objectstorage.service.networklayer.com",
|
||||
Help: "US Region East Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.us-south.objectstorage.softlayer.net",
|
||||
Help: "US Region South Endpoint",
|
||||
}, {
|
||||
Value: "s3.us-south.objectstorage.service.networklayer.com",
|
||||
Help: "US Region South Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-geo.objectstorage.softlayer.net",
|
||||
Help: "EU Cross Region Endpoint",
|
||||
}, {
|
||||
Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
|
||||
Help: "EU Cross Region Frankfurt Endpoint",
|
||||
}, {
|
||||
Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
|
||||
Help: "EU Cross Region Milan Endpoint",
|
||||
}, {
|
||||
Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
|
||||
Help: "EU Cross Region Amsterdam Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-geo.objectstorage.service.networklayer.com",
|
||||
Help: "EU Cross Region Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
|
||||
Help: "EU Cross Region Frankfurt Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
|
||||
Help: "EU Cross Region Milan Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
|
||||
Help: "EU Cross Region Amsterdam Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.softlayer.net",
|
||||
Help: "Great Britan Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
|
||||
Help: "Great Britan Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Endpoint",
|
||||
}, {
|
||||
Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Tokyo Endpoint",
|
||||
}, {
|
||||
Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional HongKong Endpoint",
|
||||
}, {
|
||||
Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Seoul Endpoint",
|
||||
}, {
|
||||
Value: "s3.ap-geo.objectstorage.service.networklayer.com",
|
||||
Help: "APAC Cross Regional Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
|
||||
Help: "APAC Cross Regional Tokyo Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
|
||||
Help: "APAC Cross Regional HongKong Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
|
||||
Help: "APAC Cross Regional Seoul Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.mel01.objectstorage.softlayer.net",
|
||||
Help: "Melbourne Single Site Endpoint",
|
||||
}, {
|
||||
Value: "s3.mel01.objectstorage.service.networklayer.com",
|
||||
Help: "Melbourne Single Site Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.tor01.objectstorage.softlayer.net",
|
||||
Help: "Toronto Single Site Endpoint",
|
||||
}, {
|
||||
Value: "s3.tor01.objectstorage.service.networklayer.com",
|
||||
Help: "Toronto Single Site Private Endpoint",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
Value: "nyc3.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces New York 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "ams3.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces Amsterdam 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "sgp1.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces Singapore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi Object Storage",
|
||||
Provider: "Wasabi",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
|
||||
@@ -170,31 +339,159 @@ func init() {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (Sao Paulo) Region.",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
|
||||
Provider: "IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-standard",
|
||||
Help: "US Cross Region Standard",
|
||||
}, {
|
||||
Value: "us-vault",
|
||||
Help: "US Cross Region Vault",
|
||||
}, {
|
||||
Value: "us-cold",
|
||||
Help: "US Cross Region Cold",
|
||||
}, {
|
||||
Value: "us-flex",
|
||||
Help: "US Cross Region Flex",
|
||||
}, {
|
||||
Value: "us-east-standard",
|
||||
Help: "US East Region Standard",
|
||||
}, {
|
||||
Value: "us-east-vault",
|
||||
Help: "US East Region Vault",
|
||||
}, {
|
||||
Value: "us-east-cold",
|
||||
Help: "US East Region Cold",
|
||||
}, {
|
||||
Value: "us-east-flex",
|
||||
Help: "US East Region Flex",
|
||||
}, {
|
||||
Value: "us-south-standard",
|
||||
Help: "US Sout hRegion Standard",
|
||||
}, {
|
||||
Value: "us-south-vault",
|
||||
Help: "US South Region Vault",
|
||||
}, {
|
||||
Value: "us-south-cold",
|
||||
Help: "US South Region Cold",
|
||||
}, {
|
||||
Value: "us-south-flex",
|
||||
Help: "US South Region Flex",
|
||||
}, {
|
||||
Value: "eu-standard",
|
||||
Help: "EU Cross Region Standard",
|
||||
}, {
|
||||
Value: "eu-vault",
|
||||
Help: "EU Cross Region Vault",
|
||||
}, {
|
||||
Value: "eu-cold",
|
||||
Help: "EU Cross Region Cold",
|
||||
}, {
|
||||
Value: "eu-flex",
|
||||
Help: "EU Cross Region Flex",
|
||||
}, {
|
||||
Value: "eu-gb-standard",
|
||||
Help: "Great Britan Standard",
|
||||
}, {
|
||||
Value: "eu-gb-vault",
|
||||
Help: "Great Britan Vault",
|
||||
}, {
|
||||
Value: "eu-gb-cold",
|
||||
Help: "Great Britan Cold",
|
||||
}, {
|
||||
Value: "eu-gb-flex",
|
||||
Help: "Great Britan Flex",
|
||||
}, {
|
||||
Value: "ap-standard",
|
||||
Help: "APAC Standard",
|
||||
}, {
|
||||
Value: "ap-vault",
|
||||
Help: "APAC Vault",
|
||||
}, {
|
||||
Value: "ap-cold",
|
||||
Help: "APAC Cold",
|
||||
}, {
|
||||
Value: "ap-flex",
|
||||
Help: "APAC Flex",
|
||||
}, {
|
||||
Value: "mel01-standard",
|
||||
Help: "Melbourne Standard",
|
||||
}, {
|
||||
Value: "mel01-vault",
|
||||
Help: "Melbourne Vault",
|
||||
}, {
|
||||
Value: "mel01-cold",
|
||||
Help: "Melbourne Cold",
|
||||
}, {
|
||||
Value: "mel01-flex",
|
||||
Help: "Melbourne Flex",
|
||||
}, {
|
||||
Value: "tor01-standard",
|
||||
Help: "Toronto Standard",
|
||||
}, {
|
||||
Value: "tor01-vault",
|
||||
Help: "Toronto Vault",
|
||||
}, {
|
||||
Value: "tor01-cold",
|
||||
Help: "Toronto Cold",
|
||||
}, {
|
||||
Value: "tor01-flex",
|
||||
Help: "Toronto Flex",
|
||||
}},
|
||||
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS",
}, {
Name: "acl",
Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Provider: "!IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-full-control",
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Value: "bucket-owner-full-control",
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
Provider: "IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
Provider: "IBMCOS",
}},
}, {
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
@@ -203,8 +500,9 @@ func init() {
|
||||
Help: "AES256",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing objects in S3.",
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing objects in S3.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
@@ -217,7 +515,30 @@ func init() {
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Standard Infrequent Access storage class",
|
||||
}, {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to use for uploading",
|
||||
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: "Don't store MD5 checksum with object metadata",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: "Concurrency for multipart uploads.",
|
||||
Default: 2,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -232,28 +553,36 @@ const (
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
)

// Globals
var (
// Flags
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
)
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
ServerSideEncryption string `config:"server_side_encryption"`
StorageClass string `config:"storage_class"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
UploadConcurrency int `config:"upload_concurrency"`
}

// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
acl string // ACL for new buckets / objects
locationConstraint string // location constraint of new buckets
sse string // the type of server-side encryption
storageClass string // storage class
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}

// Object describes a s3 object
@@ -315,12 +644,12 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
}

// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, *session.Session, error) {
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: config.FileGet(name, "access_key_id"),
SecretAccessKey: config.FileGet(name, "secret_access_key"),
SessionToken: config.FileGet(name, "session_token"),
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}

lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
@@ -336,6 +665,11 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
&credentials.EnvProvider{},

// A SharedCredentialsProvider retrieves credentials
// from the current user's home directory. It checks
// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
&credentials.SharedCredentialsProvider{},

// Pick up IAM role if we're in an ECS task
defaults.RemoteCredProvider(*def.Config, def.Handlers),

@@ -350,7 +684,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
cred := credentials.NewChainCredentials(providers)

switch {
case config.FileGetBool(name, "env_auth", false):
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case v.AccessKeyID == "" && v.SecretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -361,26 +695,24 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
return nil, nil, errors.New("secret_access_key not found")
}

endpoint := config.FileGet(name, "endpoint")
region := config.FileGet(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
if opt.Region == "" && opt.Endpoint == "" {
opt.Endpoint = "https://s3.amazonaws.com/"
}
if region == "" {
region = "us-east-1"
if opt.Region == "" {
opt.Region = "us-east-1"
}
awsConfig := aws.NewConfig().
WithRegion(region).
WithRegion(opt.Region).
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithEndpoint(endpoint).
WithEndpoint(opt.Endpoint).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
ses := session.New()
c := s3.New(ses, awsConfig)
if region == "other-v2-signature" {
fs.Debugf(name, "Using v2 auth")
if opt.Region == "other-v2-signature" {
fs.Debugf(nil, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Config.Credentials == credentials.AnonymousCredentials {
@@ -396,37 +728,37 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
c, ses, err := s3Connection(name)
c, ses, err := s3Connection(opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
c: c,
bucket: bucket,
ses: ses,
acl: config.FileGet(name, "acl"),
root: directory,
locationConstraint: config.FileGet(name, "location_constraint"),
sse: config.FileGet(name, "server_side_encryption"),
storageClass: config.FileGet(name, "storage_class"),
name: name,
root: directory,
opt: *opt,
c: c,
bucket: bucket,
ses: ses,
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if *s3ACL != "" {
f.acl = *s3ACL
}
if *s3StorageClass != "" {
f.storageClass = *s3StorageClass
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
@@ -549,6 +881,18 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
continue
}
remote := key[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
remote = remote[:len(remote)-1]
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
@@ -739,11 +1083,11 @@ func (f *Fs) Mkdir(dir string) error {
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.acl,
ACL: &f.opt.ACL,
}
if f.locationConstraint != "" {
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.locationConstraint,
LocationConstraint: &f.opt.LocationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
@@ -928,6 +1272,9 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
if fs.Config.UseServerModTime {
return o.lastModified
}
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
@@ -969,7 +1316,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.acl,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
CopySource: aws.String(pathEscape(sourceKey)),
@@ -1025,10 +1372,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
size := src.Size()

uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = 2
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = s3manager.MinUploadPartSize
u.PartSize = int64(o.fs.opt.ChunkSize)

if size == -1 {
// Make parts as small as possible while still being able to upload to the
@@ -1048,7 +1395,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}

if size > uploader.PartSize {
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
hash, err := src.Hash(hash.MD5)

if err == nil && matchMd5.MatchString(hash) {
@@ -1066,18 +1413,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
key := o.fs.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.acl,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.sse != "" {
req.ServerSideEncryption = &o.fs.sse
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.storageClass != "" {
req.StorageClass = &o.fs.storageClass
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
_, err = uploader.Upload(&req)
if err != nil {

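To make the effect of the new chunk_size and upload_concurrency options concrete, here is a small, self-contained sketch (not part of the diff, and not rclone code) showing how the AWS SDK's s3manager uploader is tuned with a part size and a concurrency level; the region, bucket and file names are placeholders.

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// A plain SDK session; the backend builds its own via s3Connection.
	ses := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

	// Mirror what the backend now does with opt.ChunkSize and
	// opt.UploadConcurrency: configure the uploader per transfer.
	uploader := s3manager.NewUploader(ses, func(u *s3manager.Uploader) {
		u.PartSize = 10 * 1024 * 1024 // e.g. a 10 MiB chunk size
		u.Concurrency = 4             // e.g. four parallel part uploads
		u.LeavePartsOnError = false
	})

	f, err := os.Open("big.file") // placeholder local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		Key:    aws.String("big.file"),
		Body:   f,
	})
	if err != nil {
		log.Fatal(err)
	}
}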
@@ -1,75 +1,17 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package s3_test

import (
"testing"

"github.com/ncw/rclone/backend/s3"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*s3.Object)(nil))
fstests.RemoteName = "TestS3:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*s3.Object)(nil),
})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -1,10 +1,12 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp

// +build !plan9
// +build !plan9,go1.9

package sftp

import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
@@ -18,7 +20,8 @@ import (

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -27,7 +30,6 @@ import (
"github.com/pkg/sftp"
"github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/net/context"
"golang.org/x/time/rate"
)

@@ -37,9 +39,6 @@ const (

var (
currentUser = readCurrentUser()

// Flags
sftpAskPassword = flags.BoolP("sftp-ask-password", "", false, "Allow asking for SFTP password when needed.")
)

func init() {
@@ -50,32 +49,28 @@ func init() {
Options: []fs.Option{{
Name: "host",
Help: "SSH host to connect to",
Optional: false,
Required: true,
Examples: []fs.OptionExample{{
Value: "example.com",
Help: "Connect to example.com",
}},
}, {
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser,
Optional: true,
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser,
}, {
Name: "port",
Help: "SSH port, leave blank to use default (22)",
Optional: true,
Name: "port",
Help: "SSH port, leave blank to use default (22)",
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
Optional: true,
IsPassword: true,
}, {
Name: "key_file",
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
Optional: true,
Name: "key_file",
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
}, {
Name: "use_insecure_cipher",
Help: "Enable the user of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker..",
Optional: true,
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
Default: false,
Examples: []fs.OptionExample{
{
Value: "false",
@@ -86,30 +81,56 @@ func init() {
},
},
}, {
Name: "disable_hashcheck",
Help: "Disable the exectution of SSH commands to determine if remote file hashing is available, leave blank unless you know what you are doing.",
Optional: true,
Name: "disable_hashcheck",
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
Name: "ask_password",
Default: false,
Help: "Allow asking for SFTP password when needed.",
Advanced: true,
}, {
Name: "path_override",
Default: "",
Help: "Override path used by SSH connection.",
Advanced: true,
}, {
Name: "set_modtime",
Default: true,
Help: "Set the modified time on the remote if set.",
Advanced: true,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyFile string `config:"key_file"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
}

// Fs stores the interface to the remote SFTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
config *ssh.ClientConfig
host string
port string
url string
mkdirLock *stringLock
cachedHashes *hash.Set
hashcheckDisabled bool
setModtime bool
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
name string
root string
opt Options // parsed options
features *fs.Features // optional features
config *ssh.ClientConfig
url string
mkdirLock *stringLock
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -123,12 +144,6 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}

// ObjectReader holds the sftp.File interface to a remote SFTP file opened for reading
type ObjectReader struct {
object *Object
sftpFile *sftp.File
}

// readCurrentUser finds the current user name or "" if not found
func readCurrentUser() (userName string) {
usr, err := user.Current()
@@ -201,7 +216,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
c = &conn{
err: make(chan error, 1),
}
c.sshClient, err = Dial("tcp", f.host+":"+f.port, f.config)
c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
@@ -274,35 +289,33 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
user := config.FileGet(name, "user")
host := config.FileGet(name, "host")
port := config.FileGet(name, "port")
pass := config.FileGet(name, "pass")
keyFile := config.FileGet(name, "key_file")
insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
hashcheckDisabled := config.FileGetBool(name, "disable_hashcheck")
setModtime := config.FileGetBool(name, "set_modtime", true)
if user == "" {
user = currentUser
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if port == "" {
port = "22"
if opt.User == "" {
opt.User = currentUser
}
if opt.Port == "" {
opt.Port = "22"
}
sshConfig := &ssh.ClientConfig{
User: user,
User: opt.User,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout,
}

if insecureCipher {
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
}

// Add ssh agent-auth if no password or file specified
if pass == "" && keyFile == "" {
if opt.Pass == "" && opt.KeyFile == "" {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -315,8 +328,8 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Load key file if specified
if keyFile != "" {
key, err := ioutil.ReadFile(keyFile)
if opt.KeyFile != "" {
key, err := ioutil.ReadFile(opt.KeyFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read private key file")
}
@@ -328,8 +341,8 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Auth from password if specified
if pass != "" {
clearpass, err := obscure.Reveal(pass)
if opt.Pass != "" {
clearpass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, err
}
@@ -337,23 +350,20 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Ask for password if none was defined and we're allowed to
if pass == "" && *sftpAskPassword {
fmt.Fprint(os.Stderr, "Enter SFTP password: ")
if opt.Pass == "" && opt.AskPassword {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
clearpass := config.ReadPassword()
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}

f := &Fs{
name: name,
root: root,
config: sshConfig,
host: host,
port: port,
url: "sftp://" + user + "@" + host + ":" + port + "/" + root,
hashcheckDisabled: hashcheckDisabled,
setModtime: setModtime,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
name: name,
root: root,
opt: *opt,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -667,7 +677,7 @@ func (f *Fs) Hashes() hash.Set {
return *f.cachedHashes
}

if f.hashcheckDisabled {
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}

@@ -733,40 +743,47 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(r hash.Type) (string, error) {
if r == hash.MD5 && o.md5sum != nil {
return *o.md5sum, nil
} else if r == hash.SHA1 && o.sha1sum != nil {
return *o.sha1sum, nil
var hashCmd string
if r == hash.MD5 {
if o.md5sum != nil {
return *o.md5sum, nil
}
hashCmd = "md5sum"
} else if r == hash.SHA1 {
if o.sha1sum != nil {
return *o.sha1sum, nil
}
hashCmd = "sha1sum"
} else {
return "", hash.ErrUnsupported
}

c, err := o.fs.getSftpConnection()
if err != nil {
return "", errors.Wrap(err, "Hash")
return "", errors.Wrap(err, "Hash get SFTP connection")
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
o.fs.cachedHashes = nil // Something has changed on the remote system
return "", hash.ErrUnsupported
return "", errors.Wrap(err, "Hash put SFTP connection")
}

err = hash.ErrUnsupported
var outputBytes []byte
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if r == hash.MD5 {
outputBytes, err = session.Output("md5sum " + escapedPath)
} else if r == hash.SHA1 {
outputBytes, err = session.Output("sha1sum " + escapedPath)
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}

err = session.Run(hashCmd + " " + escapedPath)
if err != nil {
o.fs.cachedHashes = nil // Something has changed on the remote system
_ = session.Close()
return "", hash.ErrUnsupported
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
}

_ = session.Close()
str := parseHash(outputBytes)
str := parseHash(stdout.Bytes())
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
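For readers unfamiliar with the remote-hashing approach above, the following stand-alone sketch (stdlib plus golang.org/x/crypto/ssh, not rclone code) shows the same shape: run md5sum on the server over an SSH session and capture stdout and stderr separately. The host, credentials and path are placeholders, and a real caller would shell-escape the path as the backend does.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// remoteMD5 runs "md5sum <path>" on the server and returns the first field
// of the output, reporting stderr when the command fails.
func remoteMD5(client *ssh.Client, path string) (string, error) {
	session, err := client.NewSession()
	if err != nil {
		return "", err
	}
	defer session.Close()

	var stdout, stderr bytes.Buffer
	session.Stdout = &stdout
	session.Stderr = &stderr
	if err := session.Run("md5sum " + path); err != nil {
		return "", fmt.Errorf("md5sum failed: %v (%s)", err, bytes.TrimSpace(stderr.Bytes()))
	}
	// md5sum prints "<hash>  <path>"; the first field is the checksum.
	fields := bytes.Fields(stdout.Bytes())
	if len(fields) == 0 {
		return "", fmt.Errorf("unexpected md5sum output")
	}
	return string(fields[0]), nil
}

func main() {
	config := &ssh.ClientConfig{
		User:            "demo", // placeholder credentials
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable in a sketch, not in production
	}
	client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sum, err := remoteMD5(client, "/tmp/somefile") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum)
}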
@@ -849,7 +866,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
if err != nil {
return errors.Wrap(err, "SetModTime")
}
if o.fs.setModtime {
if o.fs.opt.SetModTime {
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
@@ -868,15 +885,49 @@ func (o *Object) Storable() bool {
return o.mode.IsRegular()
}

// objectReader represents a file open for reading on the SFTP server
type objectReader struct {
sftpFile *sftp.File
pipeReader *io.PipeReader
done chan struct{}
}

func newObjectReader(sftpFile *sftp.File) *objectReader {
pipeReader, pipeWriter := io.Pipe()
file := &objectReader{
sftpFile: sftpFile,
pipeReader: pipeReader,
done: make(chan struct{}),
}

go func() {
// Use sftpFile.WriteTo to pump data so that it gets a
// chance to build the window up.
_, err := sftpFile.WriteTo(pipeWriter)
// Close the pipeWriter so the pipeReader fails with
// the same error or EOF if err == nil
_ = pipeWriter.CloseWithError(err)
// signal that we've finished
close(file.done)
}()

return file
}

// Read from a remote sftp file object reader
func (file *ObjectReader) Read(p []byte) (n int, err error) {
n, err = file.sftpFile.Read(p)
func (file *objectReader) Read(p []byte) (n int, err error) {
n, err = file.pipeReader.Read(p)
return n, err
}

// Close a reader of a remote sftp file
func (file *ObjectReader) Close() (err error) {
func (file *objectReader) Close() (err error) {
// Close the sftpFile - this will likely cause the WriteTo to error
err = file.sftpFile.Close()
// Close the pipeReader so writes to the pipeWriter fail
_ = file.pipeReader.Close()
// Wait for the background process to finish
<-file.done
return err
}

@@ -905,15 +956,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
return nil, errors.Wrap(err, "Open failed")
}
if offset > 0 {
off, err := sftpFile.Seek(offset, 0)
off, err := sftpFile.Seek(offset, io.SeekStart)
if err != nil || off != offset {
return nil, errors.Wrap(err, "Open Seek failed")
}
}
in = readers.NewLimitedReadCloser(&ObjectReader{
object: o,
sftpFile: sftpFile,
}, limit)
in = readers.NewLimitedReadCloser(newObjectReader(sftpFile), limit)
return in, nil
}

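The pipe-based reader introduced above is a generally useful shape. The following stand-alone sketch (stdlib only, not rclone code) shows the same pattern with a generic io.WriterTo standing in for *sftp.File; note that the real backend additionally closes the remote file in Close before waiting for the pumping goroutine.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// pumpReader adapts an io.WriterTo into an io.ReadCloser by pumping it
// through a pipe from a goroutine, the same shape as the sftp backend's
// objectReader: WriteTo pushes large writes while Read consumes them.
type pumpReader struct {
	pr   *io.PipeReader
	done chan struct{}
}

func newPumpReader(src io.WriterTo) *pumpReader {
	pr, pw := io.Pipe()
	r := &pumpReader{pr: pr, done: make(chan struct{})}
	go func() {
		_, err := src.WriteTo(pw)
		// Propagate the error to the reading side, or EOF if err is nil.
		_ = pw.CloseWithError(err)
		close(r.done)
	}()
	return r
}

func (r *pumpReader) Read(p []byte) (int, error) { return r.pr.Read(p) }

func (r *pumpReader) Close() error {
	// Closing the read side makes the writer's next write fail,
	// then we wait for the pumping goroutine to finish.
	err := r.pr.Close()
	<-r.done
	return err
}

func main() {
	// strings.Reader implements io.WriterTo, so it stands in for *sftp.File here.
	rc := newPumpReader(strings.NewReader("hello from the pipe\n"))
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		fmt.Println("copy error:", err)
	}
}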
@@ -1,3 +1,5 @@
// +build !plan9,go1.9

package sftp

import (

@@ -1,75 +1,20 @@
// Test Sftp filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build !plan9,go1.9

package sftp_test

import (
"testing"

"github.com/ncw/rclone/backend/sftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*sftp.Object)(nil))
fstests.RemoteName = "TestSftp:"
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSftp:",
NilObject: (*sftp.Object)(nil),
})
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsChangeNotify(t *testing.T) { fstests.TestFsChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectOpenRange(t *testing.T) { fstests.TestObjectOpenRange(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestInternal(t *testing.T) { fstests.TestInternal(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

@@ -1,6 +1,6 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 !go1.9

package sftp

@@ -1,3 +1,5 @@
// +build !plan9,go1.9

package sftp

import "sync"

@@ -1,3 +1,5 @@
// +build !plan9,go1.9

package sftp

import (
