mirror of
https://github.com/rclone/rclone.git
synced 2026-01-12 21:43:13 +00:00
Compare commits
1 Commits
Cnly-graph
...
drive-untr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
77b1eaeffe |
@@ -8,7 +8,6 @@ go:
|
||||
- 1.8.7
|
||||
- 1.9.3
|
||||
- "1.10.1"
|
||||
- "1.11rc1"
|
||||
- tip
|
||||
before_install:
|
||||
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
|
||||
@@ -39,7 +38,7 @@ matrix:
|
||||
- go: tip
|
||||
include:
|
||||
- os: osx
|
||||
go: "1.11rc1"
|
||||
go: "1.10.1"
|
||||
env: GOTAGS=""
|
||||
deploy:
|
||||
provider: script
|
||||
@@ -47,5 +46,5 @@ deploy:
|
||||
skip_cleanup: true
|
||||
on:
|
||||
all_branches: true
|
||||
go: "1.11rc1"
|
||||
go: "1.10.1"
|
||||
condition: $TRAVIS_PULL_REQUEST == false
|
||||
|
||||
208
Gopkg.lock
generated
208
Gopkg.lock
generated
@@ -3,74 +3,57 @@
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:df0ad5dd3f57601a1185ce7fdc76fef7101654c602d6a8c5153bce76ff6253c0"
|
||||
name = "bazil.org/fuse"
|
||||
packages = [
|
||||
".",
|
||||
"fs",
|
||||
"fuseutil",
|
||||
"fuseutil"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4e800c0d846ed856a032380c87b22577ef03c146ccd26203b62ac90ef78e94b1"
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
pruneopts = ""
|
||||
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6f302284bb48712a01cdcd3216e8bbb293d1edb618f55b5fe7f92521cce930c7"
|
||||
name = "github.com/Azure/azure-pipeline-go"
|
||||
packages = ["pipeline"]
|
||||
pruneopts = ""
|
||||
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
|
||||
version = "0.1.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fe7593b80dc3ea36fe51ca434844b801e1695e7b680851a137ed40fa2b4d6184"
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
packages = ["2018-03-28/azblob"]
|
||||
pruneopts = ""
|
||||
revision = "eaae161d9d5e07363f04ddb19d84d57efc66d1a1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:f9282871e22332a39831bc53a3588fc04d763bf3f13312655580c7f4cb012b66"
|
||||
name = "github.com/Unknwon/goconfig"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac226c42eb54c121e0704c6f7f64c96c7817ad6d6286e5536e8cea72807e1079"
|
||||
name = "github.com/VividCortex/ewma"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:391632fa3a324c4f461f28baaf45cea8b21e13630b00f27059613f855bb544bb"
|
||||
name = "github.com/a8m/tree"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:61e512f75ec00c5d0e33e1f503fbad293f5850b76a8f62f6035097c8c436315d"
|
||||
name = "github.com/abbot/go-http-auth"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
|
||||
version = "v0.4.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f9dc8648e19ca5c4ccdf32e13301eaaff14a6662826c98926ba401d98bdea315"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
@@ -104,54 +87,42 @@
|
||||
"service/s3",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts",
|
||||
"service/sts"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||
version = "v1.14.8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a82690274ae3235a742afe7eebd9affcb97a80340af7d9b538f583cd953cff19"
|
||||
name = "github.com/billziss-gh/cgofuse"
|
||||
packages = ["fuse"]
|
||||
pruneopts = ""
|
||||
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:80db6bfe7a7d5e277987232bb650ccdeb365d00293576c5421dab101401741b6"
|
||||
name = "github.com/coreos/bbolt"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:982e2547680f9fd2212c6443ab73ea84eef40ee1cdcecb61d997de838445214c"
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
pruneopts = ""
|
||||
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = ""
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7b4b8c901568da024c49be7ff5e20fdecef629b60679c803041093823fb8d081"
|
||||
name = "github.com/djherbis/times"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2a99d23b565e06fe1930f444c53c066216f06465c8d1d097b691f23c169858ea"
|
||||
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
|
||||
packages = [
|
||||
"dropbox",
|
||||
@@ -164,241 +135,185 @@
|
||||
"dropbox/team_common",
|
||||
"dropbox/team_policies",
|
||||
"dropbox/users",
|
||||
"dropbox/users_common",
|
||||
"dropbox/users_common"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
|
||||
version = "v4.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:617b3e0f5989d4ff866a1820480990c65dfc9257eb080da749a45e2d76681b02"
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||
version = "v1.37.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = ""
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9abc49f39e3e23e262594bb4fb70abf74c0c99e94f99153f43b143805e850719"
|
||||
name = "github.com/google/go-querystring"
|
||||
packages = ["query"]
|
||||
pruneopts = ""
|
||||
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:64b78d98b8956492576911baf6a1e3499816d4575e485d12792e4abe7d8b6c46"
|
||||
name = "github.com/jlaffaye/ftp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e"
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2c5ad58492804c40bdaf5d92039b0cde8b5becd2b7feeb37d7d1cc36a8aa8dbe"
|
||||
name = "github.com/kardianos/osext"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1bea35e6d6ea2712107d20a9770cf331b5c18e17ba02d28011939d9bc7c67534"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:81e673df85e765593a863f67cba4544cf40e8919590f04d67664940786c2b61a"
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
|
||||
version = "v0.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1f8bb0521ae032c1e4ba38adb361a1840b81c8bf52bea5fcf900edf44c17f6cb"
|
||||
name = "github.com/ncw/go-acd"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7a4827b2062a21ba644241bdec27959a5be2670f8aa4038ba14cfe2ce389e8d2"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
|
||||
version = "v1.0.39"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:1a25d906193d34ce43e96fcab1a2de2b90e0242f9b0c176564db1b268bf48ea5"
|
||||
name = "github.com/nsf/termbox-go"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7cdd61c36e251a51489ac7e73d070e3c67d8c9eb4bebf21a4886d6eec54d909c"
|
||||
name = "github.com/okzk/sdnotify"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "d9becc38acbd785892af7637319e2c5e101057f7"
|
||||
revision = "ed8ca104421a21947710335006107540e3ecb335"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4c0404dc03d974acd5fcd8b8d3ce687b13bd169db032b89275e8b9d77b98ce8c"
|
||||
name = "github.com/patrickmn/go-cache"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
|
||||
version = "v2.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f6a7088857981d9d011920d6340f67d7b7649909918cf1ee6ba293718acc9e26"
|
||||
name = "github.com/pengsrc/go-shared"
|
||||
packages = [
|
||||
"buffer",
|
||||
"check",
|
||||
"convert",
|
||||
"log",
|
||||
"reopen",
|
||||
"reopen"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a80af40d2e94dd143f966f7f5968e762a282ea739ba4ecb14d231503c200065b"
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
|
||||
version = "1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = ""
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c426863e69173c90404c3d3ad4acde6ff87f9353658a670ac6fdbe08a633750f"
|
||||
name = "github.com/rfjakob/eme"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "01668ae55fe0b79a483095689043cce3e80260db"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fd0e88ec70bf0efce538ec8b968a824d992cc60a6cf1539698aa366b3527a053"
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:05dc5c00381eccf0bbc07717248b8757e0e9318877e15e09316fac9b72f1b3ef"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:50b5be512f924d289f20e8b2aef8951d98b9bd8c44666cf169514906df597a4c"
|
||||
name = "github.com/skratchdot/open-golang"
|
||||
packages = ["open"]
|
||||
pruneopts = ""
|
||||
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:a1403cc8a94b8d7956ee5e9694badef0e7b051af289caad1cf668331e3ffa4f6"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [
|
||||
".",
|
||||
"doc",
|
||||
"doc"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8e243c568f36b09031ec18dff5f7d2769dcf5ca4d624ea511c8e3197dc3d352d"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require",
|
||||
"require"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3c0753359567ac0500e17324c4da80398ee773093b4586e46210eea9dc03d155"
|
||||
name = "github.com/t3rm1n4l/go-mega"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "854bf31d998b151cf5f94529c815bc4c67322949"
|
||||
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:afc0b8068986a01e2d8f449917829753a54f6bd4d1265c2b4ad9cba75560020f"
|
||||
branch = "master"
|
||||
name = "github.com/xanzy/ssh-agent"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
|
||||
version = "v0.2.0"
|
||||
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:970aab2bde4ae92adf92ccae41eace959c66e2653ebb7e86355477e0307d0be8"
|
||||
name = "github.com/yunify/qingstor-sdk-go"
|
||||
packages = [
|
||||
".",
|
||||
@@ -411,15 +326,13 @@
|
||||
"request/signer",
|
||||
"request/unpacker",
|
||||
"service",
|
||||
"utils",
|
||||
"utils"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
|
||||
version = "v2.2.14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:419d8420cd7231162a9620ca6bc3b2c9ac98270590773d3f25d90950ccc984cc"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"bcrypt",
|
||||
@@ -436,14 +349,12 @@
|
||||
"scrypt",
|
||||
"ssh",
|
||||
"ssh/agent",
|
||||
"ssh/terminal",
|
||||
"ssh/terminal"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5dc6753986b9eeba4abdf05dedc5ba06bb52dad43cc8aad35ffb42bb7adfa68f"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
@@ -457,38 +368,32 @@
|
||||
"publicsuffix",
|
||||
"webdav",
|
||||
"webdav/internal/xml",
|
||||
"websocket",
|
||||
"websocket"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:823e7b6793b3f80b5d01da97211790dc89601937e4b70825fdcb5637ac60f04f"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt",
|
||||
"jwt"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7e5298358e5f751305289e82373c7ac6832bdc492055d6da23c72fa1d8053c3f"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
"windows"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
@@ -504,36 +409,30 @@
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:55a681cb66f28755765fa5fa5104cbd8dc85c55c02d206f9f89566451e3fe1aa"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
pruneopts = ""
|
||||
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:7d15746ff4df12481c89fd953a28122fa75368fb1fb1bb1fed918a78647b3c3a"
|
||||
name = "google.golang.org/api"
|
||||
packages = [
|
||||
"drive/v3",
|
||||
"gensupport",
|
||||
"googleapi",
|
||||
"googleapi/internal/uritemplates",
|
||||
"storage/v1",
|
||||
"storage/v1"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c1771ca6060335f9768dff6558108bc5ef6c58506821ad43377ee23ff059e472"
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
".",
|
||||
@@ -545,89 +444,20 @@
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch",
|
||||
"urlfetch"
|
||||
]
|
||||
pruneopts = ""
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = ""
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"bazil.org/fuse",
|
||||
"bazil.org/fuse/fs",
|
||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob",
|
||||
"github.com/Unknwon/goconfig",
|
||||
"github.com/VividCortex/ewma",
|
||||
"github.com/a8m/tree",
|
||||
"github.com/abbot/go-http-auth",
|
||||
"github.com/aws/aws-sdk-go/aws",
|
||||
"github.com/aws/aws-sdk-go/aws/awserr",
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers",
|
||||
"github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
|
||||
"github.com/aws/aws-sdk-go/aws/defaults",
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata",
|
||||
"github.com/aws/aws-sdk-go/aws/request",
|
||||
"github.com/aws/aws-sdk-go/aws/session",
|
||||
"github.com/aws/aws-sdk-go/service/s3",
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"github.com/billziss-gh/cgofuse/fuse",
|
||||
"github.com/coreos/bbolt",
|
||||
"github.com/djherbis/times",
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox",
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common",
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files",
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing",
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users",
|
||||
"github.com/jlaffaye/ftp",
|
||||
"github.com/ncw/go-acd",
|
||||
"github.com/ncw/swift",
|
||||
"github.com/nsf/termbox-go",
|
||||
"github.com/okzk/sdnotify",
|
||||
"github.com/patrickmn/go-cache",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/pkg/sftp",
|
||||
"github.com/rfjakob/eme",
|
||||
"github.com/sevlyar/go-daemon",
|
||||
"github.com/skratchdot/open-golang/open",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/spf13/cobra/doc",
|
||||
"github.com/spf13/pflag",
|
||||
"github.com/stretchr/testify/assert",
|
||||
"github.com/stretchr/testify/require",
|
||||
"github.com/t3rm1n4l/go-mega",
|
||||
"github.com/xanzy/ssh-agent",
|
||||
"github.com/yunify/qingstor-sdk-go/config",
|
||||
"github.com/yunify/qingstor-sdk-go/request/errors",
|
||||
"github.com/yunify/qingstor-sdk-go/service",
|
||||
"golang.org/x/crypto/nacl/secretbox",
|
||||
"golang.org/x/crypto/scrypt",
|
||||
"golang.org/x/crypto/ssh",
|
||||
"golang.org/x/crypto/ssh/terminal",
|
||||
"golang.org/x/net/context",
|
||||
"golang.org/x/net/html",
|
||||
"golang.org/x/net/http2",
|
||||
"golang.org/x/net/publicsuffix",
|
||||
"golang.org/x/net/webdav",
|
||||
"golang.org/x/net/websocket",
|
||||
"golang.org/x/oauth2",
|
||||
"golang.org/x/oauth2/google",
|
||||
"golang.org/x/sys/unix",
|
||||
"golang.org/x/text/unicode/norm",
|
||||
"golang.org/x/time/rate",
|
||||
"google.golang.org/api/drive/v3",
|
||||
"google.golang.org/api/googleapi",
|
||||
"google.golang.org/api/storage/v1",
|
||||
]
|
||||
inputs-digest = "670cdb55138aa1394b4c8f87345e9be9c8105248edda4be7176dddee2a4f5d26"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
4
Makefile
4
Makefile
@@ -11,8 +11,8 @@ LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
GO_VERSION := $(shell go version)
|
||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||
# Run full tests if go >= go1.11
|
||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
|
||||
# Run full tests if go >= go1.9
|
||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
|
||||
BETA_PATH := $(BRANCH_PATH)$(TAG)
|
||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||
|
||||
@@ -25,7 +25,6 @@ Rclone is a command line program to sync files and directories to and from
|
||||
* Google Drive
|
||||
* HTTP
|
||||
* Hubic
|
||||
* Jottacloud
|
||||
* Mega
|
||||
* Microsoft Azure Blob Storage
|
||||
* Microsoft OneDrive
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
_ "github.com/ncw/rclone/backend/googlecloudstorage"
|
||||
_ "github.com/ncw/rclone/backend/http"
|
||||
_ "github.com/ncw/rclone/backend/hubic"
|
||||
_ "github.com/ncw/rclone/backend/jottacloud"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
_ "github.com/ncw/rclone/backend/mega"
|
||||
_ "github.com/ncw/rclone/backend/onedrive"
|
||||
|
||||
@@ -48,7 +48,6 @@ const (
|
||||
maxChunkSize = 100 * 1024 * 1024
|
||||
defaultUploadCutoff = 256 * 1024 * 1024
|
||||
maxUploadCutoff = 256 * 1024 * 1024
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -80,11 +79,6 @@ func init() {
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access_tier",
|
||||
Help: "Access tier of blob, supports hot, cool and archive tiers.\nArchived blobs can be restored by setting access tier to hot or cool." +
|
||||
" Leave blank if you intend to use default access tier, which is set at account level",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -97,7 +91,6 @@ type Options struct {
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
}
|
||||
|
||||
// Fs represents a remote azure server
|
||||
@@ -219,19 +212,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt.Endpoint = storageDefaultBaseURL
|
||||
}
|
||||
|
||||
if opt.AccessTier == "" {
|
||||
opt.AccessTier = string(defaultAccessTier)
|
||||
} else {
|
||||
switch opt.AccessTier {
|
||||
case string(azblob.AccessTierHot):
|
||||
case string(azblob.AccessTierCool):
|
||||
case string(azblob.AccessTierArchive):
|
||||
// valid cases
|
||||
default:
|
||||
return nil, errors.Errorf("azure: Supported access tiers are %s, %s and %s", string(azblob.AccessTierHot), string(azblob.AccessTierCool), azblob.AccessTierArchive)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
@@ -851,7 +831,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
o.md5 = string(info.Properties.ContentMD5)
|
||||
o.md5 = string(info.Properties.ContentMD5[:])
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.modTime = info.Properties.LastModified
|
||||
@@ -966,10 +946,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
if o.AccessTier() == azblob.AccessTierArchive {
|
||||
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
@@ -1223,29 +1199,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Refresh metadata on object
|
||||
o.clearMetaData()
|
||||
err = o.readMetaData()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
|
||||
if o.fs.opt.AccessTier == string(defaultAccessTier) || o.fs.opt.AccessTier == string(o.AccessTier()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Now, set blob tier based on configured access tier
|
||||
desiredAccessTier := azblob.AccessTierType(o.fs.opt.AccessTier)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blob.SetTier(ctx, desiredAccessTier)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed to set Blob Tier")
|
||||
}
|
||||
return nil
|
||||
return o.readMetaData()
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
@@ -1265,11 +1220,6 @@ func (o *Object) MimeType() string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// AccessTier of an object, default is of type none
|
||||
func (o *Object) AccessTier() azblob.AccessTierType {
|
||||
return o.accessTier
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
|
||||
@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {
|
||||
|
||||
var _ fserrors.Fataler = (*Error)(nil)
|
||||
|
||||
// Account describes a B2 account
|
||||
type Account struct {
|
||||
ID string `json:"accountId"` // The identifier for the account.
|
||||
}
|
||||
|
||||
// Bucket describes a B2 bucket
|
||||
type Bucket struct {
|
||||
ID string `json:"bucketId"`
|
||||
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := time.Time(t).Format(versionFormat)
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
@@ -102,20 +107,20 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return time.Time(t).IsZero()
|
||||
return (time.Time)(t).IsZero()
|
||||
}
|
||||
|
||||
// Equal compares two timestamps
|
||||
//
|
||||
// If either are !IsZero then it returns false
|
||||
func (t Timestamp) Equal(s Timestamp) bool {
|
||||
if time.Time(t).IsZero() {
|
||||
if (time.Time)(t).IsZero() {
|
||||
return false
|
||||
}
|
||||
if time.Time(s).IsZero() {
|
||||
if (time.Time)(s).IsZero() {
|
||||
return false
|
||||
}
|
||||
return time.Time(t).Equal(time.Time(s))
|
||||
return (time.Time)(t).Equal((time.Time)(s))
|
||||
}
|
||||
|
||||
// File is info about a file
|
||||
@@ -132,26 +137,10 @@ type File struct {
|
||||
|
||||
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
|
||||
type AuthorizeAccountResponse struct {
|
||||
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
|
||||
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
|
||||
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
|
||||
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
|
||||
} `json:"allowed"`
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
|
||||
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
|
||||
}
|
||||
|
||||
// ListBucketsRequest is parameters for b2_list_buckets call
|
||||
type ListBucketsRequest struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
|
||||
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
|
||||
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
}
|
||||
|
||||
// ListBucketsResponse is as returned from the b2_list_buckets call
|
||||
|
||||
@@ -66,7 +66,7 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Account ID or Application Key ID",
|
||||
Help: "Account ID",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
@@ -318,11 +318,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to authorize account")
|
||||
}
|
||||
// If this is a key limited to a single bucket, it must exist already
|
||||
if f.bucket != "" && f.info.Allowed.BucketID != "" {
|
||||
f.markBucketOK()
|
||||
f.setBucketID(f.info.Allowed.BucketID)
|
||||
}
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
@@ -696,11 +691,7 @@ type listBucketFn func(*api.Bucket) error
|
||||
|
||||
// listBucketsToFn lists the buckets to the function supplied
|
||||
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
|
||||
var account = api.ListBucketsRequest{
|
||||
AccountID: f.info.AccountID,
|
||||
BucketID: f.info.Allowed.BucketID,
|
||||
}
|
||||
|
||||
var account = api.Account{ID: f.info.AccountID}
|
||||
var response api.ListBucketsResponse
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
||||
@@ -172,8 +172,8 @@ type UploadSessionResponse struct {
|
||||
// Part defines the return from upload part call which are passed to commit upload also
|
||||
type Part struct {
|
||||
PartID string `json:"part_id"`
|
||||
Offset int64 `json:"offset"`
|
||||
Size int64 `json:"size"`
|
||||
Offset int `json:"offset"`
|
||||
Size int `json:"size"`
|
||||
Sha1 string `json:"sha1"`
|
||||
}
|
||||
|
||||
|
||||
@@ -88,19 +88,13 @@ func init() {
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "commit_retries",
|
||||
Help: "Max number of times to try committing a multipart file.",
|
||||
Default: 100,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
CommitRetries int `config:"commit_retries"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
}
|
||||
|
||||
// Fs represents a remote box
|
||||
@@ -674,7 +668,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
replacedLeaf := replaceReservedChars(leaf)
|
||||
copyFile := api.CopyFile{
|
||||
copy := api.CopyFile{
|
||||
Name: replacedLeaf,
|
||||
Parent: api.Parent{
|
||||
ID: directoryID,
|
||||
@@ -683,7 +677,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
var resp *http.Response
|
||||
var info *api.Item
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, ©File, &info)
|
||||
resp, err = f.srv.CallJSON(&opts, ©, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1014,8 +1008,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
|
||||
var resp *http.Response
|
||||
var result api.FolderItems
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Body: in,
|
||||
Method: "POST",
|
||||
Body: in,
|
||||
MultipartMetadataName: "attributes",
|
||||
MultipartContentName: "contents",
|
||||
MultipartFileName: upload.Name,
|
||||
|
||||
@@ -96,9 +96,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
|
||||
request.Attributes.ContentCreatedAt = api.Time(modTime)
|
||||
var body []byte
|
||||
var resp *http.Response
|
||||
// For discussion of this value see:
|
||||
// https://github.com/ncw/rclone/issues/2054
|
||||
maxTries := o.fs.opt.CommitRetries
|
||||
maxTries := fs.Config.LowLevelRetries
|
||||
const defaultDelay = 10
|
||||
var tries int
|
||||
outer:
|
||||
|
||||
10
backend/cache/directory.go
vendored
10
backend/cache/directory.go
vendored
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
// Directory is a generic dir that stores basic information about it
|
||||
type Directory struct {
|
||||
Directory fs.Directory `json:"-"` // can be nil
|
||||
fs.Directory `json:"-"`
|
||||
|
||||
CacheFs *Fs `json:"-"` // cache fs
|
||||
Name string `json:"name"` // name of the directory
|
||||
@@ -125,14 +125,6 @@ func (d *Directory) Items() int64 {
|
||||
return d.CacheItems
|
||||
}
|
||||
|
||||
// ID returns the ID of the cached directory if known
|
||||
func (d *Directory) ID() string {
|
||||
if d.Directory == nil {
|
||||
return ""
|
||||
}
|
||||
return d.Directory.ID()
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Directory = (*Directory)(nil)
|
||||
)
|
||||
|
||||
9
backend/cache/object.go
vendored
9
backend/cache/object.go
vendored
@@ -353,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
|
||||
return started
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or
|
||||
// nil if it isn't wrapping anything
|
||||
func (o *Object) UnWrap() fs.Object {
|
||||
return o.Object
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
4
backend/cache/plex.go
vendored
4
backend/cache/plex.go
vendored
@@ -210,9 +210,7 @@ func (p *plexConnector) authenticate() error {
|
||||
}
|
||||
p.token = token
|
||||
if p.token != "" {
|
||||
if p.saveToken != nil {
|
||||
p.saveToken(p.token)
|
||||
}
|
||||
p.saveToken(p.token)
|
||||
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
|
||||
}
|
||||
p.listenWebsocket()
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
|
||||
{"off", NameEncryptionOff, ""},
|
||||
{"standard", NameEncryptionStandard, ""},
|
||||
{"obfuscate", NameEncryptionObfuscated, ""},
|
||||
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
|
||||
{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
|
||||
} {
|
||||
actual, actualErr := NewNameEncryptionMode(test.in)
|
||||
assert.Equal(t, actual, test.expected)
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
@@ -332,13 +331,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// unwrap the accounting
|
||||
var wrap accounting.WrapFn
|
||||
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
|
||||
// add the hasher
|
||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||
// wrap the accounting back on
|
||||
wrappedIn = wrap(wrappedIn)
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
@@ -711,15 +704,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
// newDir returns a dir with the Name decrypted
|
||||
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
|
||||
newDir := fs.NewDirCopy(dir)
|
||||
new := fs.NewDirCopy(dir)
|
||||
remote := dir.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
||||
} else {
|
||||
newDir.SetRemote(decryptedRemote)
|
||||
new.SetRemote(decryptedRemote)
|
||||
}
|
||||
return newDir
|
||||
return new
|
||||
}
|
||||
|
||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||
|
||||
@@ -8,7 +8,6 @@ package drive
|
||||
// * files with / in name
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -30,7 +29,6 @@ import (
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/dircache"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -68,29 +66,29 @@ var (
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
}
|
||||
mimeTypeToExtension = map[string]string{
|
||||
"application/epub+zip": "epub",
|
||||
"application/msword": "doc",
|
||||
"application/pdf": "pdf",
|
||||
"application/rtf": "rtf",
|
||||
"application/vnd.ms-excel": "xls",
|
||||
"application/vnd.oasis.opendocument.presentation": "odp",
|
||||
"application/vnd.oasis.opendocument.spreadsheet": "ods",
|
||||
"application/vnd.oasis.opendocument.text": "odt",
|
||||
"application/epub+zip": "epub",
|
||||
"application/msword": "doc",
|
||||
"application/pdf": "pdf",
|
||||
"application/rtf": "rtf",
|
||||
"application/vnd.ms-excel": "xls",
|
||||
"application/vnd.oasis.opendocument.presentation": "odp",
|
||||
"application/vnd.oasis.opendocument.spreadsheet": "ods",
|
||||
"application/vnd.oasis.opendocument.text": "odt",
|
||||
"application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
|
||||
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
|
||||
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
|
||||
"application/x-vnd.oasis.opendocument.spreadsheet": "ods",
|
||||
"application/zip": "zip",
|
||||
"image/jpeg": "jpg",
|
||||
"image/png": "png",
|
||||
"image/svg+xml": "svg",
|
||||
"text/csv": "csv",
|
||||
"text/html": "html",
|
||||
"text/plain": "txt",
|
||||
"text/tab-separated-values": "tsv",
|
||||
"application/zip": "zip",
|
||||
"image/jpeg": "jpg",
|
||||
"image/png": "png",
|
||||
"image/svg+xml": "svg",
|
||||
"text/csv": "csv",
|
||||
"text/html": "html",
|
||||
"text/plain": "txt",
|
||||
"text/tab-separated-values": "tsv",
|
||||
}
|
||||
extensionToMimeType map[string]string
|
||||
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents"
|
||||
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType"
|
||||
exportFormatsOnce sync.Once // make sure we fetch the export formats only once
|
||||
_exportFormats map[string][]string // allowed export mime-type conversions
|
||||
)
|
||||
@@ -243,6 +241,11 @@ func init() {
|
||||
Default: false,
|
||||
Help: "Keep new head revision forever.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
Default: false,
|
||||
Help: "Untrash any trashed files - use with --drive-trashed-only.",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
|
||||
@@ -274,6 +277,7 @@ type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
|
||||
KeepRevisionForever bool `config:"keep_revision_forever"`
|
||||
Untrash bool `config:"untrash"`
|
||||
}
|
||||
|
||||
// Fs represents a remote drive server
|
||||
@@ -361,21 +365,12 @@ func parseDrivePath(path string) (root string, err error) {
|
||||
// Should return true to finish processing
|
||||
type listFn func(*drive.File) bool
|
||||
|
||||
func containsString(slice []string, s string) bool {
|
||||
for _, e := range slice {
|
||||
if e == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
//
|
||||
// Search params: https://developers.google.com/drive/search-parameters
|
||||
func (f *Fs) list(dirIDs []string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
||||
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
||||
var query []string
|
||||
if !includeAll {
|
||||
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
|
||||
@@ -387,42 +382,19 @@ func (f *Fs) list(dirIDs []string, title string, directoriesOnly bool, filesOnly
|
||||
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||
parentsQuery := bytes.NewBufferString("(")
|
||||
for _, dirID := range dirIDs {
|
||||
if dirID == "" {
|
||||
continue
|
||||
}
|
||||
if parentsQuery.Len() > 1 {
|
||||
_, _ = parentsQuery.WriteString(" or ")
|
||||
}
|
||||
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||
_, _ = parentsQuery.WriteString("sharedWithMe=true")
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
|
||||
}
|
||||
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||
query = append(query, "sharedWithMe=true")
|
||||
}
|
||||
if parentsQuery.Len() > 1 {
|
||||
_ = parentsQuery.WriteByte(')')
|
||||
query = append(query, parentsQuery.String())
|
||||
if dirID != "" && !(f.opt.SharedWithMe && dirID == f.rootFolderID) {
|
||||
query = append(query, fmt.Sprintf("'%s' in parents", dirID))
|
||||
}
|
||||
stem := ""
|
||||
if title != "" {
|
||||
// Escaping the backslash isn't documented but seems to work
|
||||
searchTitle := strings.Replace(title, `\`, `\\`, -1)
|
||||
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
|
||||
// Convert / to / for search
|
||||
searchTitle = strings.Replace(searchTitle, "/", "/", -1)
|
||||
|
||||
handleGdocs := !directoriesOnly && !f.opt.SkipGdocs
|
||||
// if the search title contains an extension and the extension is in the export extensions add a search
|
||||
// for the filename without the extension.
|
||||
// assume that export extensions don't contain escape sequences and only have one part (not .tar.gz)
|
||||
if ext := path.Ext(searchTitle); handleGdocs && len(ext) > 0 && containsString(f.extensions, ext[1:]) {
|
||||
stem = title[:len(title)-len(ext)]
|
||||
query = append(query, fmt.Sprintf("(name='%s' or name='%s')", searchTitle, searchTitle[:len(title)-len(ext)]))
|
||||
} else {
|
||||
query = append(query, fmt.Sprintf("name='%s'", searchTitle))
|
||||
}
|
||||
query = append(query, fmt.Sprintf("name='%s'", searchTitle))
|
||||
}
|
||||
if directoriesOnly {
|
||||
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
|
||||
@@ -472,15 +444,8 @@ OUTER:
|
||||
item.Name = strings.Replace(item.Name, "/", "/", -1)
|
||||
// Check the case of items is correct since
|
||||
// the `=` operator is case insensitive.
|
||||
|
||||
if title != "" && title != item.Name {
|
||||
if stem == "" || stem != item.Name {
|
||||
continue
|
||||
}
|
||||
_, exportName, _, _ := f.findExportFormat(item)
|
||||
if exportName == "" || exportName != title {
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fn(item) {
|
||||
found = true
|
||||
@@ -706,17 +671,23 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := tempF.NewObject(remote)
|
||||
entries, err := tempF.List("")
|
||||
if err != nil {
|
||||
// unable to list folder so return old f
|
||||
return f, nil
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
return f, fs.ErrorIsFile
|
||||
for _, e := range entries {
|
||||
if _, isObject := e.(fs.Object); isObject && e.Remote() == remote {
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/ncw/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
}
|
||||
// File doesn't exist so return old f
|
||||
return f, nil
|
||||
}
|
||||
// fmt.Printf("Root id %s", f.dirCache.RootID())
|
||||
return f, nil
|
||||
@@ -750,18 +721,11 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// Find the leaf in pathID
|
||||
found, err = f.list([]string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
|
||||
found, err = f.list(pathID, leaf, true, false, false, func(item *drive.File) bool {
|
||||
if item.Name == leaf {
|
||||
pathIDOut = item.Id
|
||||
return true
|
||||
}
|
||||
if !f.opt.SkipGdocs {
|
||||
_, exportName, _, _ := f.findExportFormat(item)
|
||||
if exportName == leaf {
|
||||
pathIDOut = item.Id
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
return pathIDOut, found, err
|
||||
@@ -825,21 +789,18 @@ func (f *Fs) exportFormats() map[string][]string {
|
||||
//
|
||||
// Look through the extensions and find the first format that can be
|
||||
// converted. If none found then return "", ""
|
||||
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
|
||||
exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
|
||||
if isDocument {
|
||||
for _, extension := range f.extensions {
|
||||
mimeType := extensionToMimeType[extension]
|
||||
for _, emt := range exportMimeTypes {
|
||||
if emt == mimeType {
|
||||
return extension, fmt.Sprintf("%s.%s", item.Name, extension), mimeType, true
|
||||
}
|
||||
func (f *Fs) findExportFormat(filepath string, exportMimeTypes []string) (extension, mimeType string) {
|
||||
for _, extension := range f.extensions {
|
||||
mimeType := extensionToMimeType[extension]
|
||||
for _, emt := range exportMimeTypes {
|
||||
if emt == mimeType {
|
||||
return extension, mimeType
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// else return empty
|
||||
return "", "", "", isDocument
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
@@ -862,13 +823,79 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
|
||||
var iErr error
|
||||
_, err = f.list([]string{directoryID}, "", false, false, false, func(item *drive.File) bool {
|
||||
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
|
||||
if err != nil {
|
||||
return true
|
||||
_, err = f.list(directoryID, "", false, false, false, func(item *drive.File) bool {
|
||||
remote := path.Join(dir, item.Name)
|
||||
|
||||
// Untrash all trashed files if required
|
||||
if f.opt.Untrash && item.Trashed {
|
||||
fs.Infof(remote, "Untrashing")
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info := drive.File{
|
||||
Trashed: false,
|
||||
ForceSendFields: []string{"Trashed"},
|
||||
}
|
||||
_, err = f.svc.Files.Update(item.Id, &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(remote, "Untrashing failed: %v", err)
|
||||
}
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
|
||||
switch {
|
||||
case item.MimeType == driveFolderType:
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, item.Id)
|
||||
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
|
||||
d := fs.NewDir(remote, when).SetID(item.Id)
|
||||
entries = append(entries, d)
|
||||
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
|
||||
// ignore object
|
||||
case item.Md5Checksum != "" || item.Size > 0:
|
||||
// If item has MD5 sum or a length it is a file stored on drive
|
||||
o, err := f.newObjectWithInfo(remote, item)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
case f.opt.SkipGdocs:
|
||||
fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
|
||||
default:
|
||||
exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
|
||||
if !isDocument {
|
||||
fs.Debugf(remote, "Ignoring unknown document type %q", item.MimeType)
|
||||
break
|
||||
}
|
||||
// If item has export links then it is a google doc
|
||||
extension, exportMimeType := f.findExportFormat(remote, exportMimeTypes)
|
||||
if extension == "" {
|
||||
fs.Debugf(remote, "No export formats found for %q", item.MimeType)
|
||||
break
|
||||
}
|
||||
o, err := f.newObjectWithInfo(remote+"."+extension, item)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
obj := o.(*Object)
|
||||
obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
|
||||
if f.opt.AlternateExport {
|
||||
switch item.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.document":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.spreadsheet":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.presentation":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", item.Id, extension)
|
||||
}
|
||||
}
|
||||
obj.isDocument = true
|
||||
obj.mimeType = exportMimeType
|
||||
obj.bytes = -1
|
||||
entries = append(entries, o)
|
||||
}
|
||||
return false
|
||||
})
|
||||
@@ -881,233 +908,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listRRunner will read dirIDs from the in channel, perform the file listing an call cb with each DirEntry.
|
||||
//
|
||||
// In each cycle, will wait up to 10ms to read up to grouping entries from the in channel.
|
||||
// If an error occurs it will be send to the out channel and then return. Once the in channel is closed,
|
||||
// nil is send to the out channel and the function returns.
|
||||
func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan string, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
|
||||
var dirs []string
|
||||
|
||||
for dir := range in {
|
||||
dirs = append(dirs[:0], dir)
|
||||
wait := time.After(10 * time.Millisecond)
|
||||
waitloop:
|
||||
for i := 1; i < grouping; i++ {
|
||||
select {
|
||||
case d, ok := <-in:
|
||||
if !ok {
|
||||
break waitloop
|
||||
}
|
||||
dirs = append(dirs, d)
|
||||
case <-wait:
|
||||
break waitloop
|
||||
}
|
||||
}
|
||||
var iErr error
|
||||
_, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
|
||||
parentPath := ""
|
||||
if len(item.Parents) > 0 {
|
||||
p, ok := f.dirCache.GetInv(item.Parents[0])
|
||||
if ok {
|
||||
parentPath = p
|
||||
}
|
||||
}
|
||||
remote := path.Join(parentPath, item.Name)
|
||||
entry, err := f.itemToDirEntry(remote, item)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
|
||||
err = cb(entry)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for range dirs {
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
if iErr != nil {
|
||||
out <- iErr
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
out <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
out <- nil
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively that doing a directory traversal.
|
||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
const (
|
||||
grouping = 50
|
||||
inputBuffer = 1000
|
||||
)
|
||||
|
||||
err = f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mu := sync.Mutex{} // protects in and overflow
|
||||
wg := sync.WaitGroup{}
|
||||
in := make(chan string, inputBuffer)
|
||||
out := make(chan error, fs.Config.Checkers)
|
||||
list := walk.NewListRHelper(callback)
|
||||
overfflow := []string{}
|
||||
|
||||
cb := func(entry fs.DirEntry) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
|
||||
select {
|
||||
case in <- d.ID():
|
||||
wg.Add(1)
|
||||
default:
|
||||
overfflow = append(overfflow, d.ID())
|
||||
}
|
||||
}
|
||||
return list.Add(entry)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
in <- directoryID
|
||||
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
go f.listRRunner(&wg, in, out, cb, grouping)
|
||||
}
|
||||
go func() {
|
||||
// wait until the all directories are processed
|
||||
wg.Wait()
|
||||
// if the input channel overflowed add the collected entries to the channel now
|
||||
for len(overfflow) > 0 {
|
||||
mu.Lock()
|
||||
l := len(overfflow)
|
||||
// only fill half of the channel to prevent entries beeing put into overfflow again
|
||||
if l > inputBuffer/2 {
|
||||
l = inputBuffer / 2
|
||||
}
|
||||
wg.Add(l)
|
||||
for _, d := range overfflow[:l] {
|
||||
in <- d
|
||||
}
|
||||
overfflow = overfflow[l:]
|
||||
mu.Unlock()
|
||||
|
||||
// wait again for the completion of all directories
|
||||
wg.Wait()
|
||||
}
|
||||
mu.Lock()
|
||||
if in != nil {
|
||||
// notify all workers to exit
|
||||
close(in)
|
||||
in = nil
|
||||
}
|
||||
mu.Unlock()
|
||||
}()
|
||||
// wait for all the workers to finish
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
e := <-out
|
||||
mu.Lock()
|
||||
// if one worker returns an error early, close the input so all other workers exit
|
||||
if e != nil && in != nil {
|
||||
err = e
|
||||
close(in)
|
||||
in = nil
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
close(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return list.Flush()
|
||||
}
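
// Illustrative sketch, not part of the original source: one way a caller
// could consume ListR through its callback, counting every entry returned.
// The helper name exampleCountEntries is an assumption made for this sketch.
func exampleCountEntries(f *Fs) (n int, err error) {
	err = f.ListR("", func(entries fs.DirEntries) error {
		n += len(entries)
		return nil
	})
	return n, err
}
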
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
|
||||
switch {
|
||||
case item.MimeType == driveFolderType:
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, item.Id)
|
||||
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
|
||||
d := fs.NewDir(remote, when).SetID(item.Id)
|
||||
return d, nil
|
||||
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
|
||||
// ignore object
|
||||
case item.Md5Checksum != "" || item.Size > 0:
|
||||
// If the item has an MD5 sum or a length it is a file stored on drive
|
||||
o, err := f.newObjectWithInfo(remote, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
case f.opt.SkipGdocs:
|
||||
fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
|
||||
default:
|
||||
// If item MimeType is in the ExportFormats then it is a google doc
|
||||
extension, _, exportMimeType, isDocument := f.findExportFormat(item)
|
||||
if !isDocument {
|
||||
fs.Debugf(remote, "Ignoring unknown document type %q", item.MimeType)
|
||||
break
|
||||
}
|
||||
if extension == "" {
|
||||
fs.Debugf(remote, "No export formats found for %q", item.MimeType)
|
||||
break
|
||||
}
|
||||
o, err := f.newObjectWithInfo(remote+"."+extension, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj := o.(*Object)
|
||||
obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
|
||||
if f.opt.AlternateExport {
|
||||
switch item.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.document":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.spreadsheet":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", item.Id, extension)
|
||||
case "application/vnd.google-apps.presentation":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", item.Id, extension)
|
||||
}
|
||||
}
|
||||
obj.isDocument = true
|
||||
obj.mimeType = exportMimeType
|
||||
obj.bytes = -1
|
||||
return o, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
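
// Illustrative sketch, not part of the original source: how the default
// export URL built above is assembled from the drive API base path, the file
// ID and the export MIME type. exampleExportURL is an assumed helper name.
func exampleExportURL(basePath, fileID, exportMimeType string) string {
	return fmt.Sprintf("%sfiles/%s/export?mimeType=%s", basePath, fileID, url.QueryEscape(exportMimeType))
}
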
// Creates a drive.File info from the parameters passed in and a half
|
||||
// finished Object which must have setMetaData called on it
|
||||
//
|
||||
@@ -1205,7 +1005,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
for _, srcDir := range dirs[1:] {
|
||||
// list the objects
|
||||
infos := []*drive.File{}
|
||||
_, err := f.list([]string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
|
||||
_, err := f.list(srcDir.ID(), "", false, false, true, func(info *drive.File) bool {
|
||||
infos = append(infos, info)
|
||||
return false
|
||||
})
|
||||
@@ -1273,7 +1073,7 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
return err
|
||||
}
|
||||
var trashedFiles = false
|
||||
found, err := f.list([]string{directoryID}, "", false, false, true, func(item *drive.File) bool {
|
||||
found, err := f.list(directoryID, "", false, false, true, func(item *drive.File) bool {
|
||||
if !item.Trashed {
|
||||
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
|
||||
return true
|
||||
@@ -1772,26 +1572,6 @@ func (o *Object) setMetaData(info *drive.File) {
|
||||
o.mimeType = info.MimeType
|
||||
}
|
||||
|
||||
// setGdocsMetaData only sets the gdocs related fields
|
||||
func (o *Object) setGdocsMetaData(info *drive.File, extension, exportMimeType string) {
|
||||
o.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", o.fs.svc.BasePath, info.Id, url.QueryEscape(exportMimeType))
|
||||
if o.fs.opt.AlternateExport {
|
||||
switch info.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
o.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension)
|
||||
case "application/vnd.google-apps.document":
|
||||
o.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension)
|
||||
case "application/vnd.google-apps.spreadsheet":
|
||||
o.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension)
|
||||
case "application/vnd.google-apps.presentation":
|
||||
o.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension)
|
||||
}
|
||||
}
|
||||
o.isDocument = true
|
||||
o.mimeType = exportMimeType
|
||||
o.bytes = -1
|
||||
}
|
||||
|
||||
// readMetaData gets the info if it hasn't already been fetched
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.id != "" {
|
||||
@@ -1806,19 +1586,11 @@ func (o *Object) readMetaData() (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
found, err := o.fs.list([]string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
|
||||
found, err := o.fs.list(directoryID, leaf, false, true, false, func(item *drive.File) bool {
|
||||
if item.Name == leaf {
|
||||
o.setMetaData(item)
|
||||
return true
|
||||
}
|
||||
if !o.fs.opt.SkipGdocs {
|
||||
extension, exportName, exportMimeType, _ := o.fs.findExportFormat(item)
|
||||
if exportName == leaf {
|
||||
o.setMetaData(item)
|
||||
o.setGdocsMetaData(item, extension, exportMimeType)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
@@ -2079,7 +1851,6 @@ var (
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
|
||||
@@ -53,10 +53,11 @@ const exampleExportFormats = `{
|
||||
]
|
||||
}`
|
||||
|
||||
var exportFormats map[string][]string
|
||||
|
||||
// Load the example export formats into exportFormats for testing
|
||||
func TestInternalLoadExampleExportFormats(t *testing.T) {
|
||||
exportFormatsOnce.Do(func() {})
|
||||
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
|
||||
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
|
||||
}
|
||||
|
||||
func TestInternalParseExtensions(t *testing.T) {
|
||||
@@ -89,10 +90,8 @@ func TestInternalParseExtensions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInternalFindExportFormat(t *testing.T) {
|
||||
item := &drive.File{
|
||||
Name: "file",
|
||||
MimeType: "application/vnd.google-apps.document",
|
||||
}
|
||||
item := new(drive.File)
|
||||
item.MimeType = "application/vnd.google-apps.document"
|
||||
for _, test := range []struct {
|
||||
extensions []string
|
||||
wantExtension string
|
||||
@@ -106,14 +105,8 @@ func TestInternalFindExportFormat(t *testing.T) {
|
||||
} {
|
||||
f := new(Fs)
|
||||
f.extensions = test.extensions
|
||||
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
|
||||
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
|
||||
assert.Equal(t, test.wantExtension, gotExtension)
|
||||
if test.wantExtension != "" {
|
||||
assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
|
||||
} else {
|
||||
assert.Equal(t, "", gotFilename)
|
||||
}
|
||||
assert.Equal(t, test.wantMimeType, gotMimeType)
|
||||
assert.Equal(t, true, gotIsDocument)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,265 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormat = "2006-01-02-T15:04:05Z0700"
|
||||
)
|
||||
|
||||
// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
|
||||
type Time time.Time
|
||||
|
||||
// UnmarshalXML turns XML into a Time
|
||||
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
if err := d.DecodeElement(&v, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
if v == "" {
|
||||
*t = Time(time.Time{})
|
||||
return nil
|
||||
}
|
||||
newTime, err := time.Parse(timeFormat, v)
|
||||
if err == nil {
|
||||
*t = Time(newTime)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalXML turns a Time into XML
|
||||
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return e.EncodeElement(t.String(), start)
|
||||
}
|
||||
|
||||
// Return Time string in Jottacloud format
|
||||
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
// UnmarshalXMLAttr sets Flag to true if the attribute is present
|
||||
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
|
||||
*f = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalXMLAttr : Do not use
|
||||
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
attr := xml.Attr{
|
||||
Name: name,
|
||||
Value: "false",
|
||||
}
|
||||
return attr, errors.New("unimplemented")
|
||||
}
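
// Illustrative note, not part of the original source: Flag is used on
// attributes such as `xml:"deleted,attr"` in JottaFolder and JottaFile below,
// where the mere presence of the attribute marks an item as deleted.
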
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
<user time="2018-07-18-T21:39:10Z" host="dn-132">
|
||||
<username>12qh1wsht8cssxdtwl15rqh9</username>
|
||||
<account-type>free</account-type>
|
||||
<locked>false</locked>
|
||||
<capacity>5368709120</capacity>
|
||||
<max-devices>-1</max-devices>
|
||||
<max-mobile-devices>-1</max-mobile-devices>
|
||||
<usage>0</usage>
|
||||
<read-locked>false</read-locked>
|
||||
<write-locked>false</write-locked>
|
||||
<quota-write-locked>false</quota-write-locked>
|
||||
<enable-sync>true</enable-sync>
|
||||
<enable-foldershare>true</enable-foldershare>
|
||||
<devices>
|
||||
<device>
|
||||
<name xml:space="preserve">Jotta</name>
|
||||
<display_name xml:space="preserve">Jotta</display_name>
|
||||
<type>JOTTA</type>
|
||||
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
</device>
|
||||
</devices>
|
||||
</user>
|
||||
*/
|
||||
|
||||
// AccountInfo represents a Jottacloud account
|
||||
type AccountInfo struct {
|
||||
Username string `xml:"username"`
|
||||
AccountType string `xml:"account-type"`
|
||||
Locked bool `xml:"locked"`
|
||||
Capacity int64 `xml:"capacity"`
|
||||
MaxDevices int `xml:"max-devices"`
|
||||
MaxMobileDevices int `xml:"max-mobile-devices"`
|
||||
Usage int64 `xml:"usage"`
|
||||
ReadLocked bool `xml:"read-locked"`
|
||||
WriteLocked bool `xml:"write-locked"`
|
||||
QuotaWriteLocked bool `xml:"quota-write-locked"`
|
||||
EnableSync bool `xml:"enable-sync"`
|
||||
EnableFolderShare bool `xml:"enable-foldershare"`
|
||||
Devices []JottaDevice `xml:"devices>device"`
|
||||
}
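
// Illustrative sketch, not part of the original source: the <user> document
// shown above unmarshals directly into AccountInfo with encoding/xml.
// exampleDecodeAccountInfo is an assumed helper name.
func exampleDecodeAccountInfo(data []byte) (AccountInfo, error) {
	var info AccountInfo
	err := xml.Unmarshal(data, &info)
	return info, err
}
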
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>
|
||||
|
||||
<device time="2018-07-23-T20:21:50Z" host="dn-158">
|
||||
<name xml:space="preserve">Jotta</name>
|
||||
<display_name xml:space="preserve">Jotta</display_name>
|
||||
<type>JOTTA</type>
|
||||
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
<user>12qh1wsht8cssxdtwl15rqh9</user>
|
||||
<mountPoints>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Archive</name>
|
||||
<size>0</size>
|
||||
<modified>2018-07-15-T22:04:59Z</modified>
|
||||
</mountPoint>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Shared</name>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
</mountPoint>
|
||||
<mountPoint>
|
||||
<name xml:space="preserve">Sync</name>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
</mountPoint>
|
||||
</mountPoints>
|
||||
<metadata first="" max="" total="3" num_mountpoints="3"/>
|
||||
</device>
|
||||
*/
|
||||
|
||||
// JottaDevice represents a Jottacloud Device
|
||||
type JottaDevice struct {
|
||||
Name string `xml:"name"`
|
||||
DisplayName string `xml:"display_name"`
|
||||
Type string `xml:"type"`
|
||||
Sid string `xml:"sid"`
|
||||
Size int64 `xml:"size"`
|
||||
User string `xml:"user"`
|
||||
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
|
||||
|
||||
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
|
||||
<name xml:space="preserve">Sync</name>
|
||||
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
|
||||
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
<device>Jotta</device>
|
||||
<user>12qh1wsht8cssxdtwl15rqh9</user>
|
||||
<folders>
|
||||
<folder name="test"/>
|
||||
</folders>
|
||||
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
|
||||
</mountPoint>
|
||||
*/
|
||||
|
||||
// JottaMountPoint represents a Jottacloud mountpoint
|
||||
type JottaMountPoint struct {
|
||||
Name string `xml:"name"`
|
||||
Size int64 `xml:"size"`
|
||||
Device string `xml:"device"`
|
||||
Folders []JottaFolder `xml:"folders>folder"`
|
||||
Files []JottaFile `xml:"files>file"`
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
|
||||
|
||||
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
|
||||
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
|
||||
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
|
||||
<folders>
|
||||
<folder name="t2"/>c
|
||||
</folders>
|
||||
<files>
|
||||
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
|
||||
<currentRevision>
|
||||
<number>1</number>
|
||||
<state>COMPLETED</state>
|
||||
<created>2018-07-05-T15:08:02Z</created>
|
||||
<modified>2018-07-05-T15:08:02Z</modified>
|
||||
<mime>application/octet-stream</mime>
|
||||
<size>30827730</size>
|
||||
<md5>1e8a7b728ab678048df00075c9507158</md5>
|
||||
<updated>2018-07-24-T20:41:10Z</updated>
|
||||
</currentRevision>
|
||||
</file>
|
||||
</files>
|
||||
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
|
||||
</folder>
|
||||
*/
|
||||
|
||||
// JottaFolder represents a JottacloudFolder
|
||||
type JottaFolder struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
Path string `xml:"path"`
|
||||
CreatedAt Time `xml:"created"`
|
||||
ModifiedAt Time `xml:"modified"`
|
||||
Updated Time `xml:"updated"`
|
||||
Folders []JottaFolder `xml:"folders>folder"`
|
||||
Files []JottaFile `xml:"files>file"`
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
|
||||
|
||||
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
|
||||
<currentRevision>
|
||||
<number>1</number>
|
||||
<state>COMPLETED</state>
|
||||
<created>2018-07-05-T15:08:02Z</created>
|
||||
<modified>2018-07-05-T15:08:02Z</modified>
|
||||
<mime>application/octet-stream</mime>
|
||||
<size>30827730</size>
|
||||
<md5>1e8a7b728ab678048df00075c9507158</md5>
|
||||
<updated>2018-07-24-T20:41:10Z</updated>
|
||||
</currentRevision>
|
||||
</file>
|
||||
*/
|
||||
|
||||
// JottaFile represents a Jottacloud file
|
||||
type JottaFile struct {
|
||||
XMLName xml.Name
|
||||
Name string `xml:"name,attr"`
|
||||
Deleted Flag `xml:"deleted,attr"`
|
||||
State string `xml:"currentRevision>state"`
|
||||
CreatedAt Time `xml:"currentRevision>created"`
|
||||
ModifiedAt Time `xml:"currentRevision>modified"`
|
||||
Updated Time `xml:"currentRevision>updated"`
|
||||
Size int64 `xml:"currentRevision>size"`
|
||||
MD5 string `xml:"currentRevision>md5"`
|
||||
}
|
||||
|
||||
// Error is a custom Error for wrapping Jottacloud error responses
|
||||
type Error struct {
|
||||
StatusCode int `xml:"code"`
|
||||
Message string `xml:"message"`
|
||||
Reason string `xml:"reason"`
|
||||
Cause string `xml:"cause"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("error %d", e.StatusCode)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
if e.Reason != "" {
|
||||
out += fmt.Sprintf(" (%+v)", e.Reason)
|
||||
}
|
||||
return out
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMountpointEmptyModificationTime(t *testing.T) {
|
||||
mountpoint := `
|
||||
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
|
||||
<name xml:space="preserve">Sync</name>
|
||||
<path xml:space="preserve">/foo/Jotta</path>
|
||||
<abspath xml:space="preserve">/foo/Jotta</abspath>
|
||||
<size>0</size>
|
||||
<modified></modified>
|
||||
<device>Jotta</device>
|
||||
<user>foo</user>
|
||||
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
|
||||
</mountPoint>
|
||||
`
|
||||
var jf JottaFolder
|
||||
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !time.Time(jf.ModifiedAt).IsZero() {
|
||||
t.Errorf("got non-zero time, want zero")
|
||||
}
|
||||
}
|
||||
@@ -1,900 +0,0 @@
|
||||
package jottacloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/jottacloud/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync"
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "jottacloud",
|
||||
Description: "JottaCloud",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User Name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "mountpoint",
|
||||
Help: "The mountpoint to use.",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Sync",
|
||||
Help: "Will be synced by the official client.",
|
||||
}, {
|
||||
Value: "Archive",
|
||||
Help: "Archive",
|
||||
}},
|
||||
}, {
|
||||
Name: "md5_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Mountpoint string `config:"mountpoint"`
|
||||
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
|
||||
}
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
pacer *pacer.Pacer
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
hasMetaData bool
|
||||
size int64
|
||||
modTime time.Time
|
||||
md5 string
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("jottacloud root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// parsePath parses a jottacloud 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
return
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(path),
|
||||
}
|
||||
var result api.JottaFile
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read metadata failed")
|
||||
}
|
||||
if result.XMLName.Local != "file" {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// setEndpointURL reads the account id and generates the API endpoint URL
|
||||
func (f *Fs) setEndpointURL(user, mountpoint string) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: rest.URLPathEscape(user),
|
||||
}
|
||||
|
||||
var result api.AccountInfo
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.endpointURL = rest.URLPathEscape(path.Join(result.Username, defaultDevice, mountpoint))
|
||||
return nil
|
||||
}
|
||||
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
func errorHandler(resp *http.Response) error {
|
||||
// Decode error response
|
||||
errResponse := new(api.Error)
|
||||
err := rest.DecodeXML(resp, &errResponse)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
||||
}
|
||||
if errResponse.Message == "" {
|
||||
errResponse.Message = resp.Status
|
||||
}
|
||||
if errResponse.StatusCode == 0 {
|
||||
errResponse.StatusCode = resp.StatusCode
|
||||
}
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// filePath returns an escaped file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
return rest.URLPathEscape(path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file))))
|
||||
}
|
||||
|
||||
// filePath returns an escaped file path (f.root, remote)
|
||||
func (o *Object) filePath() string {
|
||||
return o.fs.filePath(o.remote)
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = parsePath(root)
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
|
||||
if user == "" || pass == "" {
|
||||
return nil, errors.New("jottacloud needs user and password")
|
||||
}
|
||||
|
||||
f.srv.SetUserPass(opt.User, opt.Pass)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
err = f.setEndpointURL(opt.User, opt.Mountpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get account info")
|
||||
}
|
||||
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
var err error
|
||||
if info != nil {
|
||||
// Set info
|
||||
err = o.setMetaData(info)
|
||||
} else {
|
||||
err = o.readMetaData() // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// CreateDir makes a directory
|
||||
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: f.filePath(path),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.Parameters.Set("mkDir", "true")
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &jf)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return nil, err
|
||||
}
|
||||
// fmt.Printf("...Id %q\n", *info.Id)
|
||||
return jf, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
//fmt.Printf("List: %s\n", dir)
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: f.filePath(dir),
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var result api.JottaFolder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrap(err, "couldn't list files")
|
||||
}
|
||||
|
||||
if result.Deleted {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
for i := range result.Folders {
|
||||
item := &result.Folders[i]
|
||||
if item.Deleted {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, restoreReservedChars(item.Name))
|
||||
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
|
||||
for i := range result.Files {
|
||||
item := &result.Files[i]
|
||||
if item.Deleted || item.State != "COMPLETED" {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, restoreReservedChars(item.Name))
|
||||
o, err := f.newObjectWithInfo(remote, item)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
//fmt.Printf("Entries: %+v\n", entries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object) {
|
||||
// Temporary Object under construction
|
||||
o = &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: size,
|
||||
modTime: modTime,
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := f.createObject(src.Remote(), src.ModTime(), src.Size())
|
||||
return o, o.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// mkParentDir makes the parent of the native path dirPath if
|
||||
// necessary and any directories above that
|
||||
func (f *Fs) mkParentDir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// chop off trailing / if it exists
|
||||
if strings.HasSuffix(dirPath, "/") {
|
||||
dirPath = dirPath[:len(dirPath)-1]
|
||||
}
|
||||
parent := path.Dir(dirPath)
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
}
|
||||
return f.Mkdir(parent)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
_, err := f.CreateDir(dir)
|
||||
return err
|
||||
}
|
||||
|
||||
// purgeCheck removes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(dir string, check bool) (err error) {
|
||||
root := path.Join(f.root, dir)
|
||||
if root == "" {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
|
||||
// check that the directory exists
|
||||
entries, err := f.List(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if check {
|
||||
if len(entries) != 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: f.filePath(dir),
|
||||
Parameters: url.Values{},
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
opts.Parameters.Set("dlDir", "true")
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rmdir failed")
|
||||
}
|
||||
|
||||
// TODO: Parse response?
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir deletes the root folder
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return f.purgeCheck(dir, true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// copyOrMove copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: src,
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
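
// Illustrative note, not part of the original source: the method strings
// passed to copyOrMove elsewhere in this file are "cp" (Copy), "mv" (Move)
// and "mvDir" (DirMove).
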
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
err := f.mkParentDir(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy failed")
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(remote, info)
|
||||
//return f.newObjectWithInfo(remote, &result)
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
err := f.mkParentDir(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "move failed")
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(remote, info)
|
||||
//return f.newObjectWithInfo(remote, result)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Refuse to move to or from the root
|
||||
if srcPath == "" || dstPath == "" {
|
||||
fs.Debugf(src, "DirMove error: Can't move root")
|
||||
return errors.New("can't move root directory")
|
||||
}
|
||||
//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)
|
||||
|
||||
var err error
|
||||
_, err = f.List(dstRemote)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "moveDir failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// ---------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.md5, nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return 0
|
||||
}
|
||||
return o.size
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
o.md5 = info.MD5
|
||||
o.modTime = time.Time(info.ModifiedAt)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetaData {
|
||||
return nil
|
||||
}
|
||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: o.filePath(),
|
||||
Parameters: url.Values{},
|
||||
Options: options,
|
||||
}
|
||||
|
||||
opts.Parameters.Set("mode", "bin")
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// Read the md5 of in returning a reader which will read the same contents
|
||||
//
|
||||
// The cleanup function should be called when out is finished with
|
||||
// regardless of whether this function returned an error or not.
|
||||
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need a MD5
|
||||
md5Hasher := md5.New()
|
||||
// use the teeReader to write to the local file AND calculate the MD5 while doing so
|
||||
teeReader := io.TeeReader(in, md5Hasher)
|
||||
|
||||
// nothing to clean up by default
|
||||
cleanup = func() {}
|
||||
|
||||
// don't cache small files on disk to reduce wear of the disk
|
||||
if size > threshold {
|
||||
var tempFile *os.File
|
||||
|
||||
// create the cache file
|
||||
tempFile, err = ioutil.TempFile("", cachePrefix)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
|
||||
|
||||
// clean up the file after we are done downloading
|
||||
cleanup = func() {
|
||||
// the file should normally already be closed, but just to make sure
|
||||
_ = tempFile.Close()
|
||||
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
|
||||
}
|
||||
|
||||
// copy the ENTIRE file to disc and calculate the MD5 in the process
|
||||
if _, err = io.Copy(tempFile, teeReader); err != nil {
|
||||
return
|
||||
}
|
||||
// jump to the start of the local file so we can pass it along
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// replace the already read source with a reader of our cached file
|
||||
out = tempFile
|
||||
} else {
|
||||
// that's a small file, just read it into memory
|
||||
var inData []byte
|
||||
inData, err = ioutil.ReadAll(teeReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// set the reader to our read memory block
|
||||
out = bytes.NewReader(inData)
|
||||
}
|
||||
return hex.EncodeToString(md5Hasher.Sum(nil)), out, cleanup, nil
|
||||
}
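
// Illustrative sketch, not part of the original source: readMD5 hashes the
// stream and hands back a reader that still yields the same bytes, spilling
// to a temporary file only above the threshold. exampleReadMD5 is an assumed
// helper name.
func exampleReadMD5() (string, error) {
	data := []byte("hello world")
	sum, out, cleanup, err := readMD5(bytes.NewReader(data), int64(len(data)), 1024)
	defer cleanup()
	if err != nil {
		return "", err
	}
	// out replays the original bytes so the caller can still upload them
	if _, err := ioutil.ReadAll(out); err != nil {
		return "", err
	}
	return sum, nil
}
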
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
md5String, err := src.Hash(hash.MD5)
|
||||
if err != nil || md5String == "" {
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
var wrap accounting.WrapFn
|
||||
in, wrap = accounting.UnWrap(in)
|
||||
var cleanup func()
|
||||
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to calculate MD5")
|
||||
}
|
||||
// Wrap the accounting back onto the stream
|
||||
in = wrap(in)
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var result api.JottaFile
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
ContentType: fs.MimeType(src),
|
||||
ContentLength: &size,
|
||||
ExtraHeaders: make(map[string]string),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.ExtraHeaders["JMd5"] = md5String
|
||||
opts.Parameters.Set("cphash", md5String)
|
||||
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
|
||||
// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
|
||||
opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
|
||||
|
||||
// Parameters observed in other implementations
|
||||
//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
|
||||
//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
|
||||
//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
|
||||
//opts.ExtraHeaders["jx_csid"] = ""
|
||||
//opts.ExtraHeaders["jx_lisence"] = ""
|
||||
|
||||
opts.Parameters.Set("umode", "nomultipart")
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallXML(&opts, nil, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Check returned Metadata? Timeout on big uploads?
|
||||
return o.setMetaData(&result)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: o.filePath(),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.Parameters.Set("dl", "true")
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
@@ -1,67 +0,0 @@
|
||||
package jottacloud
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// A test reader to return a test pattern of size
|
||||
type testReader struct {
|
||||
size int64
|
||||
c byte
|
||||
}
|
||||
|
||||
// Read implements io.Reader by returning the test pattern
|
||||
func (r *testReader) Read(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
if r.size <= 0 {
|
||||
return n, io.EOF
|
||||
}
|
||||
p[i] = r.c
|
||||
r.c = (r.c + 1) % 253
|
||||
r.size--
|
||||
n++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestReadMD5(t *testing.T) {
|
||||
// smoke test the reader
|
||||
b, err := ioutil.ReadAll(&testReader{size: 10})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
|
||||
|
||||
// Check readMD5 for different size and threshold
|
||||
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
|
||||
hasher := md5.New()
|
||||
n, err := io.Copy(hasher, &testReader{size: size})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, n, size)
|
||||
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
|
||||
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
|
||||
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
|
||||
in := &testReader{size: size}
|
||||
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
|
||||
defer cleanup()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, wantMD5, gotMD5)
|
||||
|
||||
// check md5hash of out
|
||||
hasher := md5.New()
|
||||
n, err := io.Copy(hasher, out)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, n, size)
|
||||
outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
|
||||
assert.Equal(t, wantMD5, outMD5)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
// Test Box filesystem interface
|
||||
package jottacloud_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/jottacloud"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestJottacloud:",
|
||||
NilObject: (*jottacloud.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
/*
|
||||
Translate file names for JottaCloud adapted from OneDrive
|
||||
|
||||
|
||||
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
|
||||
|
||||
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
||||
|
||||
|
||||
*/
|
||||
|
||||
package jottacloud
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// charMap holds replacements for characters
|
||||
//
|
||||
// JottaCloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
|
||||
//
|
||||
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
|
||||
var (
|
||||
charMap = map[rune]rune{
|
||||
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
|
||||
'+': '+', // FULLWIDTH PLUS SIGN
|
||||
'*': '*', // FULLWIDTH ASTERISK
|
||||
'<': '<', // FULLWIDTH LESS-THAN SIGN
|
||||
'>': '>', // FULLWIDTH GREATER-THAN SIGN
|
||||
'?': '?', // FULLWIDTH QUESTION MARK
|
||||
'!': '!', // FULLWIDTH EXCLAMATION MARK
|
||||
'&': '&', // FULLWIDTH AMPERSAND
|
||||
':': ':', // FULLWIDTH COLON
|
||||
';': ';', // FULLWIDTH SEMICOLON
|
||||
'|': '|', // FULLWIDTH VERTICAL LINE
|
||||
'#': '#', // FULLWIDTH NUMBER SIGN
|
||||
'%': '%', // FULLWIDTH PERCENT SIGN
|
||||
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
|
||||
'\'': ''', // FULLWIDTH APOSTROPHE
|
||||
'~': '~', // FULLWIDTH TILDE
|
||||
' ': '␠', // SYMBOL FOR SPACE
|
||||
}
|
||||
invCharMap map[rune]rune
|
||||
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
|
||||
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Create inverse charMap
|
||||
invCharMap = make(map[rune]rune, len(charMap))
|
||||
for k, v := range charMap {
|
||||
invCharMap[v] = k
|
||||
}
|
||||
}
|
||||
|
||||
// replaceReservedChars takes a path and substitutes any reserved
|
||||
// characters in it
|
||||
func replaceReservedChars(in string) string {
|
||||
// Filenames can't start with space
|
||||
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
|
||||
// Filenames can't end with space
|
||||
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := charMap[c]; ok && c != ' ' {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
|
||||
|
||||
// restoreReservedChars takes a path and undoes any substitutions
|
||||
// made by replaceReservedChars
|
||||
func restoreReservedChars(in string) string {
|
||||
return strings.Map(func(c rune) rune {
|
||||
if replacement, ok := invCharMap[c]; ok {
|
||||
return replacement
|
||||
}
|
||||
return c
|
||||
}, in)
|
||||
}
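
// Illustrative sketch, not part of the original source: reserved characters
// survive a replace/restore round trip unchanged. exampleRoundTrip is an
// assumed helper name.
func exampleRoundTrip(name string) bool {
	return restoreReservedChars(replaceReservedChars(name)) == name
}
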
@@ -1,28 +0,0 @@
|
||||
package jottacloud
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
out string
|
||||
}{
|
||||
{"", ""},
|
||||
{"abc 123", "abc 123"},
|
||||
{`\+*<>?!&:;|#%"'~`, `\+*<>?!&:;|#%"'~`},
|
||||
{`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`},
|
||||
{" leading space", "␠leading space"},
|
||||
{"trailing space ", "trailing space␠"},
|
||||
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
|
||||
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
|
||||
} {
|
||||
got := replaceReservedChars(test.in)
|
||||
if got != test.out {
|
||||
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
|
||||
}
|
||||
got2 := restoreReservedChars(got)
|
||||
if got2 != test.in {
|
||||
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -39,7 +39,8 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
eventWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
useTrash = true // FIXME make configurable - rclone global
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -64,12 +65,7 @@ func init() {
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "debug",
|
||||
Help: "Output more debug from Mega.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Help: "If set then output more debug from mega.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -78,10 +74,9 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
}
|
||||
|
||||
// Fs represents a remote mega
|
||||
@@ -562,7 +557,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// deleteNode removes a file or directory, observing useTrash
|
||||
func (f *Fs) deleteNode(node *mega.Node) (err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = f.srv.Delete(node, f.opt.HardDelete)
|
||||
err = f.srv.Delete(node, !useTrash)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
|
||||
@@ -91,10 +91,9 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
// ItemReference groups data needed to reference a OneDrive item
|
||||
// across the service into a single structure.
|
||||
type ItemReference struct {
|
||||
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
|
||||
ID string `json:"id"` // Unique identifier for the item. Read/Write.
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
DriveType string `json:"driveType"` // Type of the drive, Read-Only
|
||||
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
|
||||
ID string `json:"id"` // Unique identifier for the item. Read/Write.
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
}
|
||||
|
||||
// RemoteItemFacet groups data needed to reference a OneDrive remote item
|
||||
@@ -245,6 +244,7 @@ type MoveItemRequest struct {
|
||||
// Copy Item
|
||||
// Upload From URL
|
||||
type AsyncOperationStatus struct {
|
||||
Operation string `json:"operation"` // The type of job being run.
|
||||
PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete.
|
||||
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -32,32 +33,48 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
|
||||
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
graphURL = "https://graph.microsoft.com/v1.0"
|
||||
configDriveID = "drive_id"
|
||||
configDriveType = "drive_type"
|
||||
driveTypePersonal = "personal"
|
||||
driveTypeBusiness = "business"
|
||||
driveTypeSharepoint = "documentLibrary"
|
||||
rclonePersonalClientID = "0000000044165769"
|
||||
rclonePersonalEncryptedClientSecret = "ugVWLNhKkVT1-cbTRO-6z1MlzwdW6aMwpKgNaFG-qXjEn_WfDnG9TVyRA5yuoliU"
|
||||
rcloneBusinessClientID = "52857fec-4bc2-483f-9f1b-5fe28e97532c"
|
||||
rcloneBusinessEncryptedClientSecret = "6t4pC8l6L66SFYVIi8PgECDyjXy_ABo1nsTaE-Lr9LpzC6yT4vNOwHsakwwdEui0O6B0kX8_xbBLj91J"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURLPersonal = "https://api.onedrive.com/v1.0/drive" // root URL for requests
|
||||
discoveryServiceURL = "https://api.office.com/discovery/"
|
||||
configResourceURL = "resource_url"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
|
||||
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
|
||||
// Description of how to auth for this app for a personal account
|
||||
oauthPersonalConfig = &oauth2.Config{
|
||||
Scopes: []string{
|
||||
"wl.signin", // Allow single sign-on capabilities
|
||||
"wl.offline_access", // Allow receiving a refresh token
|
||||
"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
|
||||
},
|
||||
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://login.live.com/oauth20_authorize.srf",
|
||||
TokenURL: "https://login.live.com/oauth20_token.srf",
|
||||
},
|
||||
ClientID: rclonePersonalClientID,
|
||||
ClientSecret: obscure.MustReveal(rclonePersonalEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthBusinessConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://login.microsoftonline.com/common/oauth2/authorize",
|
||||
TokenURL: "https://login.microsoftonline.com/common/oauth2/token",
|
||||
},
|
||||
ClientID: rcloneBusinessClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneBusinessEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
|
||||
sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
|
||||
)
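// Illustrative sketch (assumption, based on oauthBusinessResource being passed
// through to oauthutil): the extra "resource" parameter ends up on the
// authorize URL built from oauthBusinessConfig.
authURL := oauthBusinessConfig.AuthCodeURL("state", oauthBusinessResource)
_ = authURL // e.g. https://login.microsoftonline.com/...?...&resource=https%3A%2F%2Fapi.office.com%2Fdiscovery%2F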
|
||||
|
||||
// Register with Fs
|
||||
@@ -67,142 +84,147 @@ func init() {
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("onedrive", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
}
|
||||
// choose account type
|
||||
fmt.Printf("Choose OneDrive account type?\n")
|
||||
fmt.Printf(" * Say b for a OneDrive business account\n")
|
||||
fmt.Printf(" * Say p for a personal OneDrive account\n")
|
||||
isPersonal := config.Command([]string{"bBusiness", "pPersonal"}) == 'p'
|
||||
|
||||
// Are we running headless?
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
return
|
||||
}
|
||||
|
||||
type driveResource struct {
|
||||
DriveID string `json:"id"`
|
||||
DriveName string `json:"name"`
|
||||
DriveType string `json:"driveType"`
|
||||
}
|
||||
type drivesResponse struct {
|
||||
Drives []driveResource `json:"value"`
|
||||
}
|
||||
|
||||
type siteResource struct {
|
||||
SiteID string `json:"id"`
|
||||
SiteName string `json:"displayName"`
|
||||
SiteURL string `json:"webUrl"`
|
||||
}
|
||||
type siteResponse struct {
|
||||
Sites []siteResource `json:"value"`
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
}
|
||||
srv := rest.NewClient(oAuthClient)
|
||||
|
||||
var opts rest.Opts
|
||||
var finalDriveID string
|
||||
var siteID string
|
||||
switch config.Choose("Your choice",
|
||||
[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
|
||||
[]string{"OneDrive Personal or Business", "Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
|
||||
false) {
|
||||
|
||||
case "onedrive":
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/me/drives",
|
||||
}
|
||||
case "sharepoint":
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites/root/drives",
|
||||
}
|
||||
case "driveid":
|
||||
fmt.Printf("Paste your Drive ID here> ")
|
||||
finalDriveID = config.ReadLine()
|
||||
case "siteid":
|
||||
fmt.Printf("Paste your Site ID here> ")
|
||||
siteID = config.ReadLine()
|
||||
case "search":
|
||||
fmt.Printf("What to search for> ")
|
||||
searchTerm := config.ReadLine()
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites?search=" + searchTerm,
|
||||
}
|
||||
|
||||
sites := siteResponse{}
|
||||
_, err := srv.CallJSON(&opts, nil, &sites)
|
||||
if isPersonal {
|
||||
// for personal accounts we don't save a field about the account
|
||||
err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available sites: %v", err)
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
} else {
|
||||
err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
|
||||
var resp oauthutil.AuthError
|
||||
|
||||
if len(sites.Sites) == 0 {
|
||||
log.Fatalf("Search for '%s' returned no results", searchTerm)
|
||||
} else {
|
||||
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
|
||||
for index, site := range sites.Sites {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
|
||||
}
|
||||
siteID = sites.Sites[config.ChooseNumber("Choose site to use:", 0, len(sites.Sites)-1)].SiteID
|
||||
}
|
||||
}
|
||||
|
||||
// if we have a siteID we need to ask for the drives
|
||||
if siteID != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites/" + siteID + "/drives",
|
||||
}
|
||||
}
|
||||
|
||||
// We don't have the final ID yet?
|
||||
// query Microsoft Graph
|
||||
if finalDriveID == "" {
|
||||
drives := drivesResponse{}
|
||||
_, err := srv.CallJSON(&opts, nil, &drives)
|
||||
resp.Name = req.URL.Query().Get("error")
|
||||
resp.Code = strings.Split(req.URL.Query().Get("error_description"), ":")[0] // error_description begins with XXXXXXXXXXXX:
|
||||
resp.Description = strings.Join(strings.Split(req.URL.Query().Get("error_description"), ":")[1:], ":")
|
||||
resp.HelpURL = "https://rclone.org/onedrive/#troubleshooting"
|
||||
return resp
|
||||
}, oauthBusinessConfig, oauthBusinessResource)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available drives: %v", err)
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(drives.Drives) == 0 {
|
||||
log.Fatalf("No drives found")
|
||||
} else {
|
||||
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
|
||||
for index, drive := range drives.Drives {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
|
||||
// Are we running headless?
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
return
|
||||
}
|
||||
|
||||
type serviceResource struct {
|
||||
ServiceAPIVersion string `json:"serviceApiVersion"`
|
||||
ServiceEndpointURI string `json:"serviceEndpointUri"`
|
||||
ServiceResourceID string `json:"serviceResourceId"`
|
||||
}
|
||||
type serviceResponse struct {
|
||||
Services []serviceResource `json:"value"`
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
return
|
||||
}
|
||||
srv := rest.NewClient(oAuthClient)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: discoveryServiceURL,
|
||||
Path: "/v2.0/me/services",
|
||||
}
|
||||
services := serviceResponse{}
|
||||
resp, err := srv.CallJSON(&opts, nil, &services)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to query available services: %v", err)
|
||||
return
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
fs.Errorf(nil, "Failed to query available services: Got HTTP error code %d", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
var resourcesURL []string
|
||||
var resourcesID []string
|
||||
|
||||
for _, service := range services.Services {
|
||||
if service.ServiceAPIVersion == "v2.0" {
|
||||
resourcesID = append(resourcesID, service.ServiceResourceID)
|
||||
resourcesURL = append(resourcesURL, service.ServiceEndpointURI)
|
||||
}
|
||||
finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
|
||||
// we only support 2.0 API
|
||||
fs.Infof(nil, "Skipping API %s endpoint %s", service.ServiceAPIVersion, service.ServiceEndpointURI)
|
||||
}
|
||||
|
||||
var foundService string
|
||||
if len(resourcesID) == 0 {
|
||||
fs.Errorf(nil, "No Service found")
|
||||
return
|
||||
} else if len(resourcesID) == 1 {
|
||||
foundService = resourcesID[0]
|
||||
} else {
|
||||
foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
|
||||
}
|
||||
|
||||
m.Set(configResourceURL, foundService)
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
|
||||
|
||||
// get the token from the initial config
|
||||
// we need to update the token with a resource
|
||||
// specific token we will query now
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while getting token: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
// values for the token query
|
||||
values := url.Values{}
|
||||
values.Set("refresh_token", token.RefreshToken)
|
||||
values.Set("grant_type", "refresh_token")
|
||||
values.Set("resource", foundService)
|
||||
values.Set("client_id", oauthBusinessConfig.ClientID)
|
||||
values.Set("client_secret", oauthBusinessConfig.ClientSecret)
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: oauthBusinessConfig.Endpoint.TokenURL,
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: strings.NewReader(values.Encode()),
|
||||
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
// we are only interested in the new tokens; we don't care about the other fields
|
||||
type tokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
}
|
||||
jsonToken := tokenJSON{}
|
||||
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to get resource token: %v", err)
|
||||
return
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
fs.Errorf(nil, "Failed to get resource token: Got HTTP error code %d", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
// update the tokens
|
||||
token.AccessToken = jsonToken.AccessToken
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, m, token, true)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while setting token: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test the driveID and get drive type
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/drives/" + finalDriveID + "/root"}
|
||||
var rootItem api.Item
|
||||
_, err = srv.CallJSON(&opts, nil, &rootItem)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
|
||||
// This does not work, YET :)
|
||||
if !config.Confirm() {
|
||||
log.Fatalf("Cancelled by user")
|
||||
}
|
||||
|
||||
config.FileSet(name, configDriveID, finalDriveID)
|
||||
config.FileSet(name, configDriveType, rootItem.ParentReference.DriveType)
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
@@ -215,25 +237,14 @@ func init() {
|
||||
Help: "Chunk size to upload files with - must be multiple of 320k.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive ( personal | business | documentLibrary )",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ResourceURL string `config:"resource_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote one drive
|
||||
@@ -246,8 +257,7 @@ type Fs struct {
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
driveID string // ID to use for querying Microsoft Graph
|
||||
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
|
||||
isBusiness bool // true if this is an OneDrive Business account
|
||||
}
|
||||
|
||||
// Object describes a one drive object
|
||||
@@ -317,17 +327,9 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
|
||||
var opts rest.Opts
|
||||
if len(path) == 0 {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
@@ -362,11 +364,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
if opt.ChunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
|
||||
}
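// Quick sanity check of the default (illustrative): 10 MiB is exactly
// 32 * 320 KiB, so the default chunk_size of 10*1024*1024 passes this test.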
|
||||
// if we have a resource URL it's a business account otherwise a personal one
|
||||
isBusiness := opt.ResourceURL != ""
|
||||
var rootURL string
|
||||
var oauthConfig *oauth2.Config
|
||||
if !isBusiness {
|
||||
// personal account setup
|
||||
oauthConfig = oauthPersonalConfig
|
||||
rootURL = rootURLPersonal
|
||||
} else {
|
||||
// business account setup
|
||||
oauthConfig = oauthBusinessConfig
|
||||
rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
|
||||
sharedURL = opt.ResourceURL + "_api/v2.0/drives"
|
||||
|
||||
if opt.DriveID == "" || opt.DriveType == "" {
|
||||
log.Fatalf("Unable to get drive_id and drive_type. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
|
||||
// update the URL in the AuthOptions
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
@@ -374,17 +388,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
driveID: opt.DriveID,
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
isBusiness: isBusiness,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: true,
|
||||
CaseInsensitive: true,
|
||||
// OneDrive for business doesn't support mime types properly
|
||||
// so we disable it until resolved
|
||||
// https://github.com/OneDrive/onedrive-api-docs/issues/643
|
||||
ReadMimeType: !f.isBusiness,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
@@ -530,7 +546,8 @@ type listAllFn func(*api.Item) bool
|
||||
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
// Top parameter asks for bigger pages of data
|
||||
// https://dev.onedrive.com/odata/optional-query-parameters.htm
|
||||
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
|
||||
opts := newOptsCall(dirID, "GET", "/children?top=1000")
|
||||
|
||||
OUTER:
|
||||
for {
|
||||
var result api.ListChildrenResponse
|
||||
@@ -738,11 +755,16 @@ func (f *Fs) Precision() time.Duration {
|
||||
func (f *Fs) waitForJob(location string, o *Object) error {
|
||||
deadline := time.Now().Add(fs.Config.Timeout)
|
||||
for time.Now().Before(deadline) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: location,
|
||||
IgnoreStatus: true, // Ignore the http status response since it seems to return valid info on 500 errors
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
var body []byte
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = http.Get(location)
|
||||
resp, err = f.srv.Call(&opts)
|
||||
if err != nil {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
@@ -758,18 +780,19 @@ func (f *Fs) waitForJob(location string, o *Object) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "async status result not JSON: %q", body)
|
||||
}
|
||||
|
||||
switch status.Status {
|
||||
case "failed":
|
||||
case "deleteFailed":
|
||||
{
|
||||
return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
|
||||
// See if we decoded anything...
|
||||
if !(status.Operation == "" && status.PercentageComplete == 0 && status.Status == "") {
|
||||
if status.Status == "failed" || status.Status == "deleteFailed" {
|
||||
return errors.Errorf("%s: async operation %q returned %q", o.remote, status.Operation, status.Status)
|
||||
}
|
||||
case "completed":
|
||||
err = o.readMetaData()
|
||||
return errors.Wrapf(err, "async operation completed but readMetaData failed")
|
||||
} else if resp.StatusCode == 200 {
|
||||
var info api.Item
|
||||
err = json.Unmarshal(body, &info)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "async item result not JSON: %q", body)
|
||||
}
|
||||
return o.setMetaData(&info)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
|
||||
@@ -808,23 +831,22 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := newOptsCall(srcObj.id, "POST", "/copy")
|
||||
opts := newOptsCall(srcObj.id, "POST", "/action.copy")
|
||||
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
|
||||
opts.NoResponse = true
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
replacedLeaf := replaceReservedChars(leaf)
|
||||
copyReq := api.CopyItemRequest{
|
||||
copy := api.CopyItemRequest{
|
||||
Name: &replacedLeaf,
|
||||
ParentReference: api.ItemReference{
|
||||
DriveID: f.driveID,
|
||||
ID: id,
|
||||
ID: id,
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, ©Req, nil)
|
||||
resp, err = f.srv.CallJSON(&opts, ©, nil)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -918,110 +940,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Refuse to move to or from the root
|
||||
if srcPath == "" || dstPath == "" {
|
||||
fs.Debugf(src, "DirMove error: Can't move root")
|
||||
return errors.New("can't move root directory")
|
||||
}
|
||||
|
||||
// find the root src directory
|
||||
err := srcFs.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// find the root dst directory
|
||||
if dstRemote != "" {
|
||||
err = f.dirCache.FindRoot(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if f.dirCache.FoundRoot() {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of dst parent, creating subdirs if necessary
|
||||
var leaf, dstDirectoryID string
|
||||
findPath := dstRemote
|
||||
if dstRemote == "" {
|
||||
findPath = f.root
|
||||
}
|
||||
leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
|
||||
|
||||
// Check destination does not exist
|
||||
if dstRemote != "" {
|
||||
_, err = f.dirCache.FindDir(dstRemote, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
}
|
||||
|
||||
// Find ID of src
|
||||
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get timestamps of src so they can be preserved
|
||||
srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
opts := newOptsCall(srcID, "PATCH", "")
|
||||
move := api.MoveItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
ID: parsedDstDirID,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
CreatedDateTime: srcInfo.CreatedDateTime,
|
||||
LastModifiedDateTime: srcInfo.LastModifiedDateTime,
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
var info api.Item
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &move, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcFs.dirCache.FlushDir(srcRemote)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
// optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
@@ -1055,10 +973,10 @@ func (f *Fs) About() (usage *fs.Usage, err error) {
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
if f.driveType == driveTypePersonal {
|
||||
return hash.Set(hash.SHA1)
|
||||
if f.isBusiness {
|
||||
return hash.Set(hash.QuickXorHash)
|
||||
}
|
||||
return hash.Set(hash.QuickXorHash)
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -1088,16 +1006,16 @@ func (o *Object) srvPath() string {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if o.fs.driveType == driveTypePersonal {
|
||||
if t == hash.SHA1 {
|
||||
return o.sha1, nil
|
||||
}
|
||||
} else {
|
||||
if t == hash.QuickXorHash {
|
||||
return o.quickxorhash, nil
|
||||
if o.fs.isBusiness {
|
||||
if t != hash.QuickXorHash {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
if t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.sha1, nil
|
||||
}
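// Illustrative sketch of a caller (o is an *Object as above): ask for the
// preferred hash and fall back when the account type doesn't support it.
sum, err := o.Hash(hash.QuickXorHash)
if err == hash.ErrUnsupported {
	// personal accounts only expose SHA-1
	sum, err = o.Hash(hash.SHA1)
}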
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
@@ -1258,12 +1176,12 @@ func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUpl
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/createUploadSession",
|
||||
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/upload.createSession",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
|
||||
}
|
||||
}
|
||||
createRequest := api.CreateUploadRequest{}
|
||||
@@ -1365,6 +1283,49 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// uploadSinglepart uploads a file as a single part
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
var resp *http.Response
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
}
|
||||
// for go1.8 (see release notes) we must nil the Body if we want a
|
||||
// "Content-Length: 0" header which onedrive requires for all files.
|
||||
if size == 0 {
|
||||
opts.Body = nil
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = o.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Set the mod time now and read metadata
|
||||
return o.setModTime(modTime)
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
@@ -1375,7 +1336,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
size := src.Size()
|
||||
modTime := src.ModTime()
|
||||
|
||||
info, err := o.uploadMultipart(in, size, modTime)
|
||||
var info *api.Item
|
||||
if size <= 0 {
|
||||
// This is for 0 length files, or files with an unknown size
|
||||
info, err = o.uploadSinglepart(in, size, modTime)
|
||||
} else {
|
||||
info, err = o.uploadMultipart(in, size, modTime)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1416,18 +1383,18 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
|
||||
func parseDirID(ID string) (string, string, string) {
|
||||
if strings.Index(ID, "#") >= 0 {
|
||||
s := strings.Split(ID, "#")
|
||||
return s[1], s[0], graphURL + "/drives"
|
||||
return s[1], s[0], sharedURL
|
||||
}
|
||||
return ID, "", ""
|
||||
}
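// Illustrative calls (made-up IDs): shared items are encoded as "driveID#itemID",
// plain items carry just the item ID; the third return value is the root URL to
// use for shared-drive requests.
itemID, driveID, rootURL := parseDirID("driveA#item1") // -> "item1", "driveA", <shared root URL>
itemID, driveID, rootURL = parseDirID("item2")         // -> "item2", "", ""
_, _, _ = itemID, driveID, rootURL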
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
// _ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
|
||||
@@ -140,7 +140,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
|
||||
got := h.Sum(nil)
|
||||
want, err := base64.StdEncoding.DecodeString(test.out)
|
||||
require.NoError(t, err, what)
|
||||
assert.Equal(t, want, got, test.size, what)
|
||||
assert.Equal(t, want, got[:], test.size, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -539,11 +539,6 @@ func init() {
|
||||
Help: "Concurrency for multipart uploads.",
|
||||
Default: 2,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "force_path_style",
|
||||
Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -574,7 +569,6 @@ type Options struct {
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -713,7 +707,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
WithCredentials(cred).
|
||||
WithEndpoint(opt.Endpoint).
|
||||
WithHTTPClient(fshttp.NewClient(fs.Config)).
|
||||
WithS3ForcePathStyle(opt.ForcePathStyle)
|
||||
WithS3ForcePathStyle(true)
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
ses := session.New()
|
||||
c := s3.New(ses, awsConfig)
|
||||
|
||||
@@ -120,20 +120,6 @@ func init() {
|
||||
Help: "Admin",
|
||||
Value: "admin",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_policy",
|
||||
Help: "The storage policy to use when creating a new container",
|
||||
Default: "",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Default",
|
||||
Value: "",
|
||||
}, {
|
||||
Help: "OVH Public Cloud Storage",
|
||||
Value: "pcs",
|
||||
}, {
|
||||
Help: "OVH Public Cloud Archive",
|
||||
Value: "pca",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Above this size files will be chunked into a _segments container.",
|
||||
@@ -145,22 +131,21 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote swift server
|
||||
@@ -598,11 +583,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
_, _, err = f.c.Container(f.container)
|
||||
}
|
||||
if err == swift.ContainerNotFound {
|
||||
headers := swift.Headers{}
|
||||
if f.opt.StoragePolicy != "" {
|
||||
headers["X-Storage-Policy"] = f.opt.StoragePolicy
|
||||
}
|
||||
err = f.c.ContainerCreate(f.container, headers)
|
||||
err = f.c.ContainerCreate(f.container, nil)
|
||||
}
|
||||
if err == nil {
|
||||
f.containerOK = true
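// For reference (illustrative, mirroring the header set above): a storage
// policy is applied at container creation time, e.g.
//	err = c.ContainerCreate("my-container", swift.Headers{"X-Storage-Policy": "pca"})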
|
||||
@@ -899,11 +880,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
var err error
|
||||
_, _, err = o.fs.c.Container(o.fs.segmentsContainer)
|
||||
if err == swift.ContainerNotFound {
|
||||
headers := swift.Headers{}
|
||||
if o.fs.opt.StoragePolicy != "" {
|
||||
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
|
||||
}
|
||||
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
|
||||
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
@@ -46,8 +46,7 @@ import (
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDepth = "1" // depth for PROPFIND
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -104,18 +103,17 @@ type Options struct {
|
||||
|
||||
// Fs represents a remote webdav
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL // URL of the host
|
||||
endpointURL string // endpoint as a string
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
precision time.Duration // mod time precision
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL // URL of the host
|
||||
endpointURL string // endpoint as a string
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
precision time.Duration // mod time precision
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -184,13 +182,13 @@ func itemIsDir(item *api.Response) bool {
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err error) {
|
||||
func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
// FIXME how do we read back additional properties?
|
||||
opts := rest.Opts{
|
||||
Method: "PROPFIND",
|
||||
Path: f.filePath(path),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": depth,
|
||||
"Depth": "1",
|
||||
},
|
||||
NoRedirect: true,
|
||||
}
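// For reference (standard WebDAV semantics, not specific to this change):
// "Depth: 0" returns properties of the resource itself, "Depth: 1" also returns
// its immediate children. An equivalent raw request would be:
//	req, _ := http.NewRequest("PROPFIND", endpointURL+"/some/path", nil)
//	req.Header.Set("Depth", "0")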
|
||||
@@ -204,9 +202,6 @@ func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err
|
||||
// does not exist
|
||||
switch apiErr.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
if f.retryWithZeroDepth && depth != "0" {
|
||||
return f.readMetaDataForPath(path, "0")
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
|
||||
// Some sort of redirect - go doesn't deal with these properly (it resets
|
||||
@@ -373,12 +368,6 @@ func (f *Fs) setQuirks(vendor string) error {
|
||||
return err
|
||||
}
|
||||
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
|
||||
|
||||
// sharepoint, unlike the other vendors, only lists files if the depth header is set to 0
|
||||
// however, rclone defaults to 1 since it provides recursive directory listing
|
||||
// to determine if we may have found a file, the request has to be resent
|
||||
// with the depth set to 0
|
||||
f.retryWithZeroDepth = true
|
||||
case "other":
|
||||
default:
|
||||
fs.Debugf(f, "Unknown vendor %q", vendor)
|
||||
@@ -429,12 +418,12 @@ type listAllFn func(string, bool, *api.Prop) bool
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
|
||||
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PROPFIND",
|
||||
Path: f.dirPath(dir), // FIXME Should not start with /
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": depth,
|
||||
"Depth": "1",
|
||||
},
|
||||
}
|
||||
var result api.Multistatus
|
||||
@@ -447,9 +436,6 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
if f.retryWithZeroDepth && depth != "0" {
|
||||
return f.listAll(dir, directoriesOnly, filesOnly, "0", fn)
|
||||
}
|
||||
return found, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
@@ -523,7 +509,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
var iErr error
|
||||
_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
_, err = f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
if isDir {
|
||||
d := fs.NewDir(remote, time.Time(info.Modified))
|
||||
// .SetID(info.ID)
|
||||
@@ -639,7 +625,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
//
|
||||
// if the directory does not exist then err will be ErrorDirNotFound
|
||||
func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
|
||||
return f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
return f.listAll(dir, false, false, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
@@ -890,7 +876,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetaData {
|
||||
return nil
|
||||
}
|
||||
info, err := o.fs.readMetaDataForPath(o.remote, defaultDepth)
|
||||
info, err := o.fs.readMetaDataForPath(o.remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -968,8 +954,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
// Remove failed upload
|
||||
_ = o.Remove()
|
||||
return err
|
||||
}
|
||||
// read metadata from remote
|
||||
|
||||
@@ -29,7 +29,7 @@ func (c *Client) PerformDelete(url string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body))
|
||||
return errors.Errorf("delete error [%d]: %s", resp.StatusCode, string(body[:]))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ func (c *Client) PerformDownload(url string, headers map[string]string) (out io.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body))
|
||||
return nil, errors.Errorf("download error [%d]: %s", resp.StatusCode, string(body[:]))
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func (c *Client) PerformMkdir(url string) (int, string, error) {
|
||||
return 0, "", err
|
||||
}
|
||||
// third parameter is the JSON error response body
|
||||
return resp.StatusCode, string(body), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body))
|
||||
return resp.StatusCode, string(body[:]), errors.Errorf("create folder error [%d]: %s", resp.StatusCode, string(body[:]))
|
||||
}
|
||||
return resp.StatusCode, "", nil
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body))
|
||||
return errors.Errorf("upload error [%d]: %s", resp.StatusCode, string(body[:]))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -165,11 +165,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
//return err
|
||||
} else {
|
||||
if ResourceInfoResponse.ResourceType == "file" {
|
||||
rootDir := path.Dir(root)
|
||||
if rootDir == "." {
|
||||
rootDir = ""
|
||||
}
|
||||
f.setRoot(rootDir)
|
||||
f.setRoot(path.Dir(root))
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
@@ -35,7 +35,6 @@ docs = [
|
||||
"drive.md",
|
||||
"http.md",
|
||||
"hubic.md",
|
||||
"jottacloud.md",
|
||||
"mega.md",
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
|
||||
@@ -42,7 +42,6 @@ import (
|
||||
_ "github.com/ncw/rclone/cmd/purge"
|
||||
_ "github.com/ncw/rclone/cmd/rc"
|
||||
_ "github.com/ncw/rclone/cmd/rcat"
|
||||
_ "github.com/ncw/rclone/cmd/reveal"
|
||||
_ "github.com/ncw/rclone/cmd/rmdir"
|
||||
_ "github.com/ncw/rclone/cmd/rmdirs"
|
||||
_ "github.com/ncw/rclone/cmd/serve"
|
||||
|
||||
@@ -84,7 +84,6 @@ from various cloud storage systems and using file transfer services, such as:
|
||||
* Google Drive
|
||||
* HTTP
|
||||
* Hubic
|
||||
* Jottacloud
|
||||
* Mega
|
||||
* Microsoft Azure Blob Storage
|
||||
* Microsoft OneDrive
|
||||
|
||||
@@ -88,9 +88,6 @@ func mountOptions(device string, mountpoint string) (options []string) {
|
||||
if mountlib.WritebackCache {
|
||||
// FIXME? options = append(options, "-o", WritebackCache())
|
||||
}
|
||||
if mountlib.DaemonTimeout != 0 {
|
||||
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(mountlib.DaemonTimeout.Seconds())))
|
||||
}
|
||||
for _, option := range mountlib.ExtraOptions {
|
||||
options = append(options, "-o", option)
|
||||
}
|
||||
@@ -213,7 +210,7 @@ func Mount(f fs.Fs, mountpoint string) error {
|
||||
sigHup := make(chan os.Signal, 1)
|
||||
signal.Notify(sigHup, syscall.SIGHUP)
|
||||
|
||||
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
|
||||
if err := sdnotify.SdNotifyReady(); err != nil && err != sdnotify.SdNotifyNoSocket {
|
||||
return errors.Wrap(err, "failed to notify systemd")
|
||||
}
|
||||
|
||||
@@ -234,7 +231,7 @@ waitloop:
|
||||
}
|
||||
}
|
||||
|
||||
_ = sdnotify.Stopping()
|
||||
_ = sdnotify.SdNotifyStopping()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to umount FUSE fs")
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@ var (
|
||||
recurse bool
|
||||
showHash bool
|
||||
showEncrypted bool
|
||||
showOrigIDs bool
|
||||
noModTime bool
|
||||
)
|
||||
|
||||
@@ -32,7 +31,6 @@ func init() {
|
||||
commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
|
||||
commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
|
||||
commandDefintion.Flags().BoolVarP(&showEncrypted, "encrypted", "M", false, "Show the encrypted names.")
|
||||
commandDefintion.Flags().BoolVarP(&showOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
|
||||
}
|
||||
|
||||
// lsJSON in the struct which gets marshalled for each line
|
||||
@@ -46,7 +44,6 @@ type lsJSON struct {
|
||||
IsDir bool
|
||||
Hashes map[string]string `json:",omitempty"`
|
||||
ID string `json:",omitempty"`
|
||||
OrigID string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Timestamp is a time in RFC3339 format with nanosecond precision
|
||||
@@ -75,7 +72,6 @@ The output is an array of Items, where each Item looks like this
|
||||
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
|
||||
},
|
||||
"ID": "y2djkhiujf83u33",
|
||||
"OrigID": "UYOJVTUW00Q1RzTDA",
|
||||
"IsDir" : false,
|
||||
"MimeType" : "application/octet-stream",
|
||||
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
|
||||
@@ -150,23 +146,6 @@ can be processed line by line as each item is written one to a line.
|
||||
if do, ok := entry.(fs.IDer); ok {
|
||||
item.ID = do.ID()
|
||||
}
|
||||
if showOrigIDs {
|
||||
cur := entry
|
||||
for {
|
||||
u, ok := cur.(fs.ObjectUnWrapper)
|
||||
if !ok {
|
||||
break // not a wrapped object, use current id
|
||||
}
|
||||
next := u.UnWrap()
|
||||
if next == nil {
|
||||
break // no base object found, use current id
|
||||
}
|
||||
cur = next
|
||||
}
|
||||
if do, ok := cur.(fs.IDer); ok {
|
||||
item.OrigID = do.ID()
|
||||
}
|
||||
}
|
||||
switch x := entry.(type) {
|
||||
case fs.Directory:
|
||||
item.IsDir = true
|
||||
|
||||
@@ -189,7 +189,7 @@ var _ fusefs.NodeLinker = (*Dir)(nil)
|
||||
|
||||
// Link creates a new directory entry in the receiver based on an
|
||||
// existing Node. Receiver must be a directory.
|
||||
func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node) (newNode fusefs.Node, err error) {
|
||||
defer log.Trace(d, "req=%v, old=%v", req, old)("new=%v, err=%v", &newNode, &err)
|
||||
func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node) (new fusefs.Node, err error) {
|
||||
defer log.Trace(d, "req=%v, old=%v", req, old)("new=%v, err=%v", &new, &err)
|
||||
return nil, fuse.ENOSYS
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
@@ -64,9 +63,6 @@ func mountOptions(device string) (options []fuse.MountOption) {
|
||||
if mountlib.WritebackCache {
|
||||
options = append(options, fuse.WritebackCache())
|
||||
}
|
||||
if mountlib.DaemonTimeout != 0 {
|
||||
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(mountlib.DaemonTimeout.Seconds()))))
|
||||
}
|
||||
if len(mountlib.ExtraOptions) > 0 {
|
||||
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
|
||||
}
|
||||
@@ -140,7 +136,7 @@ func Mount(f fs.Fs, mountpoint string) error {
|
||||
signal.Notify(sigHup, syscall.SIGHUP)
|
||||
atexit.IgnoreSignals()
|
||||
|
||||
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
|
||||
if err := sdnotify.SdNotifyReady(); err != nil && err != sdnotify.SdNotifyNoSocket {
|
||||
return errors.Wrap(err, "failed to notify systemd")
|
||||
}
|
||||
|
||||
@@ -165,7 +161,7 @@ waitloop:
|
||||
}
|
||||
}
|
||||
|
||||
_ = sdnotify.Stopping()
|
||||
_ = sdnotify.SdNotifyStopping()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to umount FUSE fs")
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
@@ -32,9 +31,8 @@ var (
|
||||
ExtraFlags []string
|
||||
AttrTimeout = 1 * time.Second // how long the kernel caches attribute for
|
||||
VolumeName string
|
||||
NoAppleDouble = true // use noappledouble by default
|
||||
NoAppleXattr = false // do not use noapplexattr by default
|
||||
DaemonTimeout time.Duration // OSXFUSE only
|
||||
NoAppleDouble = true // use noappledouble by default
|
||||
NoAppleXattr = false // do not use noapplexattr by default
|
||||
)
|
||||
|
||||
// Check if folder is empty
|
||||
@@ -217,11 +215,6 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
` + vfs.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
|
||||
if Daemon {
|
||||
config.PassConfigKeyForDaemonization = true
|
||||
}
|
||||
|
||||
fdst := cmd.NewFsDir(args)
|
||||
|
||||
// Show stats if the user has specifically requested them
|
||||
@@ -281,7 +274,6 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
|
||||
flags.StringArrayVarP(flagSet, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
|
||||
flags.BoolVarP(flagSet, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
|
||||
flags.StringVarP(flagSet, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
|
||||
flags.DurationVarP(flagSet, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
|
||||
|
||||
if runtime.GOOS == "darwin" {
|
||||
flags.BoolVarP(flagSet, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
|
||||
|
||||
@@ -143,7 +143,7 @@ func TestDirModTime(t *testing.T) {
|
||||
run.skipIfNoFUSE(t)
|
||||
|
||||
run.mkdir(t, "dir")
|
||||
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
err := os.Chtimes(run.path("dir"), mtime, mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ func TestFileModTime(t *testing.T) {
|
||||
|
||||
run.createFile(t, "file", "123")
|
||||
|
||||
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
err := os.Chtimes(run.path("file"), mtime, mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestFileModTimeWithOpenWriters(t *testing.T) {
|
||||
t.Skip("Skipping test on Windows")
|
||||
}
|
||||
|
||||
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
filepath := run.path("cp-archive-test")
|
||||
|
||||
f, err := osCreate(filepath)
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
package reveal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
Use: "reveal password",
|
||||
Short: `Reveal obscured password from rclone.conf`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
revealed, err := obscure.Reveal(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(revealed)
|
||||
return nil
|
||||
})
|
||||
},
|
||||
Hidden: true,
|
||||
}
|
||||
@@ -26,7 +26,6 @@ Rclone is a command line program to sync files and directories to and from:
|
||||
* {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
|
||||
* {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
|
||||
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
|
||||
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
|
||||
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
|
||||
|
||||
@@ -172,14 +172,3 @@ Contributors
|
||||
* Onno Zweers <onno.zweers@surfsara.nl>
|
||||
* Jasper Lievisse Adriaanse <jasper@humppa.nl>
|
||||
* sandeepkru <sandeep.ummadi@gmail.com>
|
||||
* HerrH <atomtigerzoo@users.noreply.github.com>
|
||||
* Andrew <4030760+sparkyman215@users.noreply.github.com>
|
||||
* dan smith <XX1011@gmail.com>
|
||||
* Oleg Kovalov <iamolegkovalov@gmail.com>
|
||||
* Ruben Vandamme <github-com-00ff86@vandamme.email>
|
||||
* Cnly <minecnly@gmail.com>
|
||||
* Andres Alvarez <1671935+kir4h@users.noreply.github.com>
|
||||
* reddi1 <xreddi@gmail.com>
|
||||
* Matt Tucker <matthewtckr@gmail.com>
|
||||
* Sebastian Bünger <buengese@gmail.com>
|
||||
* Martin Polden <mpolden@mpolden.no>
|
||||
|
||||
@@ -184,17 +184,6 @@ Upload chunk size. Default 4MB. Note that this is stored in memory
|
||||
and there may be up to `--transfers` chunks stored at once in memory.
|
||||
This can be at most 100MB.
|
||||
|
||||
#### --azureblob-access-tier=Hot/Cool/Archive ####
|
||||
|
||||
Azure storage supports blob tiering. You can configure the tier in the advanced
settings or supply the flag while performing data transfer operations.
If no `access tier` is specified, rclone doesn't apply any tier.
rclone performs a `Set Tier` operation on blobs while uploading; if objects
are not modified, specifying a new `access tier` has no effect.
If blobs are in the `archive tier` at the remote, data transfer operations
from the remote will not be allowed. The user should first restore the blobs
by tiering them to `Hot` or `Cool`.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
MD5 sums are only uploaded with chunked files if the source has an MD5
|
||||
|
||||
@@ -55,7 +55,7 @@ Choose a number from below, or type in your own value
|
||||
13 / Yandex Disk
|
||||
\ "yandex"
|
||||
Storage> 3
|
||||
Account ID or Application Key ID
|
||||
Account ID
|
||||
account> 123456789abc
|
||||
Application Key
|
||||
key> 0123456789abcdef0123456789abcdef0123456789
|
||||
@@ -80,7 +80,7 @@ See all buckets
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
Create a new bucket
|
||||
Make a new bucket
|
||||
|
||||
rclone mkdir remote:bucket
|
||||
|
||||
@@ -93,21 +93,6 @@ excess files in the bucket.
|
||||
|
||||
rclone sync /home/local/directory remote:bucket
|
||||
|
||||
### Application Keys ###
|
||||
|
||||
B2 supports multiple [Application Keys for different access permission
|
||||
to B2 Buckets](https://www.backblaze.com/b2/docs/application_keys.html).
|
||||
|
||||
You can use these with rclone too.
|
||||
|
||||
Follow Backblaze's docs to create an Application Key with the required
|
||||
permission and add the `Application Key ID` as the `account` and the
|
||||
`Application Key` itself as the `key`.
|
||||
|
||||
Note that you must put the Application Key ID as the `account` - you
|
||||
can't use the master Account ID. If you try then B2 will return 401
|
||||
errors.
|
||||
|
||||
### --fast-list ###
|
||||
|
||||
This remote supports `--fast-list` which allows you to use fewer
|
||||
|
||||
@@ -202,7 +202,7 @@ Box allows modification times to be set on objects accurate to 1
|
||||
second. These will be used to detect whether objects need syncing or
|
||||
not.
|
||||
|
||||
Box supports SHA1 type hashes, so you can use the `--checksum`
|
||||
One drive supports SHA1 type hashes, so you can use the `--checksum`
|
||||
flag.
|
||||
|
||||
### Transfers ###
|
||||
@@ -227,10 +227,6 @@ system.
|
||||
Cutoff for switching to chunked upload - must be >= 50MB. The default
|
||||
is 50MB.
|
||||
|
||||
#### --box-commit-retries int ####
|
||||
|
||||
Max number of times to try committing a multipart file. (default 100)
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that Box is case insensitive so you can't have a file called
|
||||
|
||||
@@ -259,14 +259,6 @@ Params:
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --cache-db-path=PATH ####
|
||||
|
||||
Path to where the file structure metadata (DB) is stored locally. The remote
|
||||
name is used as the DB file name.
|
||||
|
||||
**Default**: <rclone default cache path>/cache-backend/<remote name>
|
||||
**Example**: /.cache/cache-backend/test-cache
|
||||
|
||||
#### --cache-chunk-path=PATH ####
|
||||
|
||||
Path to where partial file data (chunks) is stored locally. The remote
|
||||
@@ -279,6 +271,14 @@ then `--cache-chunk-path` will use the same path as `--cache-db-path`.
|
||||
**Default**: <rclone default cache path>/cache-backend/<remote name>
|
||||
**Example**: /.cache/cache-backend/test-cache
|
||||
|
||||
#### --cache-db-path=PATH ####
|
||||
|
||||
Path to where the file structure metadata (DB) is stored locally. The remote
|
||||
name is used as the DB file name.
|
||||
|
||||
**Default**: <rclone default cache path>/cache-backend/<remote name>
|
||||
**Example**: /.cache/cache-backend/test-cache
|
||||
|
||||
#### --cache-db-purge ####
|
||||
|
||||
Flag to clear all the cached data for this remote before starting.
|
||||
|
||||
@@ -33,7 +33,6 @@ See the following for detailed instructions for
|
||||
* [Google Drive](/drive/)
|
||||
* [HTTP](/http/)
|
||||
* [Hubic](/hubic/)
|
||||
* [Jottacloud](/jottacloud/)
|
||||
* [Mega](/mega/)
|
||||
* [Microsoft Azure Blob Storage](/azureblob/)
|
||||
* [Microsoft OneDrive](/onedrive/)
|
||||
@@ -341,10 +340,6 @@ change the bwlimit dynamically:
|
||||
Use this sized buffer to speed up file transfers. Each `--transfer`
|
||||
will use this much memory for buffering.
|
||||
|
||||
When using `mount` or `cmount` each open file descriptor will use this much
|
||||
memory for buffering.
|
||||
See the [mount](/commands/rclone_mount/#file-buffering) documentation for more details.
|
||||
|
||||
Set to 0 to disable the buffering for the minimum memory usage.
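
For example, to give each transfer a 32M buffer (the remote paths below are placeholders):

    rclone copy source:path dest:path --buffer-size 32M
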
### --checkers=N ###
|
||||
@@ -539,22 +534,6 @@ to reduce the value so rclone moves on to a high level retry (see the
|
||||
|
||||
Disable low level retries with `--low-level-retries 1`.
|
||||
|
||||
### --max-backlog=N ###
|
||||
|
||||
This is the maximum allowable backlog of files in a sync/copy/move
|
||||
queued for being checked or transferred.
|
||||
|
||||
This can be set arbitrarily large. It will only use memory when the
|
||||
queue is in use. Note that it will use in the order of N kB of memory
|
||||
when the backlog is in use.
|
||||
|
||||
Setting this large allows rclone to calculate how many files are
|
||||
pending more accurately and give a more accurate estimated finish
|
||||
time.
|
||||
|
||||
Setting this small will make rclone more synchronous to the listings
|
||||
of the remote which may be desirable.
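
For example, a sync over a very large tree could raise the limit like this (the paths and the value are only illustrative):

    rclone sync /home/local/directory remote:bucket --max-backlog 100000
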
### --max-delete=N ###
|
||||
|
||||
This tells rclone not to delete more than N files. If that limit is
|
||||
@@ -685,11 +664,6 @@ default level of logging which is `NOTICE` the stats won't show - if
|
||||
you want them to then use `--stats-log-level NOTICE`. See the [Logging
|
||||
section](#logging) for more info on log levels.
|
||||
|
||||
### --stats-one-line ###
|
||||
|
||||
When this is specified, rclone condenses the stats into a single line
|
||||
showing the most important stats only.
|
||||
|
||||
### --stats-unit=bits|bytes ###
|
||||
|
||||
By default, data transfer rates will be printed in bytes/second.
|
||||
@@ -765,7 +739,7 @@ old file on the remote and upload a new copy.
|
||||
|
||||
If you use this flag, and the remote supports server side copy or
|
||||
server side move, and the source and destination have a compatible
|
||||
hash, then this will track renames during `sync`
|
||||
hash, then this will track renames during `sync`, `copy`, and `move`
|
||||
operations and perform renaming server-side.
|
||||
|
||||
Files will be matched by size and hash - if both match then a rename
|
||||
|
||||
@@ -312,45 +312,6 @@ d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
### --fast-list ###

This remote supports `--fast-list` which allows you to use fewer
transactions in exchange for more memory. See the [rclone
docs](/docs/#fast-list) for more details.

It does this by combining multiple `list` calls into a single API request.

This works by combining many `'%s' in parents` filters into one expression.
To list the contents of directories a, b and c, the following requests will be sent by the regular `List` function:
```
trashed=false and 'a' in parents
trashed=false and 'b' in parents
trashed=false and 'c' in parents
```
These can now be combined into a single request:
```
trashed=false and ('a' in parents or 'b' in parents or 'c' in parents)
```

The implementation of `ListR` will put up to 50 `parents` filters into one request.
It will use the `--checkers` value to specify the number of requests to run in parallel.
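
As a rough Go sketch of the batching idea described above (illustrative only, not the actual rclone implementation; the function name and the example directory IDs are assumptions):

```go
package main

import (
	"fmt"
	"strings"
)

// buildParentsQuery combines a batch of Drive directory IDs into a single
// query of the form shown above:
//   trashed=false and ('a' in parents or 'b' in parents or ...)
func buildParentsQuery(dirIDs []string) string {
	const maxBatch = 50 // mirrors the 50-filter limit mentioned above
	if len(dirIDs) > maxBatch {
		dirIDs = dirIDs[:maxBatch]
	}
	parts := make([]string, 0, len(dirIDs))
	for _, id := range dirIDs {
		parts = append(parts, fmt.Sprintf("'%s' in parents", id))
	}
	return "trashed=false and (" + strings.Join(parts, " or ") + ")"
}

func main() {
	// Prints: trashed=false and ('a' in parents or 'b' in parents or 'c' in parents)
	fmt.Println(buildParentsQuery([]string{"a", "b", "c"}))
}
```
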
In tests, these batch requests were up to 20x faster than the regular method.
|
||||
Running the following command against different sized folders gives:
|
||||
```
|
||||
rclone lsjson -vv -R --checkers=6 gdrive:folder
|
||||
```
|
||||
|
||||
small folder (220 directories, 700 files):
|
||||
|
||||
- without `--fast-list`: 38s
|
||||
- with `--fast-list`: 10s
|
||||
|
||||
large folder (10600 directories, 39000 files):
|
||||
|
||||
- without `--fast-list`: 22:05 min
|
||||
- with `--fast-list`: 58s
|
||||
|
||||
### Modified time ###
|
||||
|
||||
Google drive stores modification times accurate to 1 ms.
|
||||
|
||||
@@ -1,129 +0,0 @@
|
||||
---
|
||||
title: "Jottacloud"
|
||||
description: "Rclone docs for Jottacloud"
|
||||
date: "2018-08-07"
|
||||
---
|
||||
|
||||
<i class="fa fa-archive"></i> Jottacloud
|
||||
-----------------------------------------
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||
|
||||
To configure Jottacloud you will need to enter your username and password and select a mountpoint.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
[snip]
|
||||
13 / JottaCloud
|
||||
\ "jottacloud"
|
||||
[snip]
|
||||
Storage> jottacloud
|
||||
User Name
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
user> user
|
||||
Password.
|
||||
y) Yes type in my own password
|
||||
g) Generate random password
|
||||
n) No leave this optional password blank
|
||||
y/g/n> y
|
||||
Enter the password:
|
||||
password:
|
||||
Confirm the password:
|
||||
password:
|
||||
The mountpoint to use.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Will be synced by the official client.
|
||||
\ "Sync"
|
||||
2 / Archive
|
||||
\ "Archive"
|
||||
mountpoint> Archive
|
||||
Remote config
|
||||
--------------------
|
||||
[remote]
|
||||
type = jottacloud
|
||||
user = user
|
||||
pass = *** ENCRYPTED ***
|
||||
mountpoint = Archive
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level of your Jottacloud
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
List all the files in your Jottacloud
|
||||
|
||||
rclone ls remote:
|
||||
|
||||
To copy a local directory to a Jottacloud directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
|
||||
### Modified time and hashes ###
|
||||
|
||||
Jottacloud allows modification times to be set on objects accurate to 1
|
||||
second. These will be used to detect whether objects need syncing or
|
||||
not.
|
||||
|
||||
Jottacloud supports MD5 type hashes, so you can use the `--checksum`
|
||||
flag.
|
||||
|
||||
Note that Jottacloud requires the MD5 hash before upload so if the
|
||||
source does not have an MD5 checksum then the file will be cached
|
||||
temporarily on disk (wherever the `TMPDIR` environment variable points
|
||||
to) before it is uploaded. Small files will be cached in memory - see
|
||||
the `--jottacloud-md5-memory-limit` flag.
|
||||
|
||||
### Deleting files ###
|
||||
|
||||
Any files you delete with rclone will end up in the trash. Due to a lack of API documentation emptying the trash is currently only possible via the Jottacloud website.
|
||||
|
||||
### Versions ###
|
||||
|
||||
Jottacloud supports file versioning. When rclone uploads a new version of a file, Jottacloud creates a new version of it. Currently rclone only supports retrieving the current version, but older versions can be accessed via the Jottacloud website.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that Jottacloud is case insensitive so you can't have a file called
|
||||
"Hello.doc" and one called "hello.doc".
|
||||
|
||||
There are quite a few characters that can't be in Jottacloud file names. Rclone will map these names to and from an identical looking unicode equivalent. For example, if a file has a ? in it, it will be mapped to ？ instead.
|
||||
|
||||
Jottacloud only supports filenames up to 255 characters in length.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --jottacloud-md5-memory-limit SizeSuffix
|
||||
|
||||
Files bigger than this will be cached on disk to calculate the MD5 if
|
||||
required. (default 10M)
|
||||
|
||||
### Troubleshooting ###
|
||||
|
||||
Jottacloud exhibits some inconsistent behaviours regarding deleted files and folders which may cause Copy, Move and DirMove operations to previously deleted paths to fail. Emptying the trash should help in such cases.
|
||||
@@ -96,23 +96,6 @@ messages in the log about duplicates.
|
||||
|
||||
Use `rclone dedupe` to fix duplicated files.
|
||||
|
||||
### Specific options ###
|
||||
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --mega-debug ####
|
||||
|
||||
If this flag is set (along with `-vv`) it will print further debugging
|
||||
information from the mega backend.
|
||||
|
||||
#### --mega-hard-delete ####
|
||||
|
||||
Normally the mega backend will put all deletions into the trash rather
|
||||
than permanently deleting them. If you specify this flag (or set it
|
||||
in the advanced config) then rclone will permanently delete objects
|
||||
instead.
|
||||
|
||||
### Limitations ###
|
||||
|
||||
This backend uses the [go-mega go
|
||||
|
||||
@@ -27,7 +27,6 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| Google Drive | MD5 | Yes | No | Yes | R/W |
|
||||
| HTTP | - | No | No | No | R |
|
||||
| Hubic | MD5 | Yes | No | No | R/W |
|
||||
| Jottacloud | MD5 | Yes | Yes | No | R/W |
|
||||
| Mega | - | No | No | Yes | - |
|
||||
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
|
||||
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
|
||||
@@ -135,13 +134,12 @@ operations more efficient.
|
||||
| Dropbox | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | Yes | Yes | Yes |
|
||||
| FTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
|
||||
| Google Drive | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
|
||||
| HTTP | No | No | No | No | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| Jottacloud | Yes | Yes | Yes | Yes | No | No | No | No | No |
|
||||
| Mega | Yes | No | Yes | Yes | No | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | No |
|
||||
| Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| Microsoft OneDrive | Yes | Yes | Yes | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No |
|
||||
| Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/ncw/rclone/issues/2178) | Yes |
|
||||
|
||||
@@ -120,43 +120,6 @@ The most interesting values for most people are:
|
||||
This returns PID of current process.
|
||||
Useful for stopping rclone process.
|
||||
|
||||
### core/stats: Returns stats about current transfers.
|
||||
|
||||
This returns all available stats
|
||||
|
||||
rclone rc core/stats
|
||||
|
||||
Returns the following values:
|
||||
|
||||
```
|
||||
{
|
||||
"speed": average speed in bytes/sec since start of the process,
|
||||
"bytes": total transferred bytes since the start of the process,
|
||||
"errors": number of errors,
|
||||
"checks": number of checked files,
|
||||
"transfers": number of transferred files,
|
||||
"deletes" : number of deleted files,
|
||||
"elapsedTime": time in seconds since the start of the process,
|
||||
"lastError": last occurred error,
|
||||
"transferring": an array of currently active file transfers:
|
||||
[
|
||||
{
|
||||
"bytes": total transferred bytes for this file,
|
||||
"eta": estimated time in seconds until file transfer completion
|
||||
"name": name of the file,
|
||||
"percentage": progress of the file transfer in percent,
|
||||
"speed": speed in bytes/sec,
|
||||
"speedAvg": speed in bytes/sec as an exponentially weighted moving average,
|
||||
"size": size of the file in bytes
|
||||
}
|
||||
],
|
||||
"checking": an array of names of currently active file checks
|
||||
[]
|
||||
}
|
||||
```
|
||||
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
|
||||
The value for "eta" is null if an eta cannot be determined.
|
|
||||
|
||||
This returns an error with the input as part of its error string.
|
||||
@@ -189,23 +152,6 @@ starting with dir will forget that dir, eg
|
||||
|
||||
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
|
||||
|
||||
### vfs/refresh: Refresh the directory cache.
|
||||
|
||||
This reads the directories for the specified paths and freshens the
|
||||
directory cache.
|
||||
|
||||
If no paths are passed in then it will refresh the root directory.
|
||||
|
||||
rclone rc vfs/refresh
|
||||
|
||||
Otherwise pass directories in as dir=path. Any parameter key
|
||||
starting with dir will refresh that directory, eg
|
||||
|
||||
rclone rc vfs/refresh dir=home/junk dir2=data/misc
|
||||
|
||||
If the parameter recursive=true is given the whole directory tree
|
||||
will get refreshed. This refresh will use --fast-list if enabled.
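
For example, to refresh a directory and everything below it (the path is a placeholder):

    rclone rc vfs/refresh dir=home/junk recursive=true
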
<!--- autogenerated stop -->
|
||||
|
||||
## Accessing the remote control via HTTP
|
||||
|
||||
@@ -402,16 +402,6 @@ Note that 2 chunks of this size are buffered in memory per transfer.
|
||||
If you are transferring large files over high speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
#### --s3-force-path-style=BOOL ####
|
||||
|
||||
If this is true (the default) then rclone will use path style access,
|
||||
if false then rclone will use virtual path style. See [the AWS S3
|
||||
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
|
||||
for more info.
|
||||
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this set to
|
||||
`false`. It can also be set in the config in the advanced section.
|
||||
|
||||
#### --s3-upload-concurrency ####
|
||||
|
||||
Number of chunks of the same file that are uploaded concurrently.
|
||||
@@ -921,107 +911,3 @@ acl =
|
||||
server_side_encryption =
|
||||
storage_class =
|
||||
```
|
||||
|
||||
### Aliyun OSS / Netease NOS ###
|
||||
|
||||
This describes how to set up Aliyun OSS - Netease NOS is the same
|
||||
except for different endpoints.
|
||||
|
||||
Note this is a pretty standard S3 setup, except for the setting of
|
||||
`force_path_style = false` in the advanced config.
|
||||
|
||||
```
|
||||
# rclone config
|
||||
e/n/d/r/c/s/q> n
|
||||
name> oss
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
3 / Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)
|
||||
\ "s3"
|
||||
Storage> s3
|
||||
Choose your S3 provider.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
8 / Any other S3 compatible provider
|
||||
\ "Other"
|
||||
provider> other
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Enter AWS credentials in the next step
|
||||
\ "false"
|
||||
2 / Get AWS credentials from the environment (env vars or IAM)
|
||||
\ "true"
|
||||
env_auth> 1
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
access_key_id> xxxxxxxxxxxx
|
||||
AWS Secret Access Key (password)
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
secret_access_key> xxxxxxxxxxxxxxxxx
|
||||
Region to connect to.
|
||||
Leave blank if you are using an S3 clone and you don't have a region.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Use this if unsure. Will use v4 signatures and an empty region.
|
||||
\ ""
|
||||
2 / Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.
|
||||
\ "other-v2-signature"
|
||||
region> 1
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
endpoint> oss-cn-shenzhen.aliyuncs.com
|
||||
Location constraint - must be set to match the Region.
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
location_constraint>
|
||||
Canned ACL used when creating buckets and/or storing objects in S3.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Owner gets FULL_CONTROL. No one else has access rights (default).
|
||||
\ "private"
|
||||
acl> 1
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
Chunk size to use for uploading
|
||||
Enter a size with suffix k,M,G,T. Press Enter for the default ("5M").
|
||||
chunk_size>
|
||||
Don't store MD5 checksum with object metadata
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
disable_checksum>
|
||||
An AWS session token
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
session_token>
|
||||
Concurrency for multipart uploads.
|
||||
Enter a signed integer. Press Enter for the default ("2").
|
||||
upload_concurrency>
|
||||
If true use path style access if false use virtual hosted style.
|
||||
Some providers (eg Aliyun OSS or Netease COS) require this.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("true").
|
||||
force_path_style> false
|
||||
Remote config
|
||||
--------------------
|
||||
[oss]
|
||||
type = s3
|
||||
provider = Other
|
||||
env_auth = false
|
||||
access_key_id = xxxxxxxxx
|
||||
secret_access_key = xxxxxxxxxxxxx
|
||||
endpoint = oss-cn-shenzhen.aliyuncs.com
|
||||
acl = private
|
||||
force_path_style = false
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
@@ -266,11 +266,6 @@ files whose local modtime is newer than the time it was last uploaded.
|
||||
Here are the command line options specific to this cloud storage
|
||||
system.
|
||||
|
||||
#### --swift-storage-policy=STRING ####
|
||||
Apply the specified storage policy when creating a new container. The policy
|
||||
cannot be changed afterwards. The allowed configuration values and their
|
||||
meaning depend on your Swift storage provider.
|
||||
|
||||
#### --swift-chunk-size=SIZE ####
|
||||
|
||||
Above this size files will be chunked into a _segments container. The
|
||||
|
||||
@@ -66,7 +66,6 @@
|
||||
<li><a href="/drive/"><i class="fa fa-google"></i> Google Drive</a></li>
|
||||
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
|
||||
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
|
||||
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
|
||||
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
|
||||
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
|
||||
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>
|
||||
|
||||
@@ -96,16 +96,6 @@ func (acc *Account) GetReader() io.ReadCloser {
|
||||
return acc.origIn
|
||||
}
|
||||
|
||||
// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
|
||||
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
|
||||
acc.mu.Lock()
|
||||
defer acc.mu.Unlock()
|
||||
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
|
||||
return asyncIn
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopBuffering stops the async buffer doing any more buffering
|
||||
func (acc *Account) StopBuffering() {
|
||||
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
|
||||
@@ -290,36 +280,6 @@ func (acc *Account) String() string {
|
||||
)
|
||||
}
|
||||
|
||||
// RemoteStats produces stats for this file
|
||||
func (acc *Account) RemoteStats() (out map[string]interface{}) {
|
||||
out = make(map[string]interface{})
|
||||
a, b := acc.progress()
|
||||
out["bytes"] = a
|
||||
out["size"] = b
|
||||
spd, cur := acc.speed()
|
||||
out["speed"] = spd
|
||||
out["speedAvg"] = cur
|
||||
|
||||
eta, etaok := acc.eta()
|
||||
out["eta"] = nil
|
||||
if etaok {
|
||||
if eta > 0 {
|
||||
out["eta"] = eta.Seconds()
|
||||
} else {
|
||||
out["eta"] = 0
|
||||
}
|
||||
}
|
||||
out["name"] = acc.name
|
||||
|
||||
percentageDone := 0
|
||||
if b > 0 {
|
||||
percentageDone = int(100 * float64(a) / float64(b))
|
||||
}
|
||||
out["percentage"] = percentageDone
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// OldStream returns the top io.Reader
|
||||
func (acc *Account) OldStream() io.Reader {
|
||||
acc.mu.Lock()
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -19,69 +18,21 @@ var (
|
||||
func init() {
|
||||
// Set the function pointer up in fs
|
||||
fs.CountError = Stats.Error
|
||||
|
||||
rc.Add(rc.Call{
|
||||
Path: "core/stats",
|
||||
Fn: Stats.RemoteStats,
|
||||
Title: "Returns stats about current transfers.",
|
||||
Help: `
|
||||
This returns all available stats
|
||||
|
||||
rclone rc core/stats
|
||||
|
||||
Returns the following values:
|
||||
|
||||
` + "```" + `
|
||||
{
|
||||
"speed": average speed in bytes/sec since start of the process,
|
||||
"bytes": total transferred bytes since the start of the process,
|
||||
"errors": number of errors,
|
||||
"checks": number of checked files,
|
||||
"transfers": number of transferred files,
|
||||
"deletes" : number of deleted files,
|
||||
"elapsedTime": time in seconds since the start of the process,
|
||||
"lastError": last occurred error,
|
||||
"transferring": an array of currently active file transfers:
|
||||
[
|
||||
{
|
||||
"bytes": total transferred bytes for this file,
|
||||
"eta": estimated time in seconds until file transfer completion
|
||||
"name": name of the file,
|
||||
"percentage": progress of the file transfer in percent,
|
||||
"speed": speed in bytes/sec,
|
||||
"speedAvg": speed in bytes/sec as an exponentially weighted moving average,
|
||||
"size": size of the file in bytes
|
||||
}
|
||||
],
|
||||
"checking": an array of names of currently active file checks
|
||||
[]
|
||||
}
|
||||
` + "```" + `
|
||||
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
|
||||
The value for "eta" is null if an eta cannot be determined.
|
||||
`,
|
||||
})
|
||||
}
|
||||
|
||||
// StatsInfo accounts all transfers
|
||||
type StatsInfo struct {
|
||||
mu sync.RWMutex
|
||||
bytes int64
|
||||
errors int64
|
||||
lastError error
|
||||
checks int64
|
||||
checking *stringSet
|
||||
checkQueue int
|
||||
checkQueueSize int64
|
||||
transfers int64
|
||||
transferring *stringSet
|
||||
transferQueue int
|
||||
transferQueueSize int64
|
||||
renameQueue int
|
||||
renameQueueSize int64
|
||||
deletes int64
|
||||
start time.Time
|
||||
inProgress *inProgress
|
||||
mu sync.RWMutex
|
||||
bytes int64
|
||||
errors int64
|
||||
lastError error
|
||||
checks int64
|
||||
checking *stringSet
|
||||
transfers int64
|
||||
transferring *stringSet
|
||||
deletes int64
|
||||
start time.Time
|
||||
inProgress *inProgress
|
||||
}
|
||||
|
||||
// NewStats creates an initialised StatsInfo
|
||||
@@ -94,52 +45,6 @@ func NewStats() *StatsInfo {
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteStats returns stats for rc
|
||||
func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
s.mu.RLock()
|
||||
dt := time.Now().Sub(s.start)
|
||||
dtSeconds := dt.Seconds()
|
||||
speed := 0.0
|
||||
if dt > 0 {
|
||||
speed = float64(s.bytes) / dtSeconds
|
||||
}
|
||||
out["speed"] = speed
|
||||
out["bytes"] = s.bytes
|
||||
out["errors"] = s.errors
|
||||
out["checks"] = s.checks
|
||||
out["transfers"] = s.transfers
|
||||
out["deletes"] = s.deletes
|
||||
out["elapsedTime"] = dtSeconds
|
||||
s.mu.RUnlock()
|
||||
if !s.checking.empty() {
|
||||
var c []string
|
||||
s.checking.mu.RLock()
|
||||
defer s.checking.mu.RUnlock()
|
||||
for name := range s.checking.items {
|
||||
c = append(c, name)
|
||||
}
|
||||
out["checking"] = c
|
||||
}
|
||||
if !s.transferring.empty() {
|
||||
var t []interface{}
|
||||
s.transferring.mu.RLock()
|
||||
defer s.transferring.mu.RUnlock()
|
||||
for name := range s.transferring.items {
|
||||
if acc := s.inProgress.get(name); acc != nil {
|
||||
t = append(t, acc.RemoteStats())
|
||||
} else {
|
||||
t = append(t, name)
|
||||
}
|
||||
}
|
||||
out["transferring"] = t
|
||||
}
|
||||
if s.errors > 0 {
|
||||
out["lastError"] = s.lastError
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// String convert the StatsInfo to a string for printing
|
||||
func (s *StatsInfo) String() string {
|
||||
s.mu.RLock()
|
||||
@@ -157,64 +62,28 @@ func (s *StatsInfo) String() string {
|
||||
speed = speed * 8
|
||||
}
|
||||
|
||||
percent := func(a int64, b int64) int {
|
||||
if b <= 0 {
|
||||
return 0
|
||||
}
|
||||
return int(float64(a)*100/float64(b) + 0.5)
|
||||
}
|
||||
|
||||
totalChecks, totalTransfer, totalSize := int64(s.checkQueue)+s.checks, int64(s.transferQueue)+s.transfers, s.transferQueueSize+s.bytes
|
||||
eta := time.Duration(0)
|
||||
if speed > 0 {
|
||||
eta = time.Second * time.Duration(float64(s.transferQueueSize)/float64(speed)+0.5)
|
||||
}
|
||||
etaString := "-"
|
||||
if eta > 0 {
|
||||
etaString = eta.String()
|
||||
}
|
||||
if !fs.Config.StatsOneLine {
|
||||
_, _ = fmt.Fprintf(buf, "\nTransferred: ")
|
||||
}
|
||||
xfrchkString := ""
|
||||
if fs.Config.StatsOneLine {
|
||||
xfrchk := []string{}
|
||||
if totalTransfer > 0 && s.transferQueue > 0 {
|
||||
xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
|
||||
}
|
||||
if totalChecks > 0 && s.checkQueue > 0 {
|
||||
xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
|
||||
}
|
||||
if len(xfrchk) > 0 {
|
||||
xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
|
||||
}
|
||||
}
|
||||
_, _ = fmt.Fprintf(buf, "%10s / %s, %d%%, %s, ETA %s%s",
|
||||
fs.SizeSuffix(s.bytes), fs.SizeSuffix(totalSize).Unit("Bytes"), percent(s.bytes, totalSize), fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"), etaString, xfrchkString)
|
||||
if !fs.Config.StatsOneLine {
|
||||
_, _ = fmt.Fprintf(buf, `
|
||||
_, _ = fmt.Fprintf(buf, `
|
||||
Transferred: %10s (%s)
|
||||
Errors: %10d
|
||||
Checks: %10d / %d, %d%%
|
||||
Transferred: %10d / %d, %d%%
|
||||
Checks: %10d
|
||||
Transferred: %10d
|
||||
Elapsed time: %10v
|
||||
`,
|
||||
s.errors,
|
||||
s.checks, totalChecks, percent(s.checks, totalChecks),
|
||||
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
|
||||
dtRounded)
|
||||
}
|
||||
fs.SizeSuffix(s.bytes).Unit("Bytes"), fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
|
||||
s.errors,
|
||||
s.checks,
|
||||
s.transfers,
|
||||
dtRounded)
|
||||
|
||||
// checking and transferring have their own locking so unlock
|
||||
// here to prevent deadlock on GetBytes
|
||||
s.mu.RUnlock()
|
||||
|
||||
if !fs.Config.StatsOneLine {
|
||||
if !s.checking.empty() {
|
||||
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
|
||||
}
|
||||
if !s.transferring.empty() {
|
||||
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
|
||||
}
|
||||
if !s.checking.empty() {
|
||||
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
|
||||
}
|
||||
if !s.transferring.empty() {
|
||||
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
@@ -233,8 +102,8 @@ func (s *StatsInfo) Bytes(bytes int64) {
|
||||
|
||||
// GetBytes returns the number of bytes transferred so far
|
||||
func (s *StatsInfo) GetBytes() int64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.bytes
|
||||
}
|
||||
|
||||
@@ -269,8 +138,8 @@ func (s *StatsInfo) Deletes(deletes int64) int64 {
|
||||
|
||||
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
|
||||
func (s *StatsInfo) ResetCounters() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
s.bytes = 0
|
||||
s.errors = 0
|
||||
s.checks = 0
|
||||
@@ -280,8 +149,8 @@ func (s *StatsInfo) ResetCounters() {
|
||||
|
||||
// ResetErrors sets the errors count to 0
|
||||
func (s *StatsInfo) ResetErrors() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
s.errors = 0
|
||||
}
|
||||
|
||||
@@ -336,27 +205,3 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
|
||||
s.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// SetCheckQueue sets the number of queued checks
|
||||
func (s *StatsInfo) SetCheckQueue(n int, size int64) {
|
||||
s.mu.Lock()
|
||||
s.checkQueue = n
|
||||
s.checkQueueSize = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// SetTransferQueue sets the number of queued transfers
|
||||
func (s *StatsInfo) SetTransferQueue(n int, size int64) {
|
||||
s.mu.Lock()
|
||||
s.transferQueue = n
|
||||
s.transferQueueSize = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// SetRenameQueue sets the number of queued renames
|
||||
func (s *StatsInfo) SetRenameQueue(n int, size int64) {
|
||||
s.mu.Lock()
|
||||
s.renameQueue = n
|
||||
s.renameQueueSize = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -180,76 +180,6 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// SkipBytes will try to seek 'skip' bytes relative to the current position.
|
||||
// On success it returns true. If 'skip' is outside the current buffer data or
|
||||
// an error occurs, Abandon is called and false is returned.
|
||||
func (a *AsyncReader) SkipBytes(skip int) (ok bool) {
|
||||
a.mu.Lock()
|
||||
defer func() {
|
||||
a.mu.Unlock()
|
||||
if !ok {
|
||||
a.Abandon()
|
||||
}
|
||||
}()
|
||||
|
||||
if a.err != nil {
|
||||
return false
|
||||
}
|
||||
if skip < 0 {
|
||||
// seek backwards if skip is inside current buffer
|
||||
if a.cur != nil && a.cur.offset+skip >= 0 {
|
||||
a.cur.offset += skip
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
// early return if skip is past the maximum buffer capacity
|
||||
if skip >= (len(a.ready)+1)*BufferSize {
|
||||
return false
|
||||
}
|
||||
|
||||
refillTokens := 0
|
||||
for {
|
||||
if a.cur.isEmpty() {
|
||||
if a.cur != nil {
|
||||
a.putBuffer(a.cur)
|
||||
refillTokens++
|
||||
a.cur = nil
|
||||
}
|
||||
select {
|
||||
case b, ok := <-a.ready:
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
a.cur = b
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
n := len(a.cur.buffer())
|
||||
if n > skip {
|
||||
n = skip
|
||||
}
|
||||
a.cur.increment(n)
|
||||
skip -= n
|
||||
if skip == 0 {
|
||||
for ; refillTokens > 0; refillTokens-- {
|
||||
a.token <- struct{}{}
|
||||
}
|
||||
// If at end of buffer, store any error, if present
|
||||
if a.cur.isEmpty() && a.cur.err != nil {
|
||||
a.err = a.cur.err
|
||||
}
|
||||
return true
|
||||
}
|
||||
if a.cur.err != nil {
|
||||
a.err = a.cur.err
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Abandon will ensure that the underlying async reader is shut down.
|
||||
// It will NOT close the input supplied on New.
|
||||
func (a *AsyncReader) Abandon() {
|
||||
|
||||
@@ -3,18 +3,14 @@ package asyncreader
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/lib/israce"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -285,81 +281,3 @@ func testAsyncReaderClose(t *testing.T, writeto bool) {
|
||||
}
|
||||
func TestAsyncReaderCloseRead(t *testing.T) { testAsyncReaderClose(t, false) }
|
||||
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }
|
||||
|
||||
func TestAsyncReaderSkipBytes(t *testing.T) {
|
||||
t.Parallel()
|
||||
data := make([]byte, 15000)
|
||||
buf := make([]byte, len(data))
|
||||
r := rand.New(rand.NewSource(42))
|
||||
|
||||
n, err := r.Read(data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(data), n)
|
||||
|
||||
initialReads := []int{0, 1, 100, 2048,
|
||||
softStartInitial - 1, softStartInitial, softStartInitial + 1,
|
||||
8000, len(data)}
|
||||
skips := []int{-1000, -101, -100, -99, 0, 1, 2048,
|
||||
softStartInitial - 1, softStartInitial, softStartInitial + 1,
|
||||
8000, len(data), BufferSize, 2 * BufferSize}
|
||||
|
||||
for buffers := 1; buffers <= 5; buffers++ {
|
||||
if israce.Enabled && buffers > 1 {
|
||||
t.Skip("FIXME Skipping further tests with race detector until https://github.com/golang/go/issues/27070 is fixed.")
|
||||
}
|
||||
t.Run(fmt.Sprintf("%d", buffers), func(t *testing.T) {
|
||||
for _, initialRead := range initialReads {
|
||||
t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
|
||||
for _, skip := range skips {
|
||||
t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
|
||||
ar, err := New(ioutil.NopCloser(bytes.NewReader(data)), buffers)
|
||||
require.NoError(t, err)
|
||||
|
||||
wantSkipFalse := false
|
||||
buf = buf[:initialRead]
|
||||
n, err := readers.ReadFill(ar, buf)
|
||||
if initialRead >= len(data) {
|
||||
wantSkipFalse = true
|
||||
if initialRead > len(data) {
|
||||
assert.Equal(t, err, io.EOF)
|
||||
} else {
|
||||
assert.True(t, err == nil || err == io.EOF)
|
||||
}
|
||||
assert.Equal(t, len(data), n)
|
||||
assert.Equal(t, data, buf[:len(data)])
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, initialRead, n)
|
||||
assert.Equal(t, data[:initialRead], buf)
|
||||
}
|
||||
|
||||
skipped := ar.SkipBytes(skip)
|
||||
buf = buf[:1024]
|
||||
n, err = readers.ReadFill(ar, buf)
|
||||
offset := initialRead + skip
|
||||
if skipped {
|
||||
assert.False(t, wantSkipFalse)
|
||||
l := len(buf)
|
||||
if offset >= len(data) {
|
||||
assert.Equal(t, err, io.EOF)
|
||||
} else {
|
||||
if offset+1024 >= len(data) {
|
||||
l = len(data) - offset
|
||||
}
|
||||
assert.Equal(t, l, n)
|
||||
assert.Equal(t, data[offset:offset+l], buf[:l])
|
||||
}
|
||||
} else {
|
||||
if initialRead >= len(data) {
|
||||
assert.Equal(t, err, io.EOF)
|
||||
} else {
|
||||
assert.True(t, err == errorStreamAbandoned || err == io.EOF)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -81,8 +81,6 @@ type ConfigInfo struct {
|
||||
AskPassword bool
|
||||
UseServerModTime bool
|
||||
MaxTransfer SizeSuffix
|
||||
MaxBacklog int
|
||||
StatsOneLine bool
|
||||
}
|
||||
|
||||
// NewConfig creates a new config with everything set to the default
|
||||
@@ -111,7 +109,6 @@ func NewConfig() *ConfigInfo {
|
||||
c.AskPassword = true
|
||||
c.TPSLimitBurst = 1
|
||||
c.MaxTransfer = -1
|
||||
c.MaxBacklog = 10000
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -79,13 +79,6 @@ var (
|
||||
|
||||
// output of prompt for password
|
||||
PasswordPromptOutput = os.Stderr
|
||||
|
||||
// If set to true, the configKey is obscured with obscure.Obscure and saved to a temp file when it is
|
||||
// calculated from the password. The path of that temp file is then written to the environment variable
|
||||
// `_RCLONE_CONFIG_KEY_FILE`. If `_RCLONE_CONFIG_KEY_FILE` is present, password prompt is skipped and `RCLONE_CONFIG_PASS` ignored.
|
||||
// For security reasons, the temp file is deleted once the configKey is successfully loaded.
|
||||
// This can be used to pass the configKey to a child process.
|
||||
PassConfigKeyForDaemonization = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -256,37 +249,19 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
|
||||
|
||||
var out []byte
|
||||
for {
|
||||
if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 {
|
||||
fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile)
|
||||
obscuredKey, err := ioutil.ReadFile(envKeyFile)
|
||||
if len(configKey) == 0 && envpw != "" {
|
||||
err := setConfigPassword(envpw)
|
||||
if err != nil {
|
||||
errRemove := os.Remove(envKeyFile)
|
||||
if errRemove != nil {
|
||||
log.Fatalf("unable to read obscured config key and unable to delete the temp file: %v", err)
|
||||
}
|
||||
log.Fatalf("unable to read obscured config key: %v", err)
|
||||
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
|
||||
}
|
||||
errRemove := os.Remove(envKeyFile)
|
||||
if errRemove != nil {
|
||||
log.Fatalf("unable to delete temp file with configKey: %v", err)
|
||||
}
|
||||
configKey = []byte(obscure.MustReveal(string(obscuredKey)))
|
||||
fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey")
|
||||
} else {
|
||||
if len(configKey) == 0 && envpw != "" {
|
||||
err := setConfigPassword(envpw)
|
||||
if err != nil {
|
||||
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
|
||||
}
|
||||
}
|
||||
if len(configKey) == 0 {
|
||||
if !fs.Config.AskPassword {
|
||||
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
|
||||
}
|
||||
getConfigPassword("Enter configuration password:")
|
||||
}
|
||||
if len(configKey) == 0 {
|
||||
if !fs.Config.AskPassword {
|
||||
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
|
||||
}
|
||||
getConfigPassword("Enter configuration password:")
|
||||
}
|
||||
|
||||
// Nonce is first 24 bytes of the ciphertext
|
||||
@@ -387,37 +362,6 @@ func setConfigPassword(password string) error {
|
||||
return err
|
||||
}
|
||||
configKey = sha.Sum(nil)
|
||||
if PassConfigKeyForDaemonization {
|
||||
tempFile, err := ioutil.TempFile("", "rclone")
|
||||
if err != nil {
|
||||
log.Fatalf("cannot create temp file to store configKey: %v", err)
|
||||
}
|
||||
_, err = tempFile.WriteString(obscure.MustObscure(string(configKey)))
|
||||
if err != nil {
|
||||
errRemove := os.Remove(tempFile.Name())
|
||||
if errRemove != nil {
|
||||
log.Fatalf("error writing configKey to temp file and also error deleting it: %v", err)
|
||||
}
|
||||
log.Fatalf("error writing configKey to temp file: %v", err)
|
||||
}
|
||||
err = tempFile.Close()
|
||||
if err != nil {
|
||||
errRemove := os.Remove(tempFile.Name())
|
||||
if errRemove != nil {
|
||||
log.Fatalf("error closing temp file with configKey and also error deleting it: %v", err)
|
||||
}
|
||||
log.Fatalf("error closing temp file with configKey: %v", err)
|
||||
}
|
||||
fs.Debugf(nil, "saving configKey to temp file")
|
||||
err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name())
|
||||
if err != nil {
|
||||
errRemove := os.Remove(tempFile.Name())
|
||||
if errRemove != nil {
|
||||
log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %v", err)
|
||||
}
|
||||
log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -826,9 +770,7 @@ func ChooseOption(o *fs.Option, name string) string {
|
||||
}
|
||||
password = base64.RawURLEncoding.EncodeToString(pw)
|
||||
fmt.Printf("Your password is: %s\n", password)
|
||||
fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
|
||||
"password itself) will be stored under your \nconfiguration file, so keep this generated password " +
|
||||
"in a safe place.\n")
|
||||
fmt.Printf("Use this password?\n")
|
||||
if Confirm() {
|
||||
break
|
||||
}
|
||||
@@ -990,7 +932,7 @@ func NewRemoteName() (name string) {
|
||||
|
||||
// editOptions edits the options. If new is true then it just allows
|
||||
// entry and doesn't show any old values.
|
||||
func editOptions(ri *fs.RegInfo, name string, isNew bool) {
|
||||
func editOptions(ri *fs.RegInfo, name string, new bool) {
|
||||
hasAdvanced := false
|
||||
for _, advanced := range []bool{false, true} {
|
||||
if advanced {
|
||||
@@ -1009,7 +951,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
|
||||
}
|
||||
subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "")
|
||||
if matchProvider(option.Provider, subProvider) {
|
||||
if !isNew {
|
||||
if !new {
|
||||
fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name))
|
||||
fmt.Printf("Edit? (y/n)>\n")
|
||||
if !Confirm() {
|
||||
|
||||
@@ -79,12 +79,10 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
|
||||
flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
|
||||
flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
|
||||
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
|
||||
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "Buffer size when copying files.")
|
||||
flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
|
||||
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
|
||||
flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
|
||||
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
|
||||
}
|
||||
|
||||
// SetFlags converts any flags into config which weren't straightforward
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
_ Mapper = Simple(nil)
|
||||
_ Getter = Simple(nil)
|
||||
_ Setter = Simple(nil)
|
||||
_ Mapper = (Simple)(nil)
|
||||
_ Getter = (Simple)(nil)
|
||||
_ Setter = (Simple)(nil)
|
||||
)
|
||||
|
||||
func TestConfigMapGet(t *testing.T) {
|
||||
|
||||
@@ -36,25 +36,3 @@ func TestObscure(t *testing.T) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestReveal(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want string
|
||||
iv string
|
||||
}{
|
||||
{"YWFhYWFhYWFhYWFhYWFhYQ", "", "aaaaaaaaaaaaaaaa"},
|
||||
{"YWFhYWFhYWFhYWFhYWFhYXMaGgIlEQ", "potato", "aaaaaaaaaaaaaaaa"},
|
||||
{"YmJiYmJiYmJiYmJiYmJiYp3gcEWbAw", "potato", "bbbbbbbbbbbbbbbb"},
|
||||
} {
|
||||
cryptRand = bytes.NewBufferString(test.iv)
|
||||
got, err := Reveal(test.in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, test.want, got)
|
||||
// Now the Must variants
|
||||
cryptRand = bytes.NewBufferString(test.iv)
|
||||
got = MustReveal(test.in)
|
||||
assert.Equal(t, test.want, got)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,94 +0,0 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CommaSepList is a comma separated config value
|
||||
// It uses the encoding/csv rules for quoting and escaping
|
||||
type CommaSepList []string
|
||||
|
||||
// SpaceSepList is a space separated config value
|
||||
// It uses the encoding/csv rules for quoting and escaping
|
||||
type SpaceSepList []string
|
||||
|
||||
type genericList []string
|
||||
|
||||
func (l CommaSepList) String() string {
|
||||
return genericList(l).string(',')
|
||||
}
|
||||
|
||||
// Set the List entries
|
||||
func (l *CommaSepList) Set(s string) error {
|
||||
return (*genericList)(l).set(',', []byte(s))
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (CommaSepList) Type() string {
|
||||
return "[]string"
|
||||
}
|
||||
|
||||
// Scan implements the fmt.Scanner interface
|
||||
func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error {
|
||||
return (*genericList)(l).scan(',', s, ch)
|
||||
}
|
||||
|
||||
func (l SpaceSepList) String() string {
|
||||
return genericList(l).string(' ')
|
||||
}
|
||||
|
||||
// Set the List entries
|
||||
func (l *SpaceSepList) Set(s string) error {
|
||||
return (*genericList)(l).set(' ', []byte(s))
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (SpaceSepList) Type() string {
|
||||
return "[]string"
|
||||
}
|
||||
|
||||
// Scan implements the fmt.Scanner interface
|
||||
func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error {
|
||||
return (*genericList)(l).scan(' ', s, ch)
|
||||
}
|
||||
|
||||
func (gl genericList) string(sep rune) string {
|
||||
var buf bytes.Buffer
|
||||
w := csv.NewWriter(&buf)
|
||||
w.Comma = sep
|
||||
err := w.Write(gl)
|
||||
if err != nil {
|
||||
// can only happen if w.Comma is invalid
|
||||
panic(err)
|
||||
}
|
||||
w.Flush()
|
||||
return string(bytes.TrimSpace(buf.Bytes()))
|
||||
}
|
||||
|
||||
func (gl *genericList) set(sep rune, b []byte) error {
|
||||
if len(b) == 0 {
|
||||
*gl = nil
|
||||
return nil
|
||||
}
|
||||
r := csv.NewReader(bytes.NewReader(b))
|
||||
r.Comma = sep
|
||||
|
||||
record, err := r.Read()
|
||||
switch _err := err.(type) {
|
||||
case nil:
|
||||
*gl = record
|
||||
case *csv.ParseError:
|
||||
err = _err.Err // remove line numbers from the error message
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (gl *genericList) scan(sep rune, s fmt.ScanState, ch rune) error {
|
||||
token, err := s.Token(true, func(rune) bool { return true })
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return gl.set(sep, bytes.TrimSpace(token))
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func must(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSpaceSepList() {
|
||||
for _, s := range []string{
|
||||
`remotea:test/dir remoteb:`,
|
||||
`"remotea:test/space dir" remoteb:`,
|
||||
`"remotea:test/quote""dir" remoteb:`,
|
||||
} {
|
||||
var l SpaceSepList
|
||||
must(l.Set(s))
|
||||
fmt.Printf("%#v\n", l)
|
||||
}
|
||||
// Output:
|
||||
// fs.SpaceSepList{"remotea:test/dir", "remoteb:"}
|
||||
// fs.SpaceSepList{"remotea:test/space dir", "remoteb:"}
|
||||
// fs.SpaceSepList{"remotea:test/quote\"dir", "remoteb:"}
|
||||
}
|
||||
|
||||
func ExampleCommaSepList() {
|
||||
for _, s := range []string{
|
||||
`remotea:test/dir,remoteb:`,
|
||||
`"remotea:test/space dir",remoteb:`,
|
||||
`"remotea:test/quote""dir",remoteb:`,
|
||||
} {
|
||||
var l CommaSepList
|
||||
must(l.Set(s))
|
||||
fmt.Printf("%#v\n", l)
|
||||
}
|
||||
// Output:
|
||||
// fs.CommaSepList{"remotea:test/dir", "remoteb:"}
|
||||
// fs.CommaSepList{"remotea:test/space dir", "remoteb:"}
|
||||
// fs.CommaSepList{"remotea:test/quote\"dir", "remoteb:"}
|
||||
}
|
||||
|
||||
func TestSpaceSepListSet(t *testing.T) {
|
||||
type tc struct {
|
||||
in string
|
||||
out SpaceSepList
|
||||
err string
|
||||
}
|
||||
tests := []tc{
|
||||
{``, nil, ""},
|
||||
{`\`, SpaceSepList{`\`}, ""},
|
||||
{`\\`, SpaceSepList{`\\`}, ""},
|
||||
{`potato`, SpaceSepList{`potato`}, ""},
|
||||
{`po\tato`, SpaceSepList{`po\tato`}, ""},
|
||||
{`potato\`, SpaceSepList{`potato\`}, ""},
|
||||
{`'potato`, SpaceSepList{`'potato`}, ""},
|
||||
{`pot'ato`, SpaceSepList{`pot'ato`}, ""},
|
||||
{`potato'`, SpaceSepList{`potato'`}, ""},
|
||||
{`"potato"`, SpaceSepList{`potato`}, ""},
|
||||
{`'potato'`, SpaceSepList{`'potato'`}, ""},
|
||||
{`potato apple`, SpaceSepList{`potato`, `apple`}, ""},
|
||||
{`potato\ apple`, SpaceSepList{`potato\`, `apple`}, ""},
|
||||
{`"potato apple"`, SpaceSepList{`potato apple`}, ""},
|
||||
{`"potato'apple"`, SpaceSepList{`potato'apple`}, ""},
|
||||
{`"potato''apple"`, SpaceSepList{`potato''apple`}, ""},
|
||||
{`"potato' 'apple"`, SpaceSepList{`potato' 'apple`}, ""},
|
||||
{`potato="apple"`, nil, `bare " in non-quoted-field`},
|
||||
{`apple "potato`, nil, "extraneous"},
|
||||
{`apple pot"ato`, nil, "bare \" in non-quoted-field"},
|
||||
{`potato"`, nil, "bare \" in non-quoted-field"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
var l SpaceSepList
|
||||
err := l.Set(tc.in)
|
||||
if tc.err == "" {
|
||||
require.NoErrorf(t, err, "input: %q", tc.in)
|
||||
} else {
|
||||
require.Containsf(t, err.Error(), tc.err, "input: %q", tc.in)
|
||||
}
|
||||
require.Equalf(t, tc.out, l, "input: %q", tc.in)
|
||||
}
|
||||
}
|
||||
15
fs/fs.go
@@ -628,16 +628,16 @@ func (ft *Features) Mask(f Fs) *Features {
|
||||
// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
|
||||
// method only if available in f.
|
||||
func (ft *Features) Wrap(f Fs) *Features {
|
||||
ftCopy := new(Features)
|
||||
*ftCopy = *ft
|
||||
copy := new(Features)
|
||||
*copy = *ft
|
||||
if do, ok := f.(UnWrapper); ok {
|
||||
ftCopy.UnWrap = do.UnWrap
|
||||
copy.UnWrap = do.UnWrap
|
||||
}
|
||||
if do, ok := f.(Wrapper); ok {
|
||||
ftCopy.WrapFs = do.WrapFs
|
||||
ftCopy.SetWrapper = do.SetWrapper
|
||||
copy.WrapFs = do.WrapFs
|
||||
copy.SetWrapper = do.SetWrapper
|
||||
}
|
||||
return ftCopy
|
||||
return copy
|
||||
}
|
||||
|
||||
// WrapsFs adds extra information between `f` which wraps `w`
|
||||
@@ -827,6 +827,9 @@ type ObjectPair struct {
|
||||
Src, Dst Object
|
||||
}
|
||||
|
||||
// ObjectPairChan is a channel of ObjectPair
|
||||
type ObjectPairChan chan ObjectPair
|
||||
|
||||
// Find looks for an Info object for the name passed in
|
||||
//
|
||||
// Services are looked up in the config file
|
||||
|
||||
@@ -53,7 +53,7 @@ func (err wrappedRetryError) Retry() bool {
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Retrier = wrappedRetryError{error(nil)}
|
||||
var _ Retrier = wrappedRetryError{(error)(nil)}
|
||||
|
||||
// RetryError makes an error which indicates it would like to be retried
|
||||
func RetryError(err error) error {
|
||||
@@ -97,7 +97,7 @@ func (err wrappedFatalError) Fatal() bool {
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ Fataler = wrappedFatalError{error(nil)}
|
||||
var _ Fataler = wrappedFatalError{(error)(nil)}
|
||||
|
||||
// FatalError makes an error which indicates it is a fatal error and
|
||||
// the sync should stop.
|
||||
@@ -145,7 +145,7 @@ func (err wrappedNoRetryError) NoRetry() bool {
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ NoRetrier = wrappedNoRetryError{error(nil)}
|
||||
var _ NoRetrier = wrappedNoRetryError{(error)(nil)}
|
||||
|
||||
// NoRetryError makes an error which indicates the sync shouldn't be
|
||||
// retried.
|
||||
|
||||
@@ -15,26 +15,26 @@ func ptr(p interface{}) string {
|
||||
|
||||
func TestSetDefaults(t *testing.T) {
|
||||
old := http.DefaultTransport.(*http.Transport)
|
||||
newT := new(http.Transport)
|
||||
setDefaults(newT, old)
|
||||
new := new(http.Transport)
|
||||
setDefaults(new, old)
|
||||
// Can't use assert.Equal or reflect.DeepEqual for this as it has functions in
|
||||
// Check functions by comparing the "%p" representations of them
|
||||
assert.Equal(t, ptr(old.Proxy), ptr(newT.Proxy), "when checking .Proxy")
|
||||
assert.Equal(t, ptr(old.DialContext), ptr(newT.DialContext), "when checking .DialContext")
|
||||
assert.Equal(t, ptr(old.Proxy), ptr(new.Proxy), "when checking .Proxy")
|
||||
assert.Equal(t, ptr(old.DialContext), ptr(new.DialContext), "when checking .DialContext")
|
||||
// Check the other public fields
|
||||
assert.Equal(t, ptr(old.Dial), ptr(newT.Dial), "when checking .Dial")
|
||||
assert.Equal(t, ptr(old.DialTLS), ptr(newT.DialTLS), "when checking .DialTLS")
|
||||
assert.Equal(t, old.TLSClientConfig, newT.TLSClientConfig, "when checking .TLSClientConfig")
|
||||
assert.Equal(t, old.TLSHandshakeTimeout, newT.TLSHandshakeTimeout, "when checking .TLSHandshakeTimeout")
|
||||
assert.Equal(t, old.DisableKeepAlives, newT.DisableKeepAlives, "when checking .DisableKeepAlives")
|
||||
assert.Equal(t, old.DisableCompression, newT.DisableCompression, "when checking .DisableCompression")
|
||||
assert.Equal(t, old.MaxIdleConns, newT.MaxIdleConns, "when checking .MaxIdleConns")
|
||||
assert.Equal(t, old.MaxIdleConnsPerHost, newT.MaxIdleConnsPerHost, "when checking .MaxIdleConnsPerHost")
|
||||
assert.Equal(t, old.IdleConnTimeout, newT.IdleConnTimeout, "when checking .IdleConnTimeout")
|
||||
assert.Equal(t, old.ResponseHeaderTimeout, newT.ResponseHeaderTimeout, "when checking .ResponseHeaderTimeout")
|
||||
assert.Equal(t, old.ExpectContinueTimeout, newT.ExpectContinueTimeout, "when checking .ExpectContinueTimeout")
|
||||
assert.Equal(t, old.TLSNextProto, newT.TLSNextProto, "when checking .TLSNextProto")
|
||||
assert.Equal(t, old.MaxResponseHeaderBytes, newT.MaxResponseHeaderBytes, "when checking .MaxResponseHeaderBytes")
|
||||
assert.Equal(t, ptr(old.Dial), ptr(new.Dial), "when checking .Dial")
|
||||
assert.Equal(t, ptr(old.DialTLS), ptr(new.DialTLS), "when checking .DialTLS")
|
||||
assert.Equal(t, old.TLSClientConfig, new.TLSClientConfig, "when checking .TLSClientConfig")
|
||||
assert.Equal(t, old.TLSHandshakeTimeout, new.TLSHandshakeTimeout, "when checking .TLSHandshakeTimeout")
|
||||
assert.Equal(t, old.DisableKeepAlives, new.DisableKeepAlives, "when checking .DisableKeepAlives")
|
||||
assert.Equal(t, old.DisableCompression, new.DisableCompression, "when checking .DisableCompression")
|
||||
assert.Equal(t, old.MaxIdleConns, new.MaxIdleConns, "when checking .MaxIdleConns")
|
||||
assert.Equal(t, old.MaxIdleConnsPerHost, new.MaxIdleConnsPerHost, "when checking .MaxIdleConnsPerHost")
|
||||
assert.Equal(t, old.IdleConnTimeout, new.IdleConnTimeout, "when checking .IdleConnTimeout")
|
||||
assert.Equal(t, old.ResponseHeaderTimeout, new.ResponseHeaderTimeout, "when checking .ResponseHeaderTimeout")
|
||||
assert.Equal(t, old.ExpectContinueTimeout, new.ExpectContinueTimeout, "when checking .ExpectContinueTimeout")
|
||||
assert.Equal(t, old.TLSNextProto, new.TLSNextProto, "when checking .TLSNextProto")
|
||||
assert.Equal(t, old.MaxResponseHeaderBytes, new.MaxResponseHeaderBytes, "when checking .MaxResponseHeaderBytes")
|
||||
}
|
||||
|
||||
func TestCleanAuth(t *testing.T) {
|
||||
|
||||
@@ -982,15 +982,15 @@ func Purge(f fs.Fs, dir string) error {
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(f fs.Fs) error {
delChan := make(fs.ObjectsChan, fs.Config.Transfers)
delete := make(fs.ObjectsChan, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- DeleteFiles(delChan)
delErr <- DeleteFiles(delete)
}()
err := ListFn(f, func(o fs.Object) {
delChan <- o
delete <- o
})
close(delChan)
close(delete)
delError := <-delErr
if err == nil {
err = delError

@@ -135,9 +135,9 @@ func (o *RangeOption) Decode(size int64) (offset, limit int64) {

// FixRangeOption looks through the slice of options and adjusts any
// RangeOption~s found that request a fetch from the end into an
// absolute fetch using the size passed in and makes sure the range does
// not exceed filesize. Some remotes (eg Onedrive, Box) don't support
// range requests which index from the end.
// absolute fetch using the size passed in. Some remotes (eg
// Onedrive, Box) don't support range requests which index from the
// end.
func FixRangeOption(options []OpenOption, size int64) {
for i := range options {
option := options[i]
@@ -147,10 +147,6 @@ func FixRangeOption(options []OpenOption, size int64) {
x = &RangeOption{Start: size - x.End, End: -1}
options[i] = x
}
if x.End > size {
x = &RangeOption{Start: x.Start, End: size}
options[i] = x
}
}
}
}

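The hunk above concerns how FixRangeOption rewrites a "fetch from the end" request into an absolute range. Below is a minimal, self-contained sketch of that conversion; the RangeOption type and fixRange function here are standalone stand-ins for illustration (not the real rclone fs package), assuming a negative Start marks a from-the-end request.

// Sketch: rewriting a "last End bytes" request into an absolute range,
// mirroring the line x = &RangeOption{Start: size - x.End, End: -1} above.
package main

import "fmt"

type RangeOption struct {
    Start, End int64
}

// fixRange converts a from-the-end request (Start < 0) into an absolute
// offset using the object size; other requests are passed through.
func fixRange(o RangeOption, size int64) RangeOption {
    if o.Start < 0 {
        return RangeOption{Start: size - o.End, End: -1}
    }
    return o
}

func main() {
    // Requesting the last 100 bytes of a 1000 byte object becomes an
    // absolute fetch starting at offset 900.
    fmt.Println(fixRange(RangeOption{Start: -1, End: 100}, 1000))
}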
@@ -13,17 +13,6 @@ import (
// SizeSuffix is an int64 with a friendly way of printing setting
type SizeSuffix int64

// Common multipliers for SizeSuffix
const (
Byte SizeSuffix = 1 << (iota * 10)
KibiByte
MebiByte
GibiByte
TebiByte
PebiByte
ExbiByte
)

// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
scaled := float64(0)

fs/sync/pipe.go (100 changed lines)
@@ -1,100 +0,0 @@
package sync

import (
"context"
"sync"

"github.com/ncw/rclone/fs"
)

// pipe provides an unbounded channel like experience
//
// Note unlike channels these aren't strictly ordered.
type pipe struct {
mu sync.Mutex
c chan struct{}
queue []fs.ObjectPair
closed bool
totalSize int64
stats func(items int, totalSize int64)
}

func newPipe(stats func(items int, totalSize int64), maxBacklog int) *pipe {
return &pipe{
c: make(chan struct{}, maxBacklog),
stats: stats,
}
}

// Put an pair into the pipe
//
// It returns ok = false if the context was cancelled
//
// It will panic if you call it after Close()
func (p *pipe) Put(ctx context.Context, pair fs.ObjectPair) (ok bool) {
if ctx.Err() != nil {
return false
}
p.mu.Lock()
p.queue = append(p.queue, pair)
size := pair.Src.Size()
if size > 0 {
p.totalSize += size
}
p.stats(len(p.queue), p.totalSize)
p.mu.Unlock()
select {
case <-ctx.Done():
return false
case p.c <- struct{}{}:
}
return true
}

// Get a pair from the pipe
//
// It returns ok = false if the context was cancelled or Close() has
// been called.
func (p *pipe) Get(ctx context.Context) (pair fs.ObjectPair, ok bool) {
if ctx.Err() != nil {
return
}
select {
case <-ctx.Done():
return
case _, ok = <-p.c:
if !ok {
return
}
}
p.mu.Lock()
pair, p.queue = p.queue[0], p.queue[1:]
size := pair.Src.Size()
if size > 0 {
p.totalSize -= size
}
if p.totalSize < 0 {
p.totalSize = 0
}
p.stats(len(p.queue), p.totalSize)
p.mu.Unlock()
return pair, true
}

// Stats reads the number of items in the queue and the totalSize
func (p *pipe) Stats() (items int, totalSize int64) {
p.mu.Lock()
items, totalSize = len(p.queue), p.totalSize
p.mu.Unlock()
return items, totalSize
}

// Close the pipe
//
// Writes to a closed pipe will panic as will double closing a pipe
func (p *pipe) Close() {
p.mu.Lock()
close(p.c)
p.closed = true
p.mu.Unlock()
}
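For context on the file removed above, here is a self-contained sketch of the same pattern pipe implements: a slice-backed queue guarded by a mutex, with a semaphore channel used to signal item availability and apply backpressure once maxBacklog signals are outstanding. It uses plain ints instead of fs.ObjectPair and omits the context and stats plumbing, so it is an illustration of the idea rather than the rclone code itself.

package main

import (
    "fmt"
    "sync"
)

type queue struct {
    mu    sync.Mutex
    c     chan struct{} // semaphore: one token per queued item
    items []int
}

func newQueue(maxBacklog int) *queue {
    return &queue{c: make(chan struct{}, maxBacklog)}
}

// Put appends an item and signals the consumer. It blocks once
// maxBacklog signals are outstanding, like pipe.Put blocking on p.c.
func (q *queue) Put(v int) {
    q.mu.Lock()
    q.items = append(q.items, v)
    q.mu.Unlock()
    q.c <- struct{}{}
}

// Get waits for a signal and pops the oldest item. ok is false once the
// queue has been closed and fully drained.
func (q *queue) Get() (v int, ok bool) {
    if _, ok = <-q.c; !ok {
        return 0, false
    }
    q.mu.Lock()
    v, q.items = q.items[0], q.items[1:]
    q.mu.Unlock()
    return v, true
}

// Close signals that no more items will be put.
func (q *queue) Close() { close(q.c) }

func main() {
    q := newQueue(4)
    go func() {
        for i := 1; i <= 3; i++ {
            q.Put(i)
        }
        q.Close()
    }()
    for v, ok := q.Get(); ok; v, ok = q.Get() {
        fmt.Println(v)
    }
}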
@@ -1,122 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/mockobject"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPipe(t *testing.T) {
|
||||
var queueLength int
|
||||
var queueSize int64
|
||||
stats := func(n int, size int64) {
|
||||
queueLength, queueSize = n, size
|
||||
}
|
||||
|
||||
// Make a new pipe
|
||||
p := newPipe(stats, 10)
|
||||
|
||||
checkStats := func(expectedN int, expectedSize int64) {
|
||||
n, size := p.Stats()
|
||||
assert.Equal(t, expectedN, n)
|
||||
assert.Equal(t, expectedSize, size)
|
||||
assert.Equal(t, expectedN, queueLength)
|
||||
assert.Equal(t, expectedSize, queueSize)
|
||||
}
|
||||
|
||||
checkStats(0, 0)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)
|
||||
|
||||
pair1 := fs.ObjectPair{Src: obj1, Dst: nil}
|
||||
|
||||
// Put an object
|
||||
ok := p.Put(ctx, pair1)
|
||||
assert.Equal(t, true, ok)
|
||||
checkStats(1, 5)
|
||||
|
||||
// Close the pipe showing reading on closed pipe is OK
|
||||
p.Close()
|
||||
|
||||
// Read from pipe
|
||||
pair2, ok := p.Get(ctx)
|
||||
assert.Equal(t, pair1, pair2)
|
||||
assert.Equal(t, true, ok)
|
||||
checkStats(0, 0)
|
||||
|
||||
// Check read on closed pipe
|
||||
pair2, ok = p.Get(ctx)
|
||||
assert.Equal(t, fs.ObjectPair{}, pair2)
|
||||
assert.Equal(t, false, ok)
|
||||
|
||||
// Check panic on write to closed pipe
|
||||
assert.Panics(t, func() { p.Put(ctx, pair1) })
|
||||
|
||||
// Make a new pipe
|
||||
p = newPipe(stats, 10)
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
|
||||
// cancel it in the background - check read ceases
|
||||
go cancel()
|
||||
pair2, ok = p.Get(ctx2)
|
||||
assert.Equal(t, fs.ObjectPair{}, pair2)
|
||||
assert.Equal(t, false, ok)
|
||||
|
||||
// check we can't write
|
||||
ok = p.Put(ctx2, pair1)
|
||||
assert.Equal(t, false, ok)
|
||||
|
||||
}
|
||||
|
||||
// TestPipeConcurrent runs concurrent Get and Put to flush out any
|
||||
// race conditions and concurrency problems.
|
||||
func TestPipeConcurrent(t *testing.T) {
|
||||
const (
|
||||
N = 1000
|
||||
readWriters = 10
|
||||
)
|
||||
|
||||
stats := func(n int, size int64) {}
|
||||
|
||||
// Make a new pipe
|
||||
p := newPipe(stats, 10)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)
|
||||
pair1 := fs.ObjectPair{Src: obj1, Dst: nil}
|
||||
ctx := context.Background()
|
||||
var count int64
|
||||
|
||||
for j := 0; j < readWriters; j++ {
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < N; i++ {
|
||||
// Read from pipe
|
||||
pair2, ok := p.Get(ctx)
|
||||
assert.Equal(t, pair1, pair2)
|
||||
assert.Equal(t, true, ok)
|
||||
atomic.AddInt64(&count, -1)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for i := 0; i < N; i++ {
|
||||
// Put an object
|
||||
ok := p.Put(ctx, pair1)
|
||||
assert.Equal(t, true, ok)
|
||||
atomic.AddInt64(&count, 1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
assert.Equal(t, int64(0), count)
|
||||
}
|
||||
fs/sync/sync.go (174 changed lines)
@@ -43,9 +43,9 @@ type syncCopyMove struct {
|
||||
srcEmptyDirsMu sync.Mutex // protect srcEmptyDirs
|
||||
srcEmptyDirs map[string]fs.DirEntry // potentially empty directories
|
||||
checkerWg sync.WaitGroup // wait for checkers
|
||||
toBeChecked *pipe // checkers channel
|
||||
toBeChecked fs.ObjectPairChan // checkers channel
|
||||
transfersWg sync.WaitGroup // wait for transfers
|
||||
toBeUploaded *pipe // copiers channel
|
||||
toBeUploaded fs.ObjectPairChan // copiers channel
|
||||
errorMu sync.Mutex // Mutex covering the errors variables
|
||||
err error // normal error from copy process
|
||||
noRetryErr error // error with NoRetry set
|
||||
@@ -54,7 +54,7 @@ type syncCopyMove struct {
|
||||
renameMapMu sync.Mutex // mutex to protect the below
|
||||
renameMap map[string][]fs.Object // dst files by hash - only used by trackRenames
|
||||
renamerWg sync.WaitGroup // wait for renamers
|
||||
toBeRenamed *pipe // renamers channel
|
||||
toBeRenamed fs.ObjectPairChan // renamers channel
|
||||
trackRenamesWg sync.WaitGroup // wg for background track renames
|
||||
trackRenamesCh chan fs.Object // objects are pumped in here
|
||||
renameCheck []fs.Object // accumulate files to check for rename here
|
||||
@@ -75,12 +75,12 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
dstFilesResult: make(chan error, 1),
|
||||
dstEmptyDirs: make(map[string]fs.DirEntry),
|
||||
srcEmptyDirs: make(map[string]fs.DirEntry),
|
||||
toBeChecked: newPipe(accounting.Stats.SetCheckQueue, fs.Config.MaxBacklog),
|
||||
toBeUploaded: newPipe(accounting.Stats.SetTransferQueue, fs.Config.MaxBacklog),
|
||||
toBeChecked: make(fs.ObjectPairChan, fs.Config.Transfers),
|
||||
toBeUploaded: make(fs.ObjectPairChan, fs.Config.Transfers),
|
||||
deleteFilesCh: make(chan fs.Object, fs.Config.Checkers),
|
||||
trackRenames: fs.Config.TrackRenames,
|
||||
commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
|
||||
toBeRenamed: newPipe(accounting.Stats.SetRenameQueue, fs.Config.MaxBacklog),
|
||||
toBeRenamed: make(fs.ObjectPairChan, fs.Config.Transfers),
|
||||
trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
|
||||
}
|
||||
s.ctx, s.cancel = context.WithCancel(context.Background())
|
||||
@@ -131,7 +131,12 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
|
||||
// Check to see if the context has been cancelled
|
||||
func (s *syncCopyMove) aborting() bool {
|
||||
return s.ctx.Err() != nil
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// This reads the map and pumps it into the channel passed in, closing
|
||||
@@ -192,95 +197,119 @@ func (s *syncCopyMove) currentError() error {
|
||||
// pairChecker reads Objects~s on in send to out if they need transferring.
|
||||
//
|
||||
// FIXME potentially doing lots of hashes at once
|
||||
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
|
||||
func (s *syncCopyMove) pairChecker(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
pair, ok := in.Get(s.ctx)
|
||||
if !ok {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
accounting.Stats.Checking(src.Remote())
|
||||
// Check to see if can store this
|
||||
if src.Storable() {
|
||||
if operations.NeedTransfer(pair.Dst, pair.Src) {
|
||||
// If files are treated as immutable, fail if destination exists and does not match
|
||||
if fs.Config.Immutable && pair.Dst != nil {
|
||||
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
|
||||
s.processError(fs.ErrorImmutableModified)
|
||||
} else {
|
||||
// If destination already exists, then we must move it into --backup-dir if required
|
||||
if pair.Dst != nil && s.backupDir != nil {
|
||||
remoteWithSuffix := pair.Dst.Remote() + s.suffix
|
||||
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
|
||||
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
|
||||
if err != nil {
|
||||
s.processError(err)
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
accounting.Stats.Checking(src.Remote())
|
||||
// Check to see if can store this
|
||||
if src.Storable() {
|
||||
if operations.NeedTransfer(pair.Dst, pair.Src) {
|
||||
// If files are treated as immutable, fail if destination exists and does not match
|
||||
if fs.Config.Immutable && pair.Dst != nil {
|
||||
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
|
||||
s.processError(fs.ErrorImmutableModified)
|
||||
} else {
|
||||
// If destination already exists, then we must move it into --backup-dir if required
|
||||
if pair.Dst != nil && s.backupDir != nil {
|
||||
remoteWithSuffix := pair.Dst.Remote() + s.suffix
|
||||
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
|
||||
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
|
||||
if err != nil {
|
||||
s.processError(err)
|
||||
} else {
|
||||
// If successful zero out the dst as it is no longer there and copy the file
|
||||
pair.Dst = nil
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case out <- pair:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If successful zero out the dst as it is no longer there and copy the file
|
||||
pair.Dst = nil
|
||||
ok = out.Put(s.ctx, pair)
|
||||
if !ok {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case out <- pair:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ok = out.Put(s.ctx, pair)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If moving need to delete the files we don't need to copy
|
||||
if s.DoMove {
|
||||
// Delete src if no error on copy
|
||||
s.processError(operations.DeleteFile(src))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If moving need to delete the files we don't need to copy
|
||||
if s.DoMove {
|
||||
// Delete src if no error on copy
|
||||
s.processError(operations.DeleteFile(src))
|
||||
}
|
||||
}
|
||||
accounting.Stats.DoneChecking(src.Remote())
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
accounting.Stats.DoneChecking(src.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
// pairRenamer reads Objects~s on in and attempts to rename them,
|
||||
// otherwise it sends them out if they need transferring.
|
||||
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, wg *sync.WaitGroup) {
|
||||
func (s *syncCopyMove) pairRenamer(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
pair, ok := in.Get(s.ctx)
|
||||
if !ok {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
if !s.tryRename(src) {
|
||||
// pass on if not renamed
|
||||
ok = out.Put(s.ctx, pair)
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
if !s.tryRename(src) {
|
||||
// pass on if not renamed
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case out <- pair:
|
||||
}
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pairCopyOrMove reads Objects on in and moves or copies them.
|
||||
func (s *syncCopyMove) pairCopyOrMove(in *pipe, fdst fs.Fs, wg *sync.WaitGroup) {
|
||||
func (s *syncCopyMove) pairCopyOrMove(in fs.ObjectPairChan, fdst fs.Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
var err error
|
||||
for {
|
||||
pair, ok := in.Get(s.ctx)
|
||||
if !ok {
|
||||
if s.aborting() {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
accounting.Stats.Transferring(src.Remote())
|
||||
if s.DoMove {
|
||||
_, err = operations.Move(fdst, pair.Dst, src.Remote(), src)
|
||||
} else {
|
||||
_, err = operations.Copy(fdst, pair.Dst, src.Remote(), src)
|
||||
select {
|
||||
case pair, ok := <-in:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
src := pair.Src
|
||||
accounting.Stats.Transferring(src.Remote())
|
||||
if s.DoMove {
|
||||
_, err = operations.Move(fdst, pair.Dst, src.Remote(), src)
|
||||
} else {
|
||||
_, err = operations.Copy(fdst, pair.Dst, src.Remote(), src)
|
||||
}
|
||||
s.processError(err)
|
||||
accounting.Stats.DoneTransferring(src.Remote(), err == nil)
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
s.processError(err)
|
||||
accounting.Stats.DoneTransferring(src.Remote(), err == nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,7 +323,7 @@ func (s *syncCopyMove) startCheckers() {
|
||||
|
||||
// This stops the background checkers
|
||||
func (s *syncCopyMove) stopCheckers() {
|
||||
s.toBeChecked.Close()
|
||||
close(s.toBeChecked)
|
||||
fs.Infof(s.fdst, "Waiting for checks to finish")
|
||||
s.checkerWg.Wait()
|
||||
}
|
||||
@@ -309,7 +338,7 @@ func (s *syncCopyMove) startTransfers() {
|
||||
|
||||
// This stops the background transfers
|
||||
func (s *syncCopyMove) stopTransfers() {
|
||||
s.toBeUploaded.Close()
|
||||
close(s.toBeUploaded)
|
||||
fs.Infof(s.fdst, "Waiting for transfers to finish")
|
||||
s.transfersWg.Wait()
|
||||
}
|
||||
@@ -330,7 +359,7 @@ func (s *syncCopyMove) stopRenamers() {
|
||||
if !s.trackRenames {
|
||||
return
|
||||
}
|
||||
s.toBeRenamed.Close()
|
||||
close(s.toBeRenamed)
|
||||
fs.Infof(s.fdst, "Waiting for renames to finish")
|
||||
s.renamerWg.Wait()
|
||||
}
|
||||
@@ -656,9 +685,10 @@ func (s *syncCopyMove) run() error {
|
||||
s.makeRenameMap()
|
||||
// Attempt renames for all the files which don't have a matching dst
|
||||
for _, src := range s.renameCheck {
|
||||
ok := s.toBeRenamed.Put(s.ctx, fs.ObjectPair{Src: src, Dst: nil})
|
||||
if !ok {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
break
|
||||
case s.toBeRenamed <- fs.ObjectPair{Src: src, Dst: nil}:
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -762,9 +792,10 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
|
||||
}
|
||||
} else {
|
||||
// No need to check since doesn't exist
|
||||
ok := s.toBeUploaded.Put(s.ctx, fs.ObjectPair{Src: x, Dst: nil})
|
||||
if !ok {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case s.toBeUploaded <- fs.ObjectPair{Src: x, Dst: nil}:
|
||||
}
|
||||
}
|
||||
case fs.Directory:
|
||||
@@ -794,9 +825,10 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
|
||||
}
|
||||
dstX, ok := dst.(fs.Object)
|
||||
if ok {
|
||||
ok = s.toBeChecked.Put(s.ctx, fs.ObjectPair{Src: srcX, Dst: dstX})
|
||||
if !ok {
|
||||
return false
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case s.toBeChecked <- fs.ObjectPair{Src: srcX, Dst: dstX}:
|
||||
}
|
||||
} else {
|
||||
// FIXME src is file, dst is directory
|
||||
|
||||
@@ -382,14 +382,13 @@ func Run(t *testing.T, opt *Opt) {
|
||||
t.Run("TestFsPutError", func(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
|
||||
const N = 5 * 1024
|
||||
// Read N bytes then produce an error
|
||||
contents := fstest.RandomString(N)
|
||||
// Read 50 bytes then produce an error
|
||||
contents := fstest.RandomString(50)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
er := &errorReader{errors.New("potato")}
|
||||
in := io.MultiReader(buf, er)
|
||||
|
||||
obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
|
||||
obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 100, true, nil, nil)
|
||||
_, err := remote.Put(in, obji)
|
||||
// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
|
||||
assert.NotNil(t, err)
|
||||
|
||||
@@ -45,7 +45,7 @@ var (
|
||||
{
|
||||
Name: "TestCryptDrive:",
|
||||
SubDir: false,
|
||||
FastList: true,
|
||||
FastList: false,
|
||||
},
|
||||
{
|
||||
Name: "TestCryptSwift:",
|
||||
@@ -55,7 +55,7 @@ var (
|
||||
{
|
||||
Name: "TestDrive:",
|
||||
SubDir: false,
|
||||
FastList: true,
|
||||
FastList: false,
|
||||
},
|
||||
{
|
||||
Name: "TestDropbox:",
|
||||
@@ -72,11 +72,6 @@ var (
|
||||
SubDir: false,
|
||||
FastList: false,
|
||||
},
|
||||
{
|
||||
Name: "TestJottacloud:",
|
||||
SubDir: false,
|
||||
FastList: false,
|
||||
},
|
||||
{
|
||||
Name: "TestOneDrive:",
|
||||
SubDir: false,
|
||||
@@ -177,7 +172,7 @@ func newTest(pkg, remote string, subdir bool, fastlist bool) *test {
|
||||
pkg: pkg,
|
||||
remote: remote,
|
||||
subdir: subdir,
|
||||
cmdLine: []string{binary, "-test.timeout", timeout.String(), "-remote", remote},
|
||||
cmdLine: []string{binary, "-test.timeout", (*timeout).String(), "-remote", remote},
|
||||
try: 1,
|
||||
}
|
||||
if *fstest.Verbose {
|
||||
@@ -231,7 +226,7 @@ func (t *test) findFailures() {
|
||||
|
||||
// nextCmdLine returns the next command line
|
||||
func (t *test) nextCmdLine() []string {
|
||||
cmdLine := t.cmdLine
|
||||
cmdLine := t.cmdLine[:]
|
||||
if t.runFlag != "" {
|
||||
cmdLine = append(cmdLine, "-test.run", t.runFlag)
|
||||
}
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
// +build race
|
||||
|
||||
// Package israce reports if the Go race detector is enabled.
|
||||
//
|
||||
// From https://stackoverflow.com/questions/44944959/how-can-i-check-if-the-race-detector-is-enabled-at-runtime
|
||||
package israce
|
||||
|
||||
// Enabled reports if the race detector is enabled.
|
||||
const Enabled = true
|
||||
@@ -1,9 +0,0 @@
|
||||
// +build !race
|
||||
|
||||
// Package israce reports if the Go race detector is enabled.
|
||||
//
|
||||
// From https://stackoverflow.com/questions/44944959/how-can-i-check-if-the-race-detector-is-enabled-at-runtime
|
||||
package israce
|
||||
|
||||
// Enabled reports if the race detector is enabled.
|
||||
const Enabled = false
|
||||
vendor/github.com/okzk/sdnotify/notify.go (generated, vendored, 1 changed line)
@@ -2,7 +2,6 @@
|
||||
|
||||
package sdnotify
|
||||
|
||||
// SdNotify sends a specified string to the systemd notification socket.
|
||||
func SdNotify(state string) error {
|
||||
// do nothing
|
||||
return nil
|
||||
|
||||
vendor/github.com/okzk/sdnotify/notify_linux.go (generated, vendored, 3 changed lines)
@@ -5,11 +5,10 @@ import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// SdNotify sends a specified string to the systemd notification socket.
|
||||
func SdNotify(state string) error {
|
||||
name := os.Getenv("NOTIFY_SOCKET")
|
||||
if name == "" {
|
||||
return ErrSdNotifyNoSocket
|
||||
return SdNotifyNoSocket
|
||||
}
|
||||
|
||||
conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: name, Net: "unixgram"})
|
||||
|
||||
vendor/github.com/okzk/sdnotify/sample/main.go (generated, vendored, 20 changed lines)
@@ -1,25 +1,24 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/okzk/sdnotify"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/okzk/sdnotify"
|
||||
)
|
||||
|
||||
func reload() {
|
||||
// Tells the service manager that the service is reloading its configuration.
|
||||
sdnotify.Reloading()
|
||||
sdnotify.SdNotifyReloading()
|
||||
|
||||
log.Println("reloading...")
|
||||
time.Sleep(time.Second)
|
||||
log.Println("reloaded.")
|
||||
|
||||
// The service must also send a "READY" notification when it completed reloading its configuration.
|
||||
sdnotify.Ready()
|
||||
sdnotify.SdNotifyReady()
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -28,16 +27,7 @@ func main() {
|
||||
log.Println("started.")
|
||||
|
||||
// Tells the service manager that service startup is finished.
|
||||
sdnotify.Ready()
|
||||
|
||||
go func() {
|
||||
tick := time.Tick(30 * time.Second)
|
||||
for {
|
||||
<-tick
|
||||
log.Println("watchdog reporting")
|
||||
sdnotify.Watchdog()
|
||||
}
|
||||
}()
|
||||
sdnotify.SdNotifyReady()
|
||||
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
@@ -50,7 +40,7 @@ func main() {
|
||||
}
|
||||
|
||||
// Tells the service manager that the service is beginning its shutdown.
|
||||
sdnotify.Stopping()
|
||||
sdnotify.SdNotifyStopping()
|
||||
|
||||
log.Println("existing...")
|
||||
time.Sleep(time.Second)
|
||||
|
||||
vendor/github.com/okzk/sdnotify/sample/sample.service (generated, vendored, 2 changed lines)
@@ -6,8 +6,6 @@ Type=notify
|
||||
ExecStart=/path/to/sample
|
||||
ExecStop=/bin/kill -SIGTERM $MAINPID
|
||||
ExecReload=/bin/kill -SIGHUP $MAINPID
|
||||
WatchdogSec=60s
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy = multi-user.target
|
||||
|
||||
vendor/github.com/okzk/sdnotify/util.go (generated, vendored, 30 changed lines)
@@ -1,39 +1,21 @@
|
||||
package sdnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
import "errors"
|
||||
|
||||
// ErrSdNotifyNoSocket is the error returned when the NOTIFY_SOCKET does not exist.
|
||||
var ErrSdNotifyNoSocket = errors.New("No socket")
|
||||
var SdNotifyNoSocket = errors.New("No socket")
|
||||
|
||||
// Ready sends READY=1 to the systemd notify socket.
|
||||
func Ready() error {
|
||||
func SdNotifyReady() error {
|
||||
return SdNotify("READY=1")
|
||||
}
|
||||
|
||||
// Stopping sends STOPPING=1 to the systemd notify socket.
|
||||
func Stopping() error {
|
||||
func SdNotifyStopping() error {
|
||||
return SdNotify("STOPPING=1")
|
||||
}
|
||||
|
||||
// Reloading sends RELOADING=1 to the systemd notify socket.
|
||||
func Reloading() error {
|
||||
func SdNotifyReloading() error {
|
||||
return SdNotify("RELOADING=1")
|
||||
}
|
||||
|
||||
// Errno sends ERRNO=? to the systemd notify socket.
|
||||
func Errno(errno int) error {
|
||||
return SdNotify(fmt.Sprintf("ERRNO=%d", errno))
|
||||
}
|
||||
|
||||
// Status sends STATUS=? to the systemd notify socket.
|
||||
func Status(status string) error {
|
||||
func SdNotifyStatus(status string) error {
|
||||
return SdNotify("STATUS=" + status)
|
||||
}
|
||||
|
||||
// Watchdog sends WATCHDOG=1 to the systemd notify socket.
|
||||
func Watchdog() error {
|
||||
return SdNotify("WATCHDOG=1")
|
||||
}
|
||||
|
||||
vendor/github.com/t3rm1n4l/go-mega/mega.go (generated, vendored, 2 changed lines)
@@ -464,8 +464,6 @@ func (m *Mega) Login(email string, passwd string) error {
|
||||
var err error
|
||||
var result []byte
|
||||
|
||||
email = strings.ToLower(email) // mega uses lowercased emails for login purposes
|
||||
|
||||
passkey := password_key(passwd)
|
||||
uhandle := stringhash(email, passkey)
|
||||
m.uh = make([]byte, len(uhandle))
|
||||
|
||||
vendor/github.com/xanzy/ssh-agent/pageant_windows.go (generated, vendored, 28 changed lines)
@@ -29,8 +29,8 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
. "syscall"
|
||||
. "unsafe"
|
||||
)
|
||||
|
||||
// Maximum size of message can be sent to pageant
|
||||
@@ -53,7 +53,7 @@ const (
|
||||
type copyData struct {
|
||||
dwData uintptr
|
||||
cbData uint32
|
||||
lpData unsafe.Pointer
|
||||
lpData Pointer
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -65,7 +65,7 @@ var (
|
||||
)
|
||||
|
||||
func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
|
||||
proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName)
|
||||
proc := MustLoadDLL(dllName).MustFindProc(funcName)
|
||||
return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
|
||||
}
|
||||
|
||||
@@ -96,21 +96,21 @@ func query(msg []byte) ([]byte, error) {
|
||||
|
||||
thID, _, _ := winGetCurrentThreadID()
|
||||
mapName := fmt.Sprintf("PageantRequest%08x", thID)
|
||||
pMapName, _ := syscall.UTF16PtrFromString(mapName)
|
||||
pMapName, _ := UTF16PtrFromString(mapName)
|
||||
|
||||
mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
|
||||
mmap, err := CreateFileMapping(InvalidHandle, nil, PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CloseHandle(mmap)
|
||||
defer CloseHandle(mmap)
|
||||
|
||||
ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
|
||||
ptr, err := MapViewOfFile(mmap, FILE_MAP_WRITE, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.UnmapViewOfFile(ptr)
|
||||
defer UnmapViewOfFile(ptr)
|
||||
|
||||
mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
|
||||
mmSlice := (*(*[MaxMessageLen]byte)(Pointer(ptr)))[:]
|
||||
|
||||
copy(mmSlice, msg)
|
||||
|
||||
@@ -119,10 +119,10 @@ func query(msg []byte) ([]byte, error) {
|
||||
cds := copyData{
|
||||
dwData: agentCopydataID,
|
||||
cbData: uint32(len(mapNameBytesZ)),
|
||||
lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
|
||||
lpData: Pointer(&(mapNameBytesZ[0])),
|
||||
}
|
||||
|
||||
resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
|
||||
resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(Pointer(&cds)))
|
||||
|
||||
if resp == 0 {
|
||||
return nil, ErrSendMessage
|
||||
@@ -140,7 +140,7 @@ func query(msg []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func pageantWindow() uintptr {
|
||||
nameP, _ := syscall.UTF16PtrFromString("Pageant")
|
||||
h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
|
||||
nameP, _ := UTF16PtrFromString("Pageant")
|
||||
h, _, _ := winFindWindow(uintptr(Pointer(nameP)), uintptr(Pointer(nameP)))
|
||||
return h
|
||||
}
|
||||
|
||||
vfs/dir.go (64 changed lines)
@@ -10,7 +10,6 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -188,25 +187,6 @@ func (d *Dir) _readDir() error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = d._readDirFromEntries(entries, nil, time.Time{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.read = when
|
||||
return nil
|
||||
}
|
||||
|
||||
// update d.items for each dir in the DirTree below this one and
|
||||
// set the last read time - must be called with the lock held
|
||||
func (d *Dir) _readDirFromDirTree(dirTree walk.DirTree, when time.Time) error {
|
||||
return d._readDirFromEntries(dirTree[d.path], dirTree, when)
|
||||
}
|
||||
|
||||
// update d.items and if dirTree is not nil update each dir in the DirTree below this one and
|
||||
// set the last read time - must be called with the lock held
|
||||
func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree walk.DirTree, when time.Time) error {
|
||||
var err error
|
||||
// Cache the items by name
|
||||
found := make(map[string]struct{})
|
||||
for _, entry := range entries {
|
||||
@@ -226,23 +206,10 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree walk.DirTree, w
|
||||
node = newFile(d, obj, name)
|
||||
}
|
||||
case fs.Directory:
|
||||
dir := item
|
||||
// Reuse old dir value if it exists
|
||||
if node == nil || !node.IsDir() {
|
||||
node = newDir(d.vfs, d.f, d, item)
|
||||
}
|
||||
if dirTree != nil {
|
||||
dir := node.(*Dir)
|
||||
dir.mu.Lock()
|
||||
err = dir._readDirFromDirTree(dirTree, when)
|
||||
if err != nil {
|
||||
dir.read = time.Time{}
|
||||
} else {
|
||||
dir.read = when
|
||||
}
|
||||
dir.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node = newDir(d.vfs, d.f, d, dir)
|
||||
}
|
||||
default:
|
||||
err = errors.Errorf("unknown type %T", item)
|
||||
@@ -257,37 +224,10 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree walk.DirTree, w
|
||||
delete(d.items, name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDirTree forces a refresh of the complete directory tree
|
||||
func (d *Dir) readDirTree() error {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
when := time.Now()
|
||||
d.read = time.Time{}
|
||||
fs.Debugf(d.path, "Reading directory tree")
|
||||
dt, err := walk.NewDirTree(d.f, d.path, false, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = d._readDirFromDirTree(dt, when)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(d.path, "Reading directory tree done in %s", time.Since(when))
|
||||
d.read = when
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDir forces a refresh of the directory
|
||||
func (d *Dir) readDir() error {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.read = time.Time{}
|
||||
return d._readDir()
|
||||
}
|
||||
|
||||
// stat a single item in the directory
|
||||
//
|
||||
// returns ENOENT if not found.
|
||||
|
||||
vfs/help.go (17 changed lines)
@@ -27,23 +27,6 @@ Or individual files or directories:

rclone rc vfs/forget file=path/to/file dir=path/to/dir

### File Buffering

The ` + "`--buffer-size`" + ` flag determines the amount of memory,
that will be used to buffer data in advance.

Each open file descriptor will try to keep the specified amount of
data in memory at all times. The buffered data is bound to one file
descriptor and won't be shared between multiple open file descriptors
of the same file.

This flag is a upper limit for the used memory per file descriptor.
The buffer will only use memory for data that is downloaded but not
not yet read. If the buffer is empty, only a small amount of memory
will be used.
The maximum memory used by rclone for buffering can be up to
` + "`--buffer-size * open files`" + `.

### File Caching

**NB** File caching is **EXPERIMENTAL** - use with care!

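As a worked example of the formula quoted in the removed help text above: with `--buffer-size 16M` and 10 file descriptors open at the same time, read-ahead buffering alone can use up to 16M x 10 = 160 MiB, on top of any other memory rclone needs.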
vfs/rc.go (107 changed lines)
@@ -1,7 +1,6 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
@@ -60,112 +59,6 @@ starting with dir will forget that dir, eg
|
||||
|
||||
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
|
||||
|
||||
`,
|
||||
})
|
||||
rc.Add(rc.Call{
|
||||
Path: "vfs/refresh",
|
||||
Fn: func(in rc.Params) (out rc.Params, err error) {
|
||||
root, err := vfs.Root()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
getDir := func(path string) (*Dir, error) {
|
||||
path = strings.Trim(path, "/")
|
||||
segments := strings.Split(path, "/")
|
||||
var node Node = root
|
||||
for _, s := range segments {
|
||||
if dir, ok := node.(*Dir); ok {
|
||||
node, err = dir.stat(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if dir, ok := node.(*Dir); ok {
|
||||
return dir, nil
|
||||
}
|
||||
return nil, EINVAL
|
||||
}
|
||||
|
||||
recursive := false
|
||||
{
|
||||
const k = "recursive"
|
||||
|
||||
if v, ok := in[k]; ok {
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
return out, errors.Errorf("value must be string %q=%v", k, v)
|
||||
}
|
||||
recursive, err = strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return out, errors.Errorf("invalid value %q=%v", k, v)
|
||||
}
|
||||
delete(in, k)
|
||||
}
|
||||
}
|
||||
|
||||
result := map[string]string{}
|
||||
if len(in) == 0 {
|
||||
if recursive {
|
||||
err = root.readDirTree()
|
||||
} else {
|
||||
err = root.readDir()
|
||||
}
|
||||
if err != nil {
|
||||
result[""] = err.Error()
|
||||
} else {
|
||||
result[""] = "OK"
|
||||
}
|
||||
} else {
|
||||
for k, v := range in {
|
||||
path, ok := v.(string)
|
||||
if !ok {
|
||||
return out, errors.Errorf("value must be string %q=%v", k, v)
|
||||
}
|
||||
if strings.HasPrefix(k, "dir") {
|
||||
dir, err := getDir(path)
|
||||
if err != nil {
|
||||
result[path] = err.Error()
|
||||
} else {
|
||||
if recursive {
|
||||
err = dir.readDirTree()
|
||||
} else {
|
||||
err = dir.readDir()
|
||||
}
|
||||
if err != nil {
|
||||
result[path] = err.Error()
|
||||
} else {
|
||||
result[path] = "OK"
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
return out, errors.Errorf("unknown key %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
out = rc.Params{
|
||||
"result": result,
|
||||
}
|
||||
return out, nil
|
||||
},
|
||||
Title: "Refresh the directory cache.",
|
||||
Help: `
|
||||
This reads the directories for the specified paths and freshens the
|
||||
directory cache.
|
||||
|
||||
If no paths are passed in then it will refresh the root directory.
|
||||
|
||||
rclone rc vfs/refresh
|
||||
|
||||
Otherwise pass directories in as dir=path. Any parameter key
|
||||
starting with dir will refresh that directory, eg
|
||||
|
||||
rclone rc vfs/refresh dir=home/junk dir2=data/misc
|
||||
|
||||
If the parameter recursive=true is given the whole directory tree
|
||||
will get refreshed. This refresh will use --fast-list if enabled.
|
||||
|
||||
`,
|
||||
})
|
||||
}
|
||||
|
||||
vfs/read.go (10 changed lines)
@@ -104,16 +104,8 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
|
||||
if fh.noSeek {
|
||||
return ESPIPE
|
||||
}
|
||||
fh.hash = nil
|
||||
if !reopen {
|
||||
ar := fh.r.GetAsyncReader()
|
||||
// try to fullfill the seek with buffer discard
|
||||
if ar != nil && ar.SkipBytes(int(offset-fh.offset)) {
|
||||
fh.offset = offset
|
||||
return nil
|
||||
}
|
||||
}
|
||||
fh.r.StopBuffering() // stop the background reading first
|
||||
fh.hash = nil
|
||||
oldReader := fh.r.GetReader()
|
||||
r, ok := oldReader.(*chunkedreader.ChunkedReader)
|
||||
if !ok {
|
||||
|
||||
@@ -261,9 +261,9 @@ func (fh *RWFileHandle) close() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
isCopied := false
|
||||
copy := false
|
||||
if writer {
|
||||
isCopied = fh.file.delWriter(fh, fh.modified())
|
||||
copy = fh.file.delWriter(fh, fh.modified())
|
||||
defer fh.file.finishWriterClose()
|
||||
}
|
||||
|
||||
@@ -293,7 +293,7 @@ func (fh *RWFileHandle) close() (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
if isCopied {
|
||||
if copy {
|
||||
// Transfer the temp file to the remote
|
||||
cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote)
|
||||
if err != nil {
|
||||
|
||||
@@ -576,7 +576,7 @@ func TestRWFileModTimeWithOpenWriters(t *testing.T) {
|
||||
defer r.Finalise()
|
||||
vfs, fh := rwHandleCreateWriteOnly(t, r)
|
||||
|
||||
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
|
||||
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
|
||||
|
||||
_, err := fh.Write([]byte{104, 105})
|
||||
require.NoError(t, err)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.