Mirror of https://github.com/rclone/rclone.git, synced 2026-01-21 20:03:22 +00:00

Compare commits: fix-5600-b ... fix-b2-acl (1 commit)

Commit 2e99232a04

.github/workflows/build.yml (vendored, 55 lines changed)
@@ -25,12 +25,12 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.16', 'go1.17']
|
||||
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
|
||||
|
||||
include:
|
||||
- job_name: linux
|
||||
os: ubuntu-latest
|
||||
go: '1.18.x'
|
||||
go: '1.17.x'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^linux/"'
|
||||
check: true
|
||||
@@ -40,8 +40,8 @@ jobs:
|
||||
deploy: true
|
||||
|
||||
- job_name: mac_amd64
|
||||
os: macos-11
|
||||
go: '1.18.x'
|
||||
os: macOS-latest
|
||||
go: '1.17.x'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/amd64" -cgo'
|
||||
quicktest: true
|
||||
@@ -49,15 +49,15 @@ jobs:
|
||||
deploy: true
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macos-11
|
||||
go: '1.18.x'
|
||||
os: macOS-latest
|
||||
go: '1.17.x'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows_amd64
|
||||
os: windows-latest
|
||||
go: '1.18.x'
|
||||
go: '1.17.x'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^windows/amd64" -cgo'
|
||||
build_args: '-buildmode exe'
|
||||
@@ -67,7 +67,7 @@ jobs:
|
||||
|
||||
- job_name: windows_386
|
||||
os: windows-latest
|
||||
go: '1.18.x'
|
||||
go: '1.17.x'
|
||||
gotags: cmount
|
||||
goarch: '386'
|
||||
cgo: '1'
|
||||
@@ -78,23 +78,23 @@ jobs:
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.18.x'
|
||||
build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
|
||||
go: '1.17.x'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.15
|
||||
os: ubuntu-latest
|
||||
go: '1.15.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.16
|
||||
os: ubuntu-latest
|
||||
go: '1.16.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.17
|
||||
os: ubuntu-latest
|
||||
go: '1.17.x'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
name: ${{ matrix.job_name }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
@@ -110,7 +110,6 @@ jobs:
|
||||
with:
|
||||
stable: 'false'
|
||||
go-version: ${{ matrix.go }}
|
||||
check-latest: true
|
||||
|
||||
- name: Set environment variables
|
||||
shell: bash
|
||||
@@ -135,7 +134,7 @@ jobs:
|
||||
run: |
|
||||
brew update
|
||||
brew install --cask macfuse
|
||||
if: matrix.os == 'macos-11'
|
||||
if: matrix.os == 'macOS-latest'
|
||||
|
||||
- name: Install Libraries on Windows
|
||||
shell: powershell
|
||||
@@ -246,14 +245,14 @@ jobs:
|
||||
fetch-depth: 0
|
||||
|
||||
# Upgrade together with NDK version
|
||||
- name: Set up Go
|
||||
- name: Set up Go 1.16
|
||||
uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: 1.18.x
|
||||
go-version: 1.16
|
||||
|
||||
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
|
||||
- name: Force NDK version
|
||||
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
|
||||
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v2
|
||||
@@ -274,8 +273,8 @@ jobs:
|
||||
|
||||
- name: install gomobile
|
||||
run: |
|
||||
go install golang.org/x/mobile/cmd/gobind@latest
|
||||
go install golang.org/x/mobile/cmd/gomobile@latest
|
||||
go get golang.org/x/mobile/cmd/gobind
|
||||
go get golang.org/x/mobile/cmd/gomobile
|
||||
env PATH=$PATH:~/go/bin gomobile init
|
||||
|
||||
- name: arm-v7a gomobile build
|
||||
@@ -284,7 +283,7 @@ jobs:
|
||||
- name: arm-v7a Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=arm' >> $GITHUB_ENV
|
||||
@@ -297,7 +296,7 @@ jobs:
|
||||
- name: arm64-v8a Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=arm64' >> $GITHUB_ENV
|
||||
@@ -310,7 +309,7 @@ jobs:
|
||||
- name: x86 Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=386' >> $GITHUB_ENV
|
||||
@@ -323,7 +322,7 @@ jobs:
|
||||
- name: x64 Set environment variables
|
||||
shell: bash
|
||||
run: |
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
|
||||
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
|
||||
echo 'GOOS=android' >> $GITHUB_ENV
|
||||
echo 'GOARCH=amd64' >> $GITHUB_ENV
|
||||
|
||||
@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| Caleb Case | @calebcase | tardigrade backend |

**This is a work in progress Draft**
MANUAL.html (generated, 5153 lines changed): file diff suppressed because it is too large

MANUAL.txt (generated, 7540 lines changed): file diff suppressed because it is too large
@@ -1,5 +1,4 @@
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
|
||||
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
@@ -21,17 +20,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
## Storage providers
|
||||
|
||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
* Box [:page_facing_up:](https://rclone.org/box/)
|
||||
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
|
||||
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
@@ -69,8 +65,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/box"
|
||||
_ "github.com/rclone/rclone/backend/cache"
|
||||
_ "github.com/rclone/rclone/backend/chunker"
|
||||
_ "github.com/rclone/rclone/backend/combine"
|
||||
_ "github.com/rclone/rclone/backend/compress"
|
||||
_ "github.com/rclone/rclone/backend/crypt"
|
||||
_ "github.com/rclone/rclone/backend/drive"
|
||||
@@ -29,7 +28,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/mailru"
|
||||
_ "github.com/rclone/rclone/backend/mega"
|
||||
_ "github.com/rclone/rclone/backend/memory"
|
||||
_ "github.com/rclone/rclone/backend/netstorage"
|
||||
_ "github.com/rclone/rclone/backend/onedrive"
|
||||
_ "github.com/rclone/rclone/backend/opendrive"
|
||||
_ "github.com/rclone/rclone/backend/pcloud"
|
||||
@@ -41,9 +39,9 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/sftp"
|
||||
_ "github.com/rclone/rclone/backend/sharefile"
|
||||
_ "github.com/rclone/rclone/backend/sia"
|
||||
_ "github.com/rclone/rclone/backend/storj"
|
||||
_ "github.com/rclone/rclone/backend/sugarsync"
|
||||
_ "github.com/rclone/rclone/backend/swift"
|
||||
_ "github.com/rclone/rclone/backend/tardigrade"
|
||||
_ "github.com/rclone/rclone/backend/union"
|
||||
_ "github.com/rclone/rclone/backend/uptobox"
|
||||
_ "github.com/rclone/rclone/backend/webdav"
|
||||
|
||||
@@ -50,6 +50,8 @@ const (
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
defaultChunkSize = 4 * fs.Mebi
|
||||
maxChunkSize = 100 * fs.Mebi
|
||||
uploadConcurrency = 4
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
// Default storage account, key and blob endpoint for emulator support,
|
||||
@@ -132,33 +134,12 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Upload chunk size.
|
||||
Help: `Upload chunk size (<= 100 MiB).
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
|
||||
in memory.`,
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed
|
||||
links and these uploads do not fully utilize your bandwidth, then
|
||||
increasing this may help to speed up the transfers.
|
||||
|
||||
In tests, upload speed increases almost linearly with upload
|
||||
concurrency. For example to fill a gigabit pipe it may be necessary to
|
||||
raise this to 64. Note that this will use more memory.
|
||||
|
||||
Note that chunks are stored in memory and there may be up to
|
||||
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
|
||||
in memory.`,
|
||||
Default: 16,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Help: `Size of blob list.
|
||||
@@ -276,7 +257,6 @@ type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||
@@ -436,6 +416,9 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
if cs > maxChunkSize {
|
||||
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -612,7 +595,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
case opt.UseMSI:
|
||||
var token adal.Token
|
||||
var userMSI = &userMSI{}
|
||||
var userMSI *userMSI = &userMSI{}
|
||||
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
|
||||
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
|
||||
// Validate and ensure exactly one is set. (To do: better validation.)
|
||||
@@ -1461,10 +1444,6 @@ func (o *Object) clearMetaData() {
|
||||
// o.size
|
||||
// o.md5
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
container, _ := o.split()
|
||||
if !o.fs.containerOK(container) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
}
|
||||
@@ -1657,10 +1636,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return errCantUpdateArchiveTierBlobs
|
||||
}
|
||||
}
|
||||
container, containerPath := o.split()
|
||||
if container == "" || containerPath == "" {
|
||||
return fmt.Errorf("can't upload to root - need a container")
|
||||
}
|
||||
container, _ := o.split()
|
||||
err = o.fs.makeContainer(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1691,10 +1667,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
|
||||
BufferSize: int(o.fs.opt.ChunkSize),
|
||||
MaxBuffers: o.fs.opt.UploadConcurrency,
|
||||
MaxBuffers: uploadConcurrency,
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
|
||||
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
|
||||
}
|
||||
|
||||
// Don't retry, return a retry error instead
|
||||
|
||||
@@ -17,10 +17,12 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{},
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MaxChunkSize: maxChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -160,15 +160,7 @@ free egress for data downloaded through the Cloudflare network.
|
||||
Rclone works with private buckets by sending an "Authorization" header.
|
||||
If the custom endpoint rewrites the requests for authentication,
|
||||
e.g., in Cloudflare Workers, this header needs to be handled properly.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.
|
||||
|
||||
The URL provided here SHOULD have the protocol and SHOULD NOT have
|
||||
a trailing slash or specify the /file/bucket subpath as rclone will
|
||||
request files with "{download_url}/file/{bucket_name}/{path}".
|
||||
|
||||
Example:
|
||||
> https://mysubdomain.mydomain.tld
|
||||
(No trailing "/", "file" or "bucket")`,
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_auth_duration",
|
||||
|
||||
backend/cache/cache.go (vendored, 8 lines changed)
@@ -394,11 +394,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
notifiedRemotes: make(map[string]bool),
|
||||
}
|
||||
cache.PinUntilFinalized(f.Fs, f)
|
||||
rps := rate.Inf
|
||||
if opt.Rps > 0 {
|
||||
rps = rate.Limit(float64(opt.Rps))
|
||||
}
|
||||
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
|
||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
|
||||
|
||||
f.plexConnector = &plexConnector{}
|
||||
if opt.PlexURL != "" {
|
||||
@@ -493,7 +489,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp fs: %w", err)
|
||||
return nil, fmt.Errorf("failed to create temp fs: %v: %w", err, err)
|
||||
}
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
|
||||
|
||||
@@ -1,877 +0,0 @@
|
||||
// Package combine implements a backend to combine multiple remotes in a directory tree
|
||||
package combine
|
||||
|
||||
/*
|
||||
Have API to add/remove branches in the combine
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "combine",
|
||||
Description: "Combine several remotes into one",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "upstreams",
|
||||
Help: `Upstreams for combining
|
||||
|
||||
These should be in the form
|
||||
|
||||
dir=remote:path dir2=remote2:path
|
||||
|
||||
Before the = is the directory to mount the remote on, and after it is the
remote to put there.
|
||||
|
||||
Embedded spaces can be added using quotes
|
||||
|
||||
"dir=remote:path with space" "dir2=remote2:path with space"
|
||||
|
||||
`,
|
||||
Required: true,
|
||||
Default: fs.SpaceSepList(nil),
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
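The upstreams help above describes values of the form dir=remote:path, split at the first "=" into a mount directory and a remote. A minimal sketch of that parsing, mirroring the loop in NewFs further down (the sample values are invented):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented sample value for the upstreams option: two mount points.
	upstreams := []string{"dir1=/mnt/disk1", "dir2=remote2:bucket/path"}
	for _, u := range upstreams {
		// Split at the first "=" only, as NewFs does below.
		equal := strings.IndexRune(u, '=')
		if equal < 0 {
			fmt.Printf("no %q in upstream definition %q\n", "=", u)
			continue
		}
		dir, remote := u[:equal], u[equal+1:]
		fmt.Printf("mount %q under %q\n", remote, dir)
	}
}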
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Upstreams fs.SpaceSepList `config:"upstreams"`
|
||||
}
|
||||
|
||||
// Fs represents a combine of upstreams
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
hashSet hash.Set // common hashes
|
||||
when time.Time // directory times
|
||||
upstreams map[string]*upstream // map of upstreams
|
||||
}
|
||||
|
||||
// adjustment stores the info to add a prefix to a path or chop characters off it
|
||||
type adjustment struct {
|
||||
prefix string
|
||||
chop int
|
||||
}
|
||||
|
||||
// do makes the adjustment on s
|
||||
func (a *adjustment) do(s string) string {
|
||||
if a.prefix != "" {
|
||||
return join(a.prefix, s)
|
||||
}
|
||||
return s[a.chop:]
|
||||
}
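A rough sketch of the two cases handled by do above, with invented roots and mount directories. The real code uses the join helper defined later, which maps "." to an empty string; path.Join stands in for it here:

package main

import (
	"fmt"
	"path"
)

// adjustment mirrors the struct above: either prepend a prefix or chop characters.
type adjustment struct {
	prefix string
	chop   int
}

func (a *adjustment) do(s string) string {
	if a.prefix != "" {
		return path.Join(a.prefix, s) // simplified stand-in for join()
	}
	return s[a.chop:]
}

func main() {
	// Fs root above the mount point: upstream paths gain the "dir1" prefix.
	withPrefix := &adjustment{prefix: "dir1"}
	fmt.Println(withPrefix.do("file.txt")) // dir1/file.txt

	// Fs root below the mount point: leading characters are chopped off.
	withChop := &adjustment{chop: len("sub/")}
	fmt.Println(withChop.do("sub/file.txt")) // file.txt
}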
|
||||
|
||||
// upstream represents an upstream Fs
|
||||
type upstream struct {
|
||||
f fs.Fs
|
||||
parent *Fs
|
||||
dir string // directory the upstream is mounted
|
||||
pathAdjustment adjustment // how to fiddle with the path
|
||||
}
|
||||
|
||||
// Create an upstream from the directory it is mounted on and the remote
|
||||
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
|
||||
uFs, err := cache.Get(ctx, remote)
|
||||
if err == fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
|
||||
}
|
||||
u := &upstream{
|
||||
f: uFs,
|
||||
parent: f,
|
||||
dir: dir,
|
||||
}
|
||||
if len(f.root) < len(dir) {
|
||||
u.pathAdjustment.prefix = dir[:len(dir)-len(f.root)]
|
||||
} else {
|
||||
u.pathAdjustment.chop = len(f.root) - len(dir)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
||||
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Backward compatible to old config
|
||||
if len(opt.Upstreams) == 0 {
|
||||
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
|
||||
}
|
||||
for _, u := range opt.Upstreams {
|
||||
if strings.HasPrefix(u, name+":") {
|
||||
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
|
||||
}
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
|
||||
when: time.Now(),
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, upstream := range opt.Upstreams {
|
||||
upstream := upstream
|
||||
g.Go(func() (err error) {
|
||||
equal := strings.IndexRune(upstream, '=')
|
||||
if equal < 0 {
|
||||
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
|
||||
}
|
||||
dir, remote := upstream[:equal], upstream[equal+1:]
|
||||
if dir == "" {
|
||||
return fmt.Errorf("empty dir in upstream definition %q", upstream)
|
||||
}
|
||||
if remote == "" {
|
||||
return fmt.Errorf("empty remote in upstream definition %q", upstream)
|
||||
}
|
||||
u, err := f.newUpstream(ctx, dir, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mu.Lock()
|
||||
f.upstreams[dir] = u
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// check features
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(u.f) {
|
||||
canMove = false
|
||||
}
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// Enable ListR when upstreams either support ListR or are local
// but not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().ListR != nil {
|
||||
features.ListR = f.ListR
|
||||
} else if !u.f.Features().IsLocal {
|
||||
features.ListR = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Purge when any upstreams support it
|
||||
if features.Purge == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Purge != nil {
|
||||
features.Purge = f.Purge
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Shutdown when any upstreams support it
|
||||
if features.Shutdown == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Shutdown != nil {
|
||||
features.Shutdown = f.Shutdown
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable DirCacheFlush when any upstreams support it
|
||||
if features.DirCacheFlush == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().DirCacheFlush != nil {
|
||||
features.DirCacheFlush = f.DirCacheFlush
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
var hashSet hash.Set
|
||||
var first = true
|
||||
for _, u := range f.upstreams {
|
||||
if first {
|
||||
hashSet = u.f.Hashes()
|
||||
first = false
|
||||
} else {
|
||||
hashSet = hashSet.Overlap(u.f.Hashes())
|
||||
}
|
||||
}
|
||||
f.hashSet = hashSet
|
||||
|
||||
// Check to see if the root is actually a file
|
||||
if f.root != "" {
|
||||
_, err := f.NewObject(ctx, "")
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check to see if the root path is actually an existing file
|
||||
oldRoot := f.root
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.root = newRoot
|
||||
// Adjust path adjustment to remove leaf
|
||||
for _, u := range f.upstreams {
|
||||
u.pathAdjustment.chop -= len(leaf) + 1
|
||||
}
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Run a function over all the upstreams in parallel
|
||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
g.Go(func() (err error) {
|
||||
return fn(gCtx, u)
|
||||
})
|
||||
}
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// join the elements together, but unlike path.Join return an empty string instead of "."
|
||||
func join(elem ...string) string {
|
||||
result := path.Join(elem...)
|
||||
if result == "." {
|
||||
return ""
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// find the upstream for the remote passed in, returning the upstream and the adjusted path
|
||||
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
|
||||
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
|
||||
absolute := join(f.root, remote)
|
||||
for dir, u := range f.upstreams {
|
||||
dirSlash := dir + "/"
|
||||
foundStart := -1
|
||||
foundEnd := -1
|
||||
if absolute == dir {
|
||||
foundEnd = len(dir)
|
||||
foundStart = foundEnd
|
||||
} else if strings.HasPrefix(absolute, dirSlash) {
|
||||
foundEnd = len(dirSlash)
|
||||
foundStart = foundEnd - 1
|
||||
}
|
||||
if foundStart > 0 {
|
||||
uRemote = absolute[foundEnd:]
|
||||
return u, uRemote, nil
|
||||
}
|
||||
}
|
||||
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
|
||||
}
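A standalone illustration of the matching logic in findUpstream above: an exact match on the mount directory yields an empty remainder, while a prefix match strips the directory and the slash (mount names invented):

package main

import (
	"fmt"
	"strings"
)

// match mirrors the per-upstream check above for a single mount directory.
func match(absolute, dir string) (rest string, ok bool) {
	if absolute == dir {
		return "", true // the mount directory itself
	}
	if strings.HasPrefix(absolute, dir+"/") {
		return absolute[len(dir)+1:], true // a path inside the mount
	}
	return "", false
}

func main() {
	fmt.Println(match("dir1/docs/a.txt", "dir1")) // "docs/a.txt" true
	fmt.Println(match("dir1", "dir1"))            // "" true
	fmt.Println(match("dir2/b.txt", "dir1"))      // "" false
}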
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("combine root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Rmdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Hashes returns the hash types supported by all of the upstreams
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return f.hashSet
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Mkdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// purge the upstream or fallback to a slow way
|
||||
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
||||
if do := u.f.Features().Purge; do != nil {
|
||||
err = do(ctx, dir)
|
||||
} else {
|
||||
err = operations.Purge(ctx, u.f, dir)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
if f.root == "" && dir == "" {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return u.purge(ctx, "")
|
||||
})
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.purge(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Move
|
||||
useCopy := false
|
||||
if do == nil {
|
||||
do = dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
useCopy = true
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If did Copy then remove the source object
|
||||
if useCopy {
|
||||
err = srcObj.Remove(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
dstU, dstURemote, err := f.findUpstream(dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
|
||||
return do(ctx, srcU.f, srcURemote, dstURemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var uChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
if do := u.f.Features().ChangeNotify; do != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
do(ctx, fn, ch)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
for i := range ch {
|
||||
for _, c := range uChans {
|
||||
c <- i
|
||||
}
|
||||
}
|
||||
for _, c := range uChans {
|
||||
close(c)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
ctx := context.Background()
|
||||
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().DirCacheFlush; do != nil {
|
||||
do()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||
srcPath := src.Remote()
|
||||
u, uRemote, err := f.findUpstream(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uSrc := operations.NewOverrideRemote(src, uRemote)
|
||||
var o fs.Object
|
||||
if stream {
|
||||
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
||||
} else {
|
||||
o, err = u.f.Put(ctx, in, uSrc, options...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, false, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, true, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
usage := &fs.Usage{
|
||||
Total: new(int64),
|
||||
Used: new(int64),
|
||||
Trashed: new(int64),
|
||||
Other: new(int64),
|
||||
Free: new(int64),
|
||||
Objects: new(int64),
|
||||
}
|
||||
for _, u := range f.upstreams {
|
||||
doAbout := u.f.Features().About
|
||||
if doAbout == nil {
|
||||
continue
|
||||
}
|
||||
usg, err := doAbout(ctx)
|
||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if usg.Total != nil && usage.Total != nil {
|
||||
*usage.Total += *usg.Total
|
||||
} else {
|
||||
usage.Total = nil
|
||||
}
|
||||
if usg.Used != nil && usage.Used != nil {
|
||||
*usage.Used += *usg.Used
|
||||
} else {
|
||||
usage.Used = nil
|
||||
}
|
||||
if usg.Trashed != nil && usage.Trashed != nil {
|
||||
*usage.Trashed += *usg.Trashed
|
||||
} else {
|
||||
usage.Trashed = nil
|
||||
}
|
||||
if usg.Other != nil && usage.Other != nil {
|
||||
*usage.Other += *usg.Other
|
||||
} else {
|
||||
usage.Other = nil
|
||||
}
|
||||
if usg.Free != nil && usage.Free != nil {
|
||||
*usage.Free += *usg.Free
|
||||
} else {
|
||||
usage.Free = nil
|
||||
}
|
||||
if usg.Objects != nil && usage.Objects != nil {
|
||||
*usage.Objects += *usg.Objects
|
||||
} else {
|
||||
usage.Objects = nil
|
||||
}
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Wraps entries for this upstream
|
||||
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
|
||||
for i, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
entries[i] = u.newObject(x)
|
||||
case fs.Directory:
|
||||
newDir := fs.NewDirCopy(ctx, x)
|
||||
newDir.SetRemote(u.pathAdjustment.do(newDir.Remote()))
|
||||
entries[i] = newDir
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown entry type %T", entry)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||
if f.root == "" && dir == "" {
|
||||
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||
for combineDir := range f.upstreams {
|
||||
d := fs.NewDir(combineDir, f.when)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries, err = u.f.List(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.wrapEntries(ctx, entries)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
|
||||
if f.root == "" && dir == "" {
|
||||
rootEntries, err := f.List(ctx, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = callback(rootEntries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mu sync.Mutex
|
||||
syncCallback := func(entries fs.DirEntries) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return callback(entries)
|
||||
}
|
||||
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return f.ListR(ctx, u.dir, syncCallback)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wrapCallback := func(entries fs.DirEntries) error {
|
||||
entries, err := u.wrapEntries(ctx, entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(entries)
|
||||
}
|
||||
if do := u.f.Features().ListR; do != nil {
|
||||
err = do(ctx, uRemote, wrapCallback)
|
||||
} else {
|
||||
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
|
||||
}
|
||||
if err == fs.ErrorDirNotFound {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewObject creates a new remote combine file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o, err := u.f.NewObject(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Precision is the greatest Precision of all upstreams
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
var greatestPrecision time.Duration
|
||||
for _, u := range f.upstreams {
|
||||
uPrecision := u.f.Precision()
|
||||
if uPrecision > greatestPrecision {
|
||||
greatestPrecision = uPrecision
|
||||
}
|
||||
}
|
||||
return greatestPrecision
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().Shutdown; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Object describes a wrapped Object
|
||||
//
|
||||
// This is a wrapped Object which knows its path prefix
|
||||
type Object struct {
|
||||
fs.Object
|
||||
u *upstream
|
||||
}
|
||||
|
||||
func (u *upstream) newObject(o fs.Object) *Object {
|
||||
return &Object{
|
||||
Object: o,
|
||||
u: u,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.u.parent
|
||||
}
|
||||
|
||||
// String returns the remote path
|
||||
func (o *Object) String() string {
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.u.pathAdjustment.do(o.Object.String())
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if known
|
||||
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
|
||||
if do, ok := o.Object.(fs.MimeTyper); ok {
|
||||
mimeType = do.MimeType(ctx)
|
||||
}
|
||||
return mimeType
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or
|
||||
// nil if it isn't wrapping anything
|
||||
func (o *Object) UnWrap() fs.Object {
|
||||
return o.Object
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
)
|
||||
@@ -1,79 +0,0 @@
|
||||
// Test Combine filesystem interface
|
||||
package combine_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
_ "github.com/rclone/rclone/backend/memory"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestLocal(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := MakeTestDirs(t, 3)
|
||||
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
|
||||
name := "TestCombineLocal"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":dir1",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMemory(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
|
||||
name := "TestCombineMemory"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":dir1",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestMixed(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := MakeTestDirs(t, 2)
|
||||
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
|
||||
name := "TestCombineMixed"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":dir1",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "combine"},
|
||||
{Name: name, Key: "upstreams", Value: upstreams},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// MakeTestDirs makes directories in /tmp for testing
|
||||
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
|
||||
for i := 1; i <= n; i++ {
|
||||
dir := t.TempDir()
|
||||
dirs = append(dirs, dir)
|
||||
}
|
||||
return dirs
|
||||
}
|
||||
@@ -401,10 +401,6 @@ func isCompressible(r io.Reader) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
ratio := float64(n) / float64(b.Len())
|
||||
return ratio > minCompressionRatio, nil
|
||||
}
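The hunk above keeps the gzip error handling and the final ratio check in isCompressible. A self-contained sketch of the same heuristic, with an assumed threshold since the minCompressionRatio constant is not shown in this diff:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// isCompressible gzips the input into a buffer and compares original size n
// with the compressed size, as the backend code above does.
func isCompressible(r io.Reader, minRatio float64) (bool, error) {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	n, err := io.Copy(w, r)
	if err != nil {
		return false, err
	}
	if err := w.Close(); err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minRatio, nil
}

func main() {
	ok, err := isCompressible(strings.NewReader(strings.Repeat("abcd", 1000)), 1.1)
	fmt.Println(ok, err) // repetitive data compresses well: true <nil>
}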
|
||||
@@ -630,11 +626,9 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
|
||||
// Put the data
|
||||
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
|
||||
if err != nil {
|
||||
if mo != nil {
|
||||
removeErr := mo.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
|
||||
}
|
||||
removeErr := mo.Remove(ctx)
|
||||
if removeErr != nil {
|
||||
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
gocipher "crypto/cipher"
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/Max-Sum/base32768"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
@@ -116,57 +114,6 @@ func (mode NameEncryptionMode) String() (out string) {
|
||||
return out
|
||||
}
|
||||
|
||||
// fileNameEncoding are the encoding methods dealing with encrypted file names
|
||||
type fileNameEncoding interface {
|
||||
EncodeToString(src []byte) string
|
||||
DecodeString(s string) ([]byte, error)
|
||||
}
|
||||
|
||||
// caseInsensitiveBase32Encoding defines a file name encoding
|
||||
// using a modified version of standard base32 as described in
|
||||
// RFC4648
|
||||
//
|
||||
// The standard encoding is modified in two ways
|
||||
// * it becomes lower case (no-one likes upper case filenames!)
|
||||
// * we strip the padding character `=`
|
||||
type caseInsensitiveBase32Encoding struct{}
|
||||
|
||||
// EncodeToString encodes a string using the modified version of
|
||||
// base32 encoding.
|
||||
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
|
||||
encoded := base32.HexEncoding.EncodeToString(src)
|
||||
encoded = strings.TrimRight(encoded, "=")
|
||||
return strings.ToLower(encoded)
|
||||
}
|
||||
|
||||
// DecodeString decodes a string as encoded by EncodeToString
|
||||
func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
|
||||
if strings.HasSuffix(s, "=") {
|
||||
return nil, ErrorBadBase32Encoding
|
||||
}
|
||||
// First figure out how many padding characters to add
|
||||
roundUpToMultipleOf8 := (len(s) + 7) &^ 7
|
||||
equals := roundUpToMultipleOf8 - len(s)
|
||||
s = strings.ToUpper(s) + "========"[:equals]
|
||||
return base32.HexEncoding.DecodeString(s)
|
||||
}
|
||||
|
||||
// NewNameEncoding creates a NameEncoding from a string
|
||||
func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "base32":
|
||||
enc = caseInsensitiveBase32Encoding{}
|
||||
case "base64":
|
||||
enc = base64.RawURLEncoding
|
||||
case "base32768":
|
||||
enc = base32768.SafeEncoding
|
||||
default:
|
||||
err = fmt.Errorf("Unknown file name encoding mode %q", s)
|
||||
}
|
||||
return enc, err
|
||||
}
|
||||
|
||||
// Cipher defines an encoding and decoding cipher for the crypt backend
|
||||
type Cipher struct {
|
||||
dataKey [32]byte // Key for secretbox
|
||||
@@ -174,17 +121,15 @@ type Cipher struct {
|
||||
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
|
||||
block gocipher.Block
|
||||
mode NameEncryptionMode
|
||||
fileNameEnc fileNameEncoding
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
}
|
||||
|
||||
// newCipher initialises the cipher. If salt is "" then it uses a built-in salt value
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
|
||||
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
|
||||
c := &Cipher{
|
||||
mode: mode,
|
||||
fileNameEnc: enc,
|
||||
cryptoRand: rand.Reader,
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
}
|
||||
@@ -242,6 +187,30 @@ func (c *Cipher) putBlock(buf []byte) {
|
||||
c.buffers.Put(buf)
|
||||
}
|
||||
|
||||
// encodeFileName encodes a filename using a modified version of
|
||||
// standard base32 as described in RFC4648
|
||||
//
|
||||
// The standard encoding is modified in two ways
|
||||
// * it becomes lower case (no-one likes upper case filenames!)
|
||||
// * we strip the padding character `=`
|
||||
func encodeFileName(in []byte) string {
|
||||
encoded := base32.HexEncoding.EncodeToString(in)
|
||||
encoded = strings.TrimRight(encoded, "=")
|
||||
return strings.ToLower(encoded)
|
||||
}
|
||||
|
||||
// decodeFileName decodes a filename as encoded by encodeFileName
|
||||
func decodeFileName(in string) ([]byte, error) {
|
||||
if strings.HasSuffix(in, "=") {
|
||||
return nil, ErrorBadBase32Encoding
|
||||
}
|
||||
// First figure out how many padding characters to add
|
||||
roundUpToMultipleOf8 := (len(in) + 7) &^ 7
|
||||
equals := roundUpToMultipleOf8 - len(in)
|
||||
in = strings.ToUpper(in) + "========"[:equals]
|
||||
return base32.HexEncoding.DecodeString(in)
|
||||
}
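A short worked sketch of the padding arithmetic above, using inputs taken from the base32 test table later in this diff: the length is rounded up to the next multiple of 8 with (len+7) &^ 7 and the difference is filled with "=" before decoding:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, s := range []string{"64", "64p0", "64p36d1l6o"} {
		roundUpToMultipleOf8 := (len(s) + 7) &^ 7 // 2 -> 8, 4 -> 8, 10 -> 16
		equals := roundUpToMultipleOf8 - len(s)
		padded := strings.ToUpper(s) + "========"[:equals]
		fmt.Printf("%q -> %q\n", s, padded)
	}
}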
|
||||
|
||||
// encryptSegment encrypts a path segment
|
||||
//
|
||||
// This uses EME with AES
|
||||
@@ -262,7 +231,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
|
||||
}
|
||||
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
||||
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
||||
return c.fileNameEnc.EncodeToString(ciphertext)
|
||||
return encodeFileName(ciphertext)
|
||||
}
|
||||
|
||||
// decryptSegment decrypts a path segment
|
||||
@@ -270,7 +239,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
|
||||
if ciphertext == "" {
|
||||
return "", nil
|
||||
}
|
||||
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
|
||||
rawCiphertext, err := decodeFileName(ciphertext)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -12,7 +11,6 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Max-Sum/base32768"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -47,31 +45,11 @@ func TestNewNameEncryptionModeString(t *testing.T) {
|
||||
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
|
||||
}
|
||||
|
||||
type EncodingTestCase struct {
|
||||
in string
|
||||
expected string
|
||||
}
|
||||
|
||||
func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
for _, test := range testCases {
|
||||
enc, err := NewNameEncoding(encoding)
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
actual := enc.EncodeToString([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := enc.DecodeString(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = enc.DecodeString(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32(t *testing.T) {
|
||||
testEncodeFileName(t, "base32", []EncodingTestCase{
|
||||
func TestEncodeFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{"", ""},
|
||||
{"1", "64"},
|
||||
{"12", "64p0"},
|
||||
@@ -89,56 +67,20 @@ func TestEncodeFileNameBase32(t *testing.T) {
|
||||
{"12345678901234", "64p36d1l6orjge9g64p36d0"},
|
||||
{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
|
||||
{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
|
||||
}, true)
|
||||
} {
|
||||
actual := encodeFileName([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
recovered, err := decodeFileName(test.expected)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = decodeFileName(in)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase64(t *testing.T) {
|
||||
testEncodeFileName(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "MQ"},
|
||||
{"12", "MTI"},
|
||||
{"123", "MTIz"},
|
||||
{"1234", "MTIzNA"},
|
||||
{"12345", "MTIzNDU"},
|
||||
{"123456", "MTIzNDU2"},
|
||||
{"1234567", "MTIzNDU2Nw"},
|
||||
{"12345678", "MTIzNDU2Nzg"},
|
||||
{"123456789", "MTIzNDU2Nzg5"},
|
||||
{"1234567890", "MTIzNDU2Nzg5MA"},
|
||||
{"12345678901", "MTIzNDU2Nzg5MDE"},
|
||||
{"123456789012", "MTIzNDU2Nzg5MDEy"},
|
||||
{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
|
||||
{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
|
||||
{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
|
||||
{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncodeFileNameBase32768(t *testing.T) {
|
||||
testEncodeFileName(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "㼿"},
|
||||
{"12", "㻙ɟ"},
|
||||
{"123", "㻙ⲿ"},
|
||||
{"1234", "㻙ⲍƟ"},
|
||||
{"12345", "㻙ⲍ⍟"},
|
||||
{"123456", "㻙ⲍ⍆ʏ"},
|
||||
{"1234567", "㻙ⲍ⍆觟"},
|
||||
{"12345678", "㻙ⲍ⍆觓ɧ"},
|
||||
{"123456789", "㻙ⲍ⍆觓栯"},
|
||||
{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
|
||||
{"12345678901", "㻙ⲍ⍆觓栩朧"},
|
||||
{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
|
||||
{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
|
||||
{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
|
||||
{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
|
||||
{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32")
|
||||
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
|
||||
func TestDecodeFileName(t *testing.T) {
|
||||
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -148,65 +90,17 @@ func TestDecodeFileNameBase32(t *testing.T) {
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{"hello=hello", base32.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
actual, actualErr := decodeFileName(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase64(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base64")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
func TestEncryptSegment(t *testing.T) {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{"64=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{"Hello=Hello", base64.CorruptInputError(5)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeFileNameBase32768(t *testing.T) {
|
||||
enc, err := NewNameEncoding("base32768")
assert.NoError(t, err, "There should be no error creating name encoder for base32.")
// We've tested decoding the valid ones above, now concentrate on the invalid ones
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"㼿c", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
|
||||
} {
|
||||
actual, actualErr := enc.DecodeString(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCases {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
if caseInsensitive {
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32(t *testing.T) {
|
||||
testEncryptSegment(t, "base32", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
@@ -224,61 +118,26 @@ func TestEncryptSegmentBase32(t *testing.T) {
|
||||
{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
|
||||
{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
|
||||
{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
|
||||
}, true)
|
||||
} {
|
||||
actual := c.encryptSegment(test.in)
|
||||
assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
|
||||
recovered, err := c.decryptSegment(test.expected)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
|
||||
in := strings.ToUpper(test.expected)
|
||||
recovered, err = c.decryptSegment(in)
|
||||
assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
|
||||
assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase64(t *testing.T) {
|
||||
testEncryptSegment(t, "base64", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"123", "1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
|
||||
{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
|
||||
{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
|
||||
{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
|
||||
{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
|
||||
{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
|
||||
{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
|
||||
{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
|
||||
{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
|
||||
{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
|
||||
{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
|
||||
{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
|
||||
{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestEncryptSegmentBase32768(t *testing.T) {
|
||||
testEncryptSegment(t, "base32768", []EncodingTestCase{
|
||||
{"", ""},
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"12", "竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
|
||||
{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
|
||||
{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
|
||||
{"1234567", "茫螓翁連劘樓㶔抉矟"},
|
||||
{"12345678", "龝☳䘊辄岅較络㧩襟"},
|
||||
{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
|
||||
{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
|
||||
{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
|
||||
{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
|
||||
{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
|
||||
{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
|
||||
{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
|
||||
{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32(t *testing.T) {
|
||||
func TestDecryptSegment(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 3328)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
enc, _ := NewNameEncoding("base32")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
@@ -286,371 +145,49 @@ func TestDecryptSegmentBase32(t *testing.T) {
|
||||
{"64=", ErrorBadBase32Encoding},
|
||||
{"!", base32.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase64(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := make([]byte, 2816)
|
||||
for i := range longName {
|
||||
longName[i] = 'a'
|
||||
}
|
||||
enc, _ := NewNameEncoding("base64")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"6H=", base64.CorruptInputError(2)},
|
||||
{"!", base64.CorruptInputError(0)},
|
||||
{string(longName), ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptSegmentBase32768(t *testing.T) {
|
||||
// We've tested the forwards above, now concentrate on the errors
|
||||
longName := strings.Repeat("怪", 1280)
|
||||
enc, _ := NewNameEncoding("base32768")
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedErr error
|
||||
}{
|
||||
{"怪=", base32768.CorruptInputError(1)},
|
||||
{"!", base32768.CorruptInputError(0)},
|
||||
{longName, ErrorTooLongAfterDecode},
|
||||
{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
|
||||
func TestEncryptFileName(t *testing.T) {
|
||||
// First standard mode
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
for _, test := range testCasesEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||
// Standard mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
for _, test := range testCasesNoEncryptDir {
|
||||
assert.Equal(t, test.expected, c.EncryptFileName(test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32", []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
|
||||
{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase64(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base64", []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptFileNameBase32768(t *testing.T) {
|
||||
testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
}, []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
|
||||
{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNonStandardEncryptFileName(t *testing.T) {
|
||||
// Off mode
|
||||
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
|
||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
|
||||
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
|
||||
// Obfuscation mode
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
|
||||
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
// Obfuscation mode with directory name encryption off
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
|
||||
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
|
||||
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
|
||||
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
|
||||
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
|
||||
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
|
||||
}
|
||||
|
||||
func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range testCases {
|
||||
// Test when dirNameEncrypt=true
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
if caseInsensitive {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
// Add a character should raise ErrorNotAMultipleOfBlocksize
|
||||
actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
|
||||
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
|
||||
assert.Equal(t, "", actual)
|
||||
// Test when dirNameEncrypt=false
|
||||
noDirEncryptIn := test.in
|
||||
if strings.LastIndex(test.expected, "/") != -1 {
|
||||
noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
|
||||
}
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
actual, actualErr = c.DecryptFileName(noDirEncryptIn)
|
||||
assert.NoError(t, actualErr)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase32(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base32", []EncodingTestCase{
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase64(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base64", []EncodingTestCase{
|
||||
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestStandardDecryptFileNameBase32768(t *testing.T) {
|
||||
testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestNonStandardDecryptFileName(t *testing.T) {
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncDecMatches(t *testing.T) {
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
in string
|
||||
}{
|
||||
{NameEncryptionStandard, "1/2/3/4"},
|
||||
{NameEncryptionOff, "1/2/3/4"},
|
||||
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
|
||||
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", true, enc)
|
||||
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, out, test.in, what)
|
||||
assert.Equal(t, err, nil, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
// First standard mode
|
||||
for _, test := range testCases {
|
||||
assert.Equal(t, test.expected, c.EncryptDirName(test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStandardEncryptDirNameBase32(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base32", []EncodingTestCase{
|
||||
{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
|
||||
{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
|
||||
{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptDirNameBase64(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base64", []EncodingTestCase{
|
||||
{"1", "yBxRX25ypgUVyj8MSxJnFw"},
|
||||
{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
|
||||
{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardEncryptDirNameBase32768(t *testing.T) {
|
||||
testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
|
||||
{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
|
||||
{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
|
||||
{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestNonStandardEncryptDirName(t *testing.T) {
|
||||
for _, encoding := range []string{"base32", "base64", "base32768"} {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
}
|
||||
}
|
||||
|
||||
func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range testCases {
|
||||
// Test dirNameEncrypt=true
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
if caseInsensitive {
|
||||
actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
|
||||
assert.Equal(t, actual, test.expected)
|
||||
assert.NoError(t, actualErr)
|
||||
}
|
||||
actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
|
||||
assert.Equal(t, "", actual)
|
||||
assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
|
||||
// Test dirNameEncrypt=false
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
|
||||
actual, actualErr = c.DecryptDirName(test.in)
|
||||
assert.Equal(t, test.in, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
actual, actualErr = c.DecryptDirName(test.expected)
|
||||
assert.Equal(t, test.expected, actual)
|
||||
assert.NoError(t, actualErr)
|
||||
// Test dirNameEncrypt=false
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
enc, _ := NewNameEncoding(encoding)
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
|
||||
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
}
|
||||
*/
|
||||
|
||||
func TestStandardDecryptDirNameBase32(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base32", []EncodingTestCase{
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
|
||||
{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
|
||||
}, true)
|
||||
}
|
||||
|
||||
func TestStandardDecryptDirNameBase64(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base64", []EncodingTestCase{
|
||||
{"yBxRX25ypgUVyj8MSxJnFw", "1"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
|
||||
{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestStandardDecryptDirNameBase32768(t *testing.T) {
|
||||
testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
|
||||
{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
|
||||
}, false)
|
||||
}
|
||||
|
||||
func TestNonStandardDecryptDirName(t *testing.T) {
|
||||
func TestDecryptFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
@@ -658,11 +195,87 @@ func TestNonStandardDecryptDirName(t *testing.T) {
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
|
||||
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
|
||||
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
|
||||
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
|
||||
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
|
||||
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||
actual, actualErr := c.DecryptFileName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
assert.Equal(t, test.expectedErr, actualErr, what)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncDecMatches(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
in string
|
||||
}{
|
||||
{NameEncryptionStandard, "1/2/3/4"},
|
||||
{NameEncryptionOff, "1/2/3/4"},
|
||||
{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
|
||||
{NameEncryptionObfuscated, "Avatar The Last Airbender"},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", true)
|
||||
out, err := c.DecryptFileName(c.EncryptFileName(test.in))
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, out, test.in, what)
|
||||
assert.Equal(t, err, nil, what)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptDirName(t *testing.T) {
|
||||
// First standard mode
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
|
||||
// Standard mode with dir name encryption off
|
||||
c, _ = newCipher(NameEncryptionStandard, "", "", false)
|
||||
assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
// Now off mode
|
||||
c, _ = newCipher(NameEncryptionOff, "", "", true)
|
||||
assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
|
||||
}
|
||||
|
||||
func TestDecryptDirName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
mode NameEncryptionMode
|
||||
dirNameEncrypt bool
|
||||
in string
|
||||
expected string
|
||||
expectedErr error
|
||||
}{
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
|
||||
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
|
||||
{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
|
||||
{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
|
||||
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
|
||||
{NameEncryptionOff, true, ".bin", ".bin", nil},
|
||||
} {
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
|
||||
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
|
||||
actual, actualErr := c.DecryptDirName(test.in)
|
||||
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
@@ -671,7 +284,7 @@ func TestNonStandardDecryptDirName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEncryptedSize(t *testing.T) {
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in int64
|
||||
expected int64
|
||||
@@ -695,7 +308,7 @@ func TestEncryptedSize(t *testing.T) {
|
||||
|
||||
func TestDecryptedSize(t *testing.T) {
|
||||
// Test the errors since we tested the reverse above
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, _ := newCipher(NameEncryptionStandard, "", "", true)
|
||||
for _, test := range []struct {
|
||||
in int64
|
||||
expectedErr error
|
||||
@@ -1066,7 +679,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {
|
||||
|
||||
// Test encrypt decrypt with different buffer sizes
|
||||
func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = &zeroes{} // zero out the nonce
|
||||
buf := make([]byte, bufSize)
|
||||
@@ -1136,7 +749,7 @@ func TestEncryptData(t *testing.T) {
|
||||
{[]byte{1}, file1},
|
||||
{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
|
||||
} {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -1159,7 +772,7 @@ func TestEncryptData(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewEncrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -1175,12 +788,13 @@ func TestNewEncrypter(t *testing.T) {
|
||||
fh, err = c.newEncrypter(z, nil)
|
||||
assert.Nil(t, fh)
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
|
||||
}
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||
// cause a fatal loop
|
||||
func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||
@@ -1209,7 +823,7 @@ func (c *closeDetector) Close() error {
|
||||
}
|
||||
|
||||
func TestNewDecrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
@@ -1252,7 +866,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF
|
||||
func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
|
||||
@@ -1268,7 +882,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = &zeroes{} // nodge the crypto rand generator
|
||||
|
||||
@@ -1474,7 +1088,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecrypterRead(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test truncating the file at each possible point
|
||||
@@ -1538,7 +1152,7 @@ func TestDecrypterRead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDecrypterClose(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cd := newCloseDetector(bytes.NewBuffer(file16))
|
||||
@@ -1576,7 +1190,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPutGetBlock(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
block := c.getBlock()
|
||||
@@ -1587,7 +1201,7 @@ func TestPutGetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check zero keys OK
|
||||
|
||||
@@ -116,29 +116,6 @@ names, or for debugging purposes.`,
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.

This option can help shorten the encrypted filename. The most suitable
option depends on how your remote counts filename length and whether
it is case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remotes.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case-sensitive remotes.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoints instead of UTF-8 byte length (e.g. OneDrive).",
},
},
Advanced: true,
}},
})
}
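For a sense of why the encoding choice matters, here is an illustrative, standard-library-only comparison of encoded name lengths (base32768 is omitted since its advantage is measured in UTF-16 code units rather than bytes):

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	// One 16-byte block of encrypted name data, as produced by the name cipher.
	block := make([]byte, 16)

	b32 := base32.HexEncoding.WithPadding(base32.NoPadding).EncodeToString(block)
	b64 := base64.RawURLEncoding.EncodeToString(block)

	// base32 costs ~1.6 characters per byte and base64 ~1.33, so base64 keeps
	// encrypted names shorter on remotes that count bytes or characters.
	fmt.Println(len(b32), len(b64)) // 26 22
}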
@@ -163,11 +140,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
||||
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
|
||||
}
|
||||
}
|
||||
enc, err := NewNameEncoding(opt.FilenameEncoding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
||||
}
|
||||
@@ -256,7 +229,6 @@ type Options struct {
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@@ -443,7 +415,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||
}
|
||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func TestIntegration(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandardBase32(t *testing.T) {
|
||||
func TestStandard(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
@@ -49,48 +49,6 @@ func TestStandardBase32(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardBase64(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base64"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStandardBase32768(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base32768"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
|
||||
// TestOff runs integration tests against the remote
|
||||
func TestOff(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"mime"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -85,7 +84,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
}
|
||||
_mimeTypeToExtensionDuplicates = map[string]string{
|
||||
"application/x-vnd.oasis.opendocument.presentation": ".odp",
|
||||
@@ -300,17 +299,6 @@ a non root folder as its starting point.
|
||||
Default: true,
|
||||
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_shortcut_content",
|
||||
Default: false,
|
||||
Help: `Server side copy contents of shortcuts instead of the shortcut.
|
||||
|
||||
When doing server side copies, normally rclone will copy shortcuts as
|
||||
shortcuts.
|
||||
|
||||
If this flag is used then rclone will copy the contents of shortcuts
|
||||
rather than shortcuts themselves when doing server side copies.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_gdocs",
|
||||
Default: false,
|
||||
@@ -554,14 +542,6 @@ Google don't document so it may break in the future.
|
||||
Normally rclone dereferences shortcut files making them appear as if
|
||||
they are the original file (see [the shortcuts section](#shortcuts)).
|
||||
If this flag is set then rclone will ignore shortcut files completely.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "skip_dangling_shortcuts",
|
||||
Help: `If set skip dangling shortcut files.
|
||||
|
||||
If this is set then rclone will not show any dangling shortcuts in listings.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
@@ -598,7 +578,6 @@ type Options struct {
|
||||
TeamDriveID string `config:"team_drive"`
|
||||
AuthOwnerOnly bool `config:"auth_owner_only"`
|
||||
UseTrash bool `config:"use_trash"`
|
||||
CopyShortcutContent bool `config:"copy_shortcut_content"`
|
||||
SkipGdocs bool `config:"skip_gdocs"`
|
||||
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
|
||||
SharedWithMe bool `config:"shared_with_me"`
|
||||
@@ -625,7 +604,6 @@ type Options struct {
|
||||
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
|
||||
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
|
||||
SkipShortcuts bool `config:"skip_shortcuts"`
|
||||
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -928,11 +906,6 @@ OUTER:
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("list: %w", err)
|
||||
}
|
||||
// leave the dangling shortcut out of the listings
|
||||
// we've already logged about the dangling shortcut in resolveShortcut
|
||||
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Check the case of items is correct since
|
||||
// the `=` operator is case insensitive.
|
||||
@@ -1598,15 +1571,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
|
||||
}
|
||||
}
|
||||
|
||||
// If using a link type export and a more specific export
|
||||
// hasn't been found all docs should be exported
|
||||
for _, _extension := range f.exportExtensions {
|
||||
_mimeType := mime.TypeByExtension(_extension)
|
||||
if isLinkMimeType(_mimeType) {
|
||||
return _extension, _mimeType, true
|
||||
}
|
||||
}
|
||||
|
||||
// else return empty
|
||||
return "", "", isDocument
|
||||
}
|
||||
@@ -1617,14 +1581,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
|
||||
// Look through the exportExtensions and find the first format that can be
|
||||
// converted. If none found then return ("", "", "", false)
|
||||
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
|
||||
// If item has MD5 sum it is a file stored on drive
|
||||
if item.Md5Checksum != "" {
|
||||
return
|
||||
}
|
||||
// Folders can't be documents
|
||||
if item.MimeType == driveFolderType {
|
||||
return
|
||||
}
|
||||
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
|
||||
if extension != "" {
|
||||
filename = item.Name + extension
|
||||
@@ -2418,16 +2374,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
createInfo.Description = ""
|
||||
}
|
||||
|
||||
// get the ID of the thing to copy
|
||||
// copy the contents if CopyShortcutContent
|
||||
// else copy the shortcut only
|
||||
|
||||
// get the ID of the thing to copy - this is the shortcut if available
|
||||
id := shortcutID(srcObj.id)
|
||||
|
||||
if f.opt.CopyShortcutContent {
|
||||
id = actualID(srcObj.id)
|
||||
}
|
||||
|
||||
var info *drive.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = f.svc.Files.Copy(id, createInfo).
|
||||
@@ -3236,7 +3185,7 @@ This will return a JSON list of objects like this
|
||||
|
||||
With the -o config parameter it will output the list in a format
|
||||
suitable for adding to a config file to make aliases for all the
|
||||
drives found and a combined drive.
|
||||
drives found.
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
@@ -3246,15 +3195,10 @@ drives found and a combined drive.
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
[AllDrives]
|
||||
type = combine
|
||||
remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
|
||||
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
|
||||
It will also add a remote called AllDrives which shows all the shared
|
||||
drives combined into one directory tree.
|
||||
be accessible with the aliases shown. This may require manual editing
|
||||
of the names.
|
||||
|
||||
`,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
@@ -3370,30 +3314,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
re := regexp.MustCompile(`[^\w_. -]+`)
|
||||
if _, ok := opt["config"]; ok {
|
||||
lines := []string{}
|
||||
upstreams := []string{}
|
||||
names := make(map[string]struct{}, len(drives))
|
||||
for i, drive := range drives {
|
||||
name := re.ReplaceAllString(drive.Name, "_")
|
||||
for {
|
||||
if _, found := names[name]; !found {
|
||||
break
|
||||
}
|
||||
name += fmt.Sprintf("-%d", i)
|
||||
}
|
||||
names[name] = struct{}{}
|
||||
for _, drive := range drives {
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("[%s]", name))
|
||||
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
|
||||
lines = append(lines, fmt.Sprintf("type = alias"))
|
||||
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
|
||||
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
|
||||
}
|
||||
lines = append(lines, "")
|
||||
lines = append(lines, fmt.Sprintf("[AllDrives]"))
|
||||
lines = append(lines, fmt.Sprintf("type = combine"))
|
||||
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
|
||||
return lines, nil
|
||||
}
|
||||
return drives, nil
|
||||
|
||||
@@ -422,7 +422,11 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
o := obj.(*Object)
|
||||
|
||||
dir := t.TempDir()
|
||||
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
}()
|
||||
|
||||
checkFile := func(name string) {
|
||||
filePath := filepath.Join(dir, name)
|
||||
@@ -487,11 +491,19 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
|
||||
subFs, isDriveFs := subFsResult.(*Fs)
|
||||
require.True(t, isDriveFs)
|
||||
|
||||
tempDir1 := t.TempDir()
|
||||
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir1)
|
||||
}()
|
||||
tempFs1, err := fs.NewFs(defCtx, tempDir1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tempDir2 := t.TempDir()
|
||||
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(tempDir2)
|
||||
}()
|
||||
tempFs2, err := fs.NewFs(defCtx, tempDir2)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -1650,37 +1650,13 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
}
|
||||
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||
skip := int64(0)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
// seek to the start in case this is a retry
|
||||
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
|
||||
// after session is started, we retry everything
|
||||
if err != nil {
|
||||
// Check for incorrect offset error and retry with new offset
|
||||
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
|
||||
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
|
||||
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
|
||||
delta := int64(correctOffset) - int64(cursor.Offset)
|
||||
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
|
||||
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
|
||||
} else if skip == chunkSize {
|
||||
fs.Debugf(o, "%s: chunk received OK - continuing", what)
|
||||
return false, nil
|
||||
} else if skip > chunkSize {
|
||||
// This error should never happen
|
||||
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
|
||||
}
|
||||
// Skip the sent data on next retry
|
||||
cursor.Offset = uint64(int64(cursor.Offset) + delta)
|
||||
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
|
||||
}
|
||||
}
|
||||
}
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1784,7 +1760,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
|
||||
} else {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
|
||||
entry, err = o.fs.srv.Upload(commitInfo, in)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -42,15 +42,18 @@ func init() {
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
@@ -514,32 +517,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}

// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)

@@ -182,34 +182,3 @@ type FoldersList struct {
|
||||
Status string `json:"Status"`
|
||||
SubFolders []Folder `json:"sub_folders"`
|
||||
}
|
||||
|
||||
// AccountInfo is the structure how 1Fichier returns user info
|
||||
type AccountInfo struct {
|
||||
StatsDate string `json:"stats_date"`
|
||||
MailRM string `json:"mail_rm"`
|
||||
DefaultQuota int64 `json:"default_quota"`
|
||||
UploadForbidden string `json:"upload_forbidden"`
|
||||
PageLimit int `json:"page_limit"`
|
||||
ColdStorage int64 `json:"cold_storage"`
|
||||
Status string `json:"status"`
|
||||
UseCDN string `json:"use_cdn"`
|
||||
AvailableColdStorage int64 `json:"available_cold_storage"`
|
||||
DefaultPort string `json:"default_port"`
|
||||
DefaultDomain int `json:"default_domain"`
|
||||
Email string `json:"email"`
|
||||
DownloadMenu string `json:"download_menu"`
|
||||
FTPDID int `json:"ftp_did"`
|
||||
DefaultPortFiles string `json:"default_port_files"`
|
||||
FTPReport string `json:"ftp_report"`
|
||||
OverQuota int64 `json:"overquota"`
|
||||
AvailableStorage int64 `json:"available_storage"`
|
||||
CDN string `json:"cdn"`
|
||||
Offer string `json:"offer"`
|
||||
SubscriptionEnd string `json:"subscription_end"`
|
||||
TFA string `json:"2fa"`
|
||||
AllowedColdStorage int64 `json:"allowed_cold_storage"`
|
||||
HotStorage int64 `json:"hot_storage"`
|
||||
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
|
||||
FTPMode string `json:"ftp_mode"`
|
||||
RUReport string `json:"ru_report"`
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
"sync"
"time"

"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -52,13 +52,11 @@ func init() {
Help:     "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name:    "user",
Help:    "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
}, {
Name:    "port",
Help:    "FTP port number.",
Default: 21,
Name: "port",
Help: "FTP port, leave blank to use default (21).",
}, {
Name: "pass",
Help: "FTP password.",

@@ -65,7 +65,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -182,30 +182,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo",
|
||||
}, {
|
||||
Value: "asia-northeast2",
|
||||
Help: "Osaka",
|
||||
}, {
|
||||
Value: "asia-northeast3",
|
||||
Help: "Seoul",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai",
|
||||
}, {
|
||||
Value: "asia-south2",
|
||||
Help: "Delhi",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore",
|
||||
}, {
|
||||
Value: "asia-southeast2",
|
||||
Help: "Jakarta",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney",
|
||||
}, {
|
||||
Value: "australia-southeast2",
|
||||
Help: "Melbourne",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland",
|
||||
@@ -221,12 +206,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands",
|
||||
}, {
|
||||
Value: "europe-west6",
|
||||
Help: "Zürich",
|
||||
}, {
|
||||
Value: "europe-central2",
|
||||
Help: "Warsaw",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa",
|
||||
@@ -242,33 +221,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California",
|
||||
}, {
|
||||
Value: "us-west3",
|
||||
Help: "Salt Lake City",
|
||||
}, {
|
||||
Value: "us-west4",
|
||||
Help: "Las Vegas",
|
||||
}, {
|
||||
Value: "northamerica-northeast1",
|
||||
Help: "Montréal",
|
||||
}, {
|
||||
Value: "northamerica-northeast2",
|
||||
Help: "Toronto",
|
||||
}, {
|
||||
Value: "southamerica-east1",
|
||||
Help: "São Paulo",
|
||||
}, {
|
||||
Value: "southamerica-west1",
|
||||
Help: "Santiago",
|
||||
}, {
|
||||
Value: "asia1",
|
||||
Help: "Dual region: asia-northeast1 and asia-northeast2.",
|
||||
}, {
|
||||
Value: "eur4",
|
||||
Help: "Dual region: europe-north1 and europe-west4.",
|
||||
}, {
|
||||
Value: "nam4",
|
||||
Help: "Dual region: us-central1 and us-east1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -295,15 +247,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -326,7 +269,6 @@ type Options struct {
|
||||
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -492,7 +434,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
||||
cache: bucket.NewCache(),
|
||||
}
|
||||
f.setRoot(root)
|
||||
@@ -850,14 +792,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
|
||||
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
|
||||
if f.opt.NoCheckBucket {
|
||||
return nil
|
||||
}
|
||||
return f.makeBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
// Rmdir deletes the bucket if the fs is at the root
|
||||
//
|
||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||
@@ -891,7 +825,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
err := f.makeBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1141,7 +1075,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
bucket, bucketPath := o.split()
|
||||
err := o.fs.checkBucket(ctx, bucket)
|
||||
err := o.fs.makeBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ var (
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -202,11 +202,7 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
|
||||
for _, entry := range baseEntries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
obj, err := f.wrapObject(x, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashEntries = append(hashEntries, obj)
|
||||
hashEntries = append(hashEntries, f.wrapObject(x, nil))
|
||||
default:
|
||||
hashEntries = append(hashEntries, entry) // trash in - trash out
|
||||
}
|
||||
@@ -255,7 +251,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
if do := f.Fs.Features().PutStream; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
return nil, errors.New("PutStream not supported")
|
||||
}
|
||||
@@ -265,7 +261,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
if do := f.Fs.Features().PutUnchecked; do != nil {
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := do(ctx, in, src, options...)
|
||||
return f.wrapObject(oResult, err)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
return nil, errors.New("PutUnchecked not supported")
|
||||
}
|
||||
@@ -352,7 +348,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(ctx, o.Object, remote)
|
||||
return f.wrapObject(oResult, err)
|
||||
return f.wrapObject(oResult, err), err
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
@@ -375,7 +371,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
dir: false,
|
||||
fs: f,
|
||||
})
|
||||
return f.wrapObject(oResult, nil)
|
||||
return f.wrapObject(oResult, nil), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
|
||||
@@ -414,7 +410,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(ctx, remote)
|
||||
return f.wrapObject(o, err)
|
||||
return f.wrapObject(o, err), err
|
||||
}
|
||||
|
||||
//
|
||||
@@ -428,15 +424,11 @@ type Object struct {
|
||||
}
|
||||
|
||||
// Wrap base object into hasher object
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
|
||||
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
|
||||
if err != nil || o == nil {
|
||||
return nil
|
||||
}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return &Object{Object: o, f: f}, nil
|
||||
return &Object{Object: o, f: f}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
|
||||
@@ -294,7 +294,7 @@ func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) strin
|
||||
if hashVal == "" || err != nil {
|
||||
hashVal = "-"
|
||||
}
|
||||
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
|
||||
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
|
||||
hashes = append(hashes, hashName+":"+hashVal)
|
||||
}
|
||||
hashesStr := strings.Join(hashes, " ")
|
||||
|
||||
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
|
||||
// Put data into the remote path with given modTime and size
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
var (
|
||||
o fs.Object
|
||||
o *Object
|
||||
common hash.Set
|
||||
rehash bool
|
||||
hashes hashMap
|
||||
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
_ = f.pruneHash(src.Remote())
|
||||
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
|
||||
o, err = f.wrapObject(oResult, err)
|
||||
if err != nil {
|
||||
o = f.wrapObject(oResult, err)
|
||||
if o == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
}
|
||||
}
|
||||
if len(hashes) > 0 {
|
||||
err := o.(*Object).putHashes(ctx, hashes)
|
||||
err := o.putHashes(ctx, hashes)
|
||||
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
|
||||
}
|
||||
return o, err
|
||||
|
||||
@@ -263,98 +263,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.client.RemoveAll(realpath)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Get the real paths from the remote specs:
|
||||
sourcePath := srcObj.fs.realpath(srcObj.remote)
|
||||
targetPath := f.realpath(remote)
|
||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
||||
|
||||
// Make sure the target folder exists:
|
||||
dirname := path.Dir(targetPath)
|
||||
err := f.client.MkdirAll(dirname, 0755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
|
||||
err = f.client.Rename(sourcePath, targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Look up the resulting object
|
||||
info, err := f.client.Stat(targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// And return it:
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: info.Size(),
|
||||
modTime: info.ModTime(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Get the real paths from the remote specs:
|
||||
sourcePath := srcFs.realpath(srcRemote)
|
||||
targetPath := f.realpath(dstRemote)
|
||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
||||
|
||||
// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exists, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}

// Make sure the target's parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}

// Do the move
|
||||
err = f.client.Rename(sourcePath, targetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
info, err := f.client.StatFs()
|
||||
@@ -410,6 +318,4 @@ var (
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -22,8 +22,9 @@ func init() {
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
@@ -35,6 +36,7 @@ func init() {
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
@@ -44,6 +46,7 @@ Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption is required when communicating with the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
|
||||
@@ -52,7 +52,8 @@ The input format is comma separated list of key,value pairs. Standard
|
||||
|
||||
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||
|
||||
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
|
||||
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
|
||||
`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -73,9 +74,8 @@ directories.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `Don't use HEAD requests.
|
||||
Help: `Don't use HEAD requests to find file sizes in dir listing.
|
||||
|
||||
HEAD requests are mainly used to find file sizes in dir listing.
|
||||
If your site is being very slow to load then you can try this option.
|
||||
Normally rclone does a HEAD request for each potential file in a
|
||||
directory listing to:
|
||||
@@ -84,9 +84,12 @@ directory listing to:
|
||||
- check it really exists
|
||||
- check to see if it is a directory
|
||||
|
||||
If you set this option, rclone will not do the HEAD request. This will mean
|
||||
that directory listings are much quicker, but rclone won't have the times or
|
||||
sizes of any files, and some files that don't exist may be in the listing.`,
|
||||
If you set this option, rclone will not do the HEAD request. This will mean
|
||||
|
||||
- directory listings are much quicker
|
||||
- rclone won't have the times or sizes of any files
|
||||
- some files that don't exist may be in the listing
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -130,87 +133,11 @@ func statusError(res *http.Response, err error) error {
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
_ = res.Body.Close()
|
||||
return fmt.Errorf("HTTP Error: %s", res.Status)
|
||||
return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFsEndpoint decides if url is to be considered a file or directory,
|
||||
// and returns a proper endpoint url to use for the fs.
|
||||
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
|
||||
// If url ends with '/' it is already a proper url always assumed to be a directory.
|
||||
if url[len(url)-1] == '/' {
|
||||
return url, false
|
||||
}
|
||||
|
||||
// If url does not end with '/' we send a HEAD request to decide
|
||||
// if it is directory or file, and if directory appends the missing
|
||||
// '/', or if file returns the directory url to parent instead.
|
||||
createFileResult := func() (string, bool) {
|
||||
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
|
||||
parent, _ := path.Split(url)
|
||||
return parent, true
|
||||
}
|
||||
createDirResult := func() (string, bool) {
|
||||
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
|
||||
return url + "/", false
|
||||
}
|
||||
|
||||
// If HEAD requests are not allowed we just have to assume it is a file.
|
||||
if opt.NoHead {
|
||||
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
|
||||
return createFileResult()
|
||||
}
|
||||
|
||||
// Use a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
|
||||
return createFileResult()
|
||||
}
|
||||
addHeaders(req, opt)
|
||||
res, err := noRedir.Do(req)
|
||||
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
|
||||
return createFileResult()
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
|
||||
return createDirResult()
|
||||
}
|
||||
if res.StatusCode == http.StatusMovedPermanently ||
|
||||
res.StatusCode == http.StatusFound ||
|
||||
res.StatusCode == http.StatusSeeOther ||
|
||||
res.StatusCode == http.StatusTemporaryRedirect ||
|
||||
res.StatusCode == http.StatusPermanentRedirect {
|
||||
redir := res.Header.Get("Location")
|
||||
if redir != "" {
|
||||
if redir[len(redir)-1] == '/' {
|
||||
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
|
||||
return createDirResult()
|
||||
}
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
|
||||
return createFileResult()
|
||||
}
|
||||
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
|
||||
return createFileResult()
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}

fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
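The decision rules in getFsEndpoint can be summarised in a much smaller standalone probe. This sketch only illustrates the idea (it ignores the redirect handling, header injection and the no-redirect client the real code uses), and the function name is invented for the example.

package probe

import (
	"net/http"
	"strings"
)

// probeEndpoint: a URL ending in '/' is taken as a directory; otherwise a
// HEAD request decides - 404 means "does not exist as a file, treat as a
// directory", any other outcome (2xx, 403, network error, ...) means
// "treat as a file and point the fs at the parent".
func probeEndpoint(client *http.Client, rawURL string) (endpoint string, isFile bool) {
	if strings.HasSuffix(rawURL, "/") {
		return rawURL, false
	}
	res, err := client.Head(rawURL) // note: follows redirects, unlike the real code
	if err == nil {
		defer res.Body.Close()
		if res.StatusCode == http.StatusNotFound {
			return rawURL + "/", false
		}
	}
	parent := rawURL[:strings.LastIndex(rawURL, "/")+1]
	return parent, true
}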
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -241,9 +168,37 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
||||
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
|
||||
fs.Debugf(nil, "Root: %s", endpoint)
|
||||
u, err = url.Parse(endpoint)
|
||||
var isFile = false
|
||||
if !strings.HasSuffix(u.String(), "/") {
|
||||
// Make a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
// check to see if points to a file
|
||||
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
|
||||
if err == nil {
|
||||
addHeaders(req, opt)
|
||||
res, err := noRedir.Do(req)
|
||||
err = statusError(res, err)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newRoot := u.String()
|
||||
if isFile {
|
||||
// Point to the parent if this is a file
|
||||
newRoot, _ = path.Split(u.String())
|
||||
} else {
|
||||
if !strings.HasSuffix(newRoot, "/") {
|
||||
newRoot += "/"
|
||||
}
|
||||
}
|
||||
|
||||
u, err = url.Parse(newRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -261,16 +216,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if isFile {
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(f.endpointURL, "/") {
|
||||
return nil, errors.New("internal error: url doesn't end with /")
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -346,7 +297,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// check it doesn't have URL parameters
|
||||
uStr := u.String()
|
||||
if strings.Contains(uStr, "?") {
|
||||
if strings.Index(uStr, "?") >= 0 {
|
||||
return "", errFoundQuestionMark
|
||||
}
|
||||
// check that this is going back to the same host and scheme
|
||||
@@ -458,7 +409,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
|
||||
return nil, fmt.Errorf("readDir: %w", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't parse content type %q", contentType)
|
||||
return nil, fmt.Errorf("Can't parse content type %q", contentType)
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
@@ -8,10 +8,8 @@ import (
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -26,11 +24,10 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
remoteName = "TestHTTP"
|
||||
testPath = "test"
|
||||
filesPath = filepath.Join(testPath, "files")
|
||||
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
|
||||
lineEndSize = 1
|
||||
remoteName = "TestHTTP"
|
||||
testPath = "test"
|
||||
filesPath = filepath.Join(testPath, "files")
|
||||
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
|
||||
)
|
||||
|
||||
// prepareServer the test server and return a function to tidy it up afterwards
|
||||
@@ -38,22 +35,6 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
// verify the file path is correct, and also check which line endings
|
||||
// are used to get sizes right ("\n" except on Windows, but even there
|
||||
// we may have "\n" or "\r\n" depending on git crlf setting)
|
||||
fileList, err := ioutil.ReadDir(filesPath)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(fileList), 0)
|
||||
for _, file := range fileList {
|
||||
if !file.IsDir() {
|
||||
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
|
||||
if strings.HasSuffix(string(data), "\r\n") {
|
||||
lineEndSize = 2
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// test the headers are there then pass on to fileServer
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
|
||||
@@ -110,7 +91,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
|
||||
e = entries[1]
|
||||
assert.Equal(t, "one%.txt", e.Remote())
|
||||
assert.Equal(t, int64(5+lineEndSize), e.Size())
|
||||
assert.Equal(t, int64(6), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
@@ -127,7 +108,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
_, ok = e.(fs.Directory)
|
||||
assert.True(t, ok)
|
||||
} else {
|
||||
assert.Equal(t, int64(40+lineEndSize), e.Size())
|
||||
assert.Equal(t, int64(41), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -160,7 +141,7 @@ func TestListSubDir(t *testing.T) {
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "three/underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(8+lineEndSize), e.Size())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -173,7 +154,7 @@ func TestNewObject(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "four/under four.txt", o.Remote())
|
||||
assert.Equal(t, int64(8+lineEndSize), o.Size())
|
||||
assert.Equal(t, int64(9), o.Size())
|
||||
_, ok := o.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
@@ -206,11 +187,7 @@ func TestOpen(t *testing.T) {
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
if lineEndSize == 2 {
|
||||
assert.Equal(t, "beetroot\r\n", string(data))
|
||||
} else {
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
}
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
|
||||
// Test with range request
|
||||
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
|
||||
@@ -259,7 +236,7 @@ func TestIsAFileSubDir(t *testing.T) {
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(8+lineEndSize), e.Size())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
@@ -376,106 +353,3 @@ func TestParseCaddy(t *testing.T) {
|
||||
"v1.36-22-g06ea13a-ssh-agentβ/",
|
||||
})
|
||||
}
|
||||
|
||||
func TestFsNoSlashRoots(t *testing.T) {
|
||||
// Test Fs with roots that does not end with '/', the logic that
|
||||
// decides if url is to be considered a file or directory, based
|
||||
// on result from a HEAD request.
|
||||
|
||||
// Handler for faking HEAD responses with different status codes
|
||||
headCount := 0
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "HEAD" {
|
||||
headCount++
|
||||
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
|
||||
require.NoError(t, err)
|
||||
if strings.HasPrefix(r.URL.String(), "/redirect/") {
|
||||
var redir string
|
||||
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
|
||||
redir = "/redirected"
|
||||
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
|
||||
redir = "/redirected/"
|
||||
} else {
|
||||
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
|
||||
}
|
||||
http.Redirect(w, r, redir, responseCode)
|
||||
} else {
|
||||
http.Error(w, http.StatusText(responseCode), responseCode)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(handler)
|
||||
defer ts.Close()
|
||||
|
||||
// Configure the remote
|
||||
configfile.Install()
|
||||
m := configmap.Simple{
|
||||
"type": "http",
|
||||
"url": ts.URL,
|
||||
}
|
||||
|
||||
// Test
|
||||
for i, test := range []struct {
|
||||
root string
|
||||
isFile bool
|
||||
}{
|
||||
// 2xx success
|
||||
{"parent/200", true},
|
||||
{"parent/204", true},
|
||||
|
||||
// 3xx redirection Redirect status 301, 302, 303, 307, 308
|
||||
{"redirect/file/301", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/302", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/303", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
|
||||
|
||||
{"redirect/file/307", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
|
||||
{"redirect/file/308", true}, // Request is redirected to "/redirected"
|
||||
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
|
||||
|
||||
// 4xx client errors
|
||||
{"parent/403", true}, // Forbidden status (head request blocked)
|
||||
{"parent/404", false}, // Not found status
|
||||
} {
|
||||
for _, noHead := range []bool{false, true} {
|
||||
var isFile bool
|
||||
if noHead {
|
||||
m.Set("no_head", "true")
|
||||
isFile = true
|
||||
} else {
|
||||
m.Set("no_head", "false")
|
||||
isFile = test.isFile
|
||||
}
|
||||
headCount = 0
|
||||
f, err := NewFs(context.Background(), remoteName, test.root, m)
|
||||
if noHead {
|
||||
assert.Equal(t, 0, headCount)
|
||||
} else {
|
||||
assert.Equal(t, 1, headCount)
|
||||
}
|
||||
if isFile {
|
||||
assert.ErrorIs(t, err, fs.ErrorIsFile)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
var endpoint string
|
||||
if isFile {
|
||||
parent, _ := path.Split(test.root)
|
||||
endpoint = "/" + parent
|
||||
} else {
|
||||
endpoint = "/" + test.root + "/"
|
||||
}
|
||||
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
|
||||
assert.Equal(t, ts.URL+endpoint, f.String(), what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -70,10 +69,6 @@ const (
|
||||
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaCloudClientID = "desktop"
|
||||
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -136,9 +131,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
}, {
|
||||
Value: "telia",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -246,21 +238,6 @@ machines.`)
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "tele2": // tele2 cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, tele2CloudClientID)
|
||||
m.Set(configTokenURL, tele2CloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
},
|
||||
ClientID: tele2CloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
|
||||
case "choose_device_query":
|
||||
@@ -932,120 +909,48 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
type listStreamTime time.Time
|
||||
// listFileDirFn is called from listFileDir to handle an object.
|
||||
type listFileDirFn func(fs.DirEntry) error
|
||||
|
||||
func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
var v string
|
||||
if err := d.DecodeElement(&v, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*c = listStreamTime(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c listStreamTime) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
|
||||
}
|
||||
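For readers unfamiliar with custom XML unmarshalling, here is a self-contained illustration of the pattern used by listStreamTime above; the type and field names are invented for the example.

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// xmlTime decodes an RFC3339 timestamp from an XML element, exactly as
// listStreamTime does above.
type xmlTime time.Time

func (c *xmlTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var v string
	if err := d.DecodeElement(&v, &start); err != nil {
		return err
	}
	t, err := time.Parse(time.RFC3339, v)
	if err != nil {
		return err
	}
	*c = xmlTime(t)
	return nil
}

func main() {
	var file struct {
		Modified xmlTime `xml:"modified"`
	}
	data := []byte("<file><modified>2021-10-01T12:34:56Z</modified></file>")
	if err := xml.Unmarshal(data, &file); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(file.Modified)) // 2021-10-01 12:34:56 +0000 UTC
}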
|
||||
func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
|
||||
|
||||
type stats struct {
|
||||
Folders int `xml:"folders"`
|
||||
Files int `xml:"files"`
|
||||
}
|
||||
var expected, actual stats
|
||||
|
||||
type xmlFile struct {
|
||||
Path string `xml:"path"`
|
||||
Name string `xml:"filename"`
|
||||
Checksum string `xml:"md5"`
|
||||
Size int64 `xml:"size"`
|
||||
Modified listStreamTime `xml:"modified"`
|
||||
Created listStreamTime `xml:"created"`
|
||||
}
|
||||
|
||||
type xmlFolder struct {
|
||||
Path string `xml:"path"`
|
||||
}
|
||||
|
||||
addFolder := func(path string) error {
|
||||
return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
|
||||
}
|
||||
|
||||
addFile := func(f *xmlFile) error {
|
||||
return callback(&Object{
|
||||
hasMetaData: true,
|
||||
fs: filesystem,
|
||||
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
|
||||
size: f.Size,
|
||||
md5: f.Checksum,
|
||||
modTime: time.Time(f.Modified),
|
||||
})
|
||||
}
|
||||
|
||||
trimPathPrefix := func(p string) string {
|
||||
p = strings.TrimPrefix(p, trimPrefix)
|
||||
p = strings.TrimPrefix(p, "/")
|
||||
return p
|
||||
}
|
||||
|
||||
uniqueFolders := map[string]bool{}
|
||||
decoder := xml.NewDecoder(r)
|
||||
|
||||
for {
|
||||
t, err := decoder.Token()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
break
|
||||
// List the objects and directories into entries, from a
|
||||
// special kind of JottaFolder representing a FileDirList
|
||||
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
|
||||
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
|
||||
pathPrefixLength := len(pathPrefix)
|
||||
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
|
||||
startPathLength := len(startPath)
|
||||
for i := range startFolder.Folders {
|
||||
folder := &startFolder.Folders[i]
|
||||
if !f.validFolder(folder) {
|
||||
return nil
|
||||
}
|
||||
switch se := t.(type) {
|
||||
case xml.StartElement:
|
||||
switch se.Name.Local {
|
||||
case "file":
|
||||
var f xmlFile
|
||||
if err := decoder.DecodeElement(&f, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Path = trimPathPrefix(f.Path)
|
||||
actual.Files++
|
||||
if !uniqueFolders[f.Path] {
|
||||
uniqueFolders[f.Path] = true
|
||||
actual.Folders++
|
||||
if err := addFolder(f.Path); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := addFile(&f); err != nil {
|
||||
return err
|
||||
}
|
||||
case "folder":
|
||||
var f xmlFolder
|
||||
if err := decoder.DecodeElement(&f, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
f.Path = trimPathPrefix(f.Path)
|
||||
uniqueFolders[f.Path] = true
|
||||
actual.Folders++
|
||||
if err := addFolder(f.Path); err != nil {
|
||||
return err
|
||||
}
|
||||
case "stats":
|
||||
if err := decoder.DecodeElement(&expected, &se); err != nil {
|
||||
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
|
||||
folderPathLength := len(folderPath)
|
||||
var remoteDir string
|
||||
if folderPathLength > pathPrefixLength {
|
||||
remoteDir = folderPath[pathPrefixLength+1:]
|
||||
if folderPathLength > startPathLength {
|
||||
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
|
||||
err := fn(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range folder.Files {
|
||||
file := &folder.Files[i]
|
||||
if f.validFile(file) {
|
||||
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
|
||||
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = fn(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if expected.Folders != actual.Folders ||
|
||||
expected.Files != actual.Files {
|
||||
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1061,27 +966,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
Path: f.filePath(dir),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
opts.Parameters.Set("mode", "liststream")
|
||||
list := walk.NewListRHelper(callback)
|
||||
opts.Parameters.Set("mode", "list")
|
||||
|
||||
var resp *http.Response
|
||||
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
}
|
||||
|
||||
// liststream paths are /mountpoint/root/path
|
||||
// so the returned paths should have /mountpoint/root/ trimmed
|
||||
// as the caller is expecting path.
|
||||
trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
|
||||
err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
|
||||
if d.Remote() == dir {
|
||||
return nil
|
||||
}
|
||||
return list.Add(d)
|
||||
})
|
||||
_ = resp.Body.Close()
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1093,6 +983,10 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
}
|
||||
return fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
|
||||
return list.Add(entry)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -28,57 +28,33 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "koofr",
|
||||
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
|
||||
Description: "Koofr",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your storage provider.",
|
||||
// NOTE if you add a new provider here, then add it in the
|
||||
// setProviderDefaults() function and update options accordingly
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "koofr",
|
||||
Help: "Koofr, https://app.koofr.net/",
|
||||
}, {
|
||||
Value: "digistorage",
|
||||
Help: "Digi Storage, https://storage.rcs-rds.ro/",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Any other Koofr API compatible storage service",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "The Koofr API endpoint to use.",
|
||||
Provider: "other",
|
||||
Default: "https://app.koofr.net",
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "mountid",
|
||||
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
|
||||
Required: false,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "setmtime",
|
||||
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
|
||||
Default: true,
|
||||
Required: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Help: "Your Koofr user name.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
Provider: "koofr",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
|
||||
Provider: "digistorage",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at your service's settings page).",
|
||||
Provider: "other",
|
||||
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}, {
|
||||
@@ -95,7 +71,6 @@ func init() {
|
||||
|
||||
// Options represent the configuration of the Koofr backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
MountID string `config:"mountid"`
|
||||
User string `config:"user"`
|
||||
@@ -280,38 +255,13 @@ func (f *Fs) fullPath(part string) string {
|
||||
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
|
||||
}
|
||||
|
||||
func setProviderDefaults(opt *Options) {
|
||||
// handle old, provider-less configs
|
||||
if opt.Provider == "" {
|
||||
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
|
||||
opt.Provider = "koofr"
|
||||
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
|
||||
opt.Provider = "digistorage"
|
||||
} else {
|
||||
opt.Provider = "other"
|
||||
}
|
||||
}
|
||||
// now assign an endpoint
|
||||
if opt.Provider == "koofr" {
|
||||
opt.Endpoint = "https://app.koofr.net"
|
||||
} else if opt.Provider == "digistorage" {
|
||||
opt.Endpoint = "https://storage.rcs-rds.ro"
|
||||
}
|
||||
}
|
||||
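The defaulting rules above amount to a small endpoint-to-provider mapping. A standalone restatement follows; the function name is made up for the example and the values are copied from the code above.

package main

import (
	"fmt"
	"strings"
)

// providerForEndpoint restates the legacy-config rule in setProviderDefaults:
// an empty or app.koofr.net endpoint means "koofr", storage.rcs-rds.ro means
// "digistorage", anything else is "other".
func providerForEndpoint(endpoint string) string {
	switch {
	case endpoint == "" || strings.HasPrefix(endpoint, "https://app.koofr.net"):
		return "koofr"
	case strings.HasPrefix(endpoint, "https://storage.rcs-rds.ro"):
		return "digistorage"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(providerForEndpoint(""))                           // koofr
	fmt.Println(providerForEndpoint("https://storage.rcs-rds.ro")) // digistorage
	fmt.Println(providerForEndpoint("https://example.com/api"))    // other
}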
|
||||
// NewFs constructs a new filesystem given a root path and rclone configuration options
|
||||
// NewFs constructs a new filesystem given a root path and configuration options
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setProviderDefaults(opt)
|
||||
return NewFsFromOptions(ctx, name, root, opt)
|
||||
}
|
||||
|
||||
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
|
||||
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -1133,9 +1133,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
// Wipe hashes before update
|
||||
o.clearHashCache()
|
||||
|
||||
var symlinkData bytes.Buffer
|
||||
// If the object is a regular file, create it.
|
||||
// If it is a translated link, just read in the contents, and
|
||||
@@ -1298,13 +1295,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
// clearHashCache wipes any cached hashes for the object
|
||||
func (o *Object) clearHashCache() {
|
||||
o.fs.objectMetaMu.Lock()
|
||||
o.hashes = nil
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
}
|
||||
|
||||
// Stat an Object into info
|
||||
func (o *Object) lstat() error {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
@@ -1316,7 +1306,6 @@ func (o *Object) lstat() error {
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
o.clearHashCache()
|
||||
return remove(o.path)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -168,64 +166,3 @@ func TestSymlinkError(t *testing.T) {
|
||||
_, err := NewFs(context.Background(), "local", "/", m)
|
||||
assert.Equal(t, errLinksAndCopyLinks, err)
|
||||
}
|
||||
|
||||
// Test hashes on updating an object
|
||||
func TestHashOnUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Get the object
|
||||
o, err := f.NewObject(ctx, filePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test the hash is as we expect
|
||||
md5, err := o.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
|
||||
|
||||
// Reupload it with different contents but same size and timestamp
|
||||
var b = bytes.NewBufferString("CONTENT")
|
||||
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
|
||||
err = o.Update(ctx, b, src)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check the hash is as expected
|
||||
md5, err = o.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
|
||||
}
|
||||
|
||||
// Test hashes on deleting an object
|
||||
func TestHashOnDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Get the object
|
||||
o, err := f.NewObject(ctx, filePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test the hash is as we expect
|
||||
md5, err := o.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
|
||||
|
||||
// Delete the object
|
||||
require.NoError(t, o.Remove(ctx))
|
||||
|
||||
// Test the hash cache is empty
|
||||
require.Nil(t, o.(*Object).hashes)
|
||||
|
||||
// Test the hash returns an error
|
||||
_, err = o.Hash(ctx, hash.MD5)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ type UserInfoResponse struct {
|
||||
AutoProlong bool `json:"auto_prolong"`
|
||||
Basequota int64 `json:"basequota"`
|
||||
Enabled bool `json:"enabled"`
|
||||
Expires int64 `json:"expires"`
|
||||
Expires int `json:"expires"`
|
||||
Prolong bool `json:"prolong"`
|
||||
Promocodes struct {
|
||||
} `json:"promocodes"`
|
||||
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
|
||||
FileSizeLimit int64 `json:"file_size_limit"`
|
||||
Space struct {
|
||||
BytesTotal int64 `json:"bytes_total"`
|
||||
BytesUsed int64 `json:"bytes_used"`
|
||||
BytesUsed int `json:"bytes_used"`
|
||||
Overquota bool `json:"overquota"`
|
||||
} `json:"space"`
|
||||
} `json:"cloud"`
|
||||
|
||||
@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
}
|
||||
|
||||
total := info.Body.Cloud.Space.BytesTotal
|
||||
used := info.Body.Cloud.Space.BytesUsed
|
||||
used := int64(info.Body.Cloud.Space.BytesUsed)
|
||||
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(total),
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,16 +0,0 @@
|
||||
package netstorage_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/netstorage"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestnStorage:",
|
||||
NilObject: (*netstorage.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -65,12 +65,9 @@ var (
|
||||
authPath = "/common/oauth2/v2.0/authorize"
|
||||
tokenPath = "/common/oauth2/v2.0/token"
|
||||
|
||||
scopesWithSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
|
||||
scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: scopesWithSitePermission,
|
||||
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -140,26 +137,6 @@ Note that the chunks will be buffered into memory.`,
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: `ID of the root folder.
|
||||
|
||||
This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
|
||||
If set to true, you will no longer be able to search for a SharePoint site when
|
||||
configuring drive ID, because rclone will not request Sites.Read.All permission.
|
||||
Set it to true if your organization didn't assign Sites.Read.All permission to the
|
||||
application, and your organization disallows users to consent app permission
|
||||
request on their own.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "expose_onenote_files",
|
||||
Help: `Set to make OneNote files show up in directory listings.
|
||||
@@ -397,12 +374,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
region, graphURL := getRegionURL(m)
|
||||
|
||||
if config.State == "" {
|
||||
disableSitePermission, _ := m.Get("disable_site_permission")
|
||||
if disableSitePermission == "true" {
|
||||
oauthConfig.Scopes = scopesWithoutSitePermission
|
||||
} else {
|
||||
oauthConfig.Scopes = scopesWithSitePermission
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[region] + authPath,
|
||||
TokenURL: authEndpoint[region] + tokenPath,
|
||||
@@ -556,8 +527,6 @@ type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
DisableSitePermission bool `config:"disable_site_permission"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
@@ -649,12 +618,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
retry := false
|
||||
if resp != nil {
|
||||
switch resp.StatusCode {
|
||||
case 400:
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
|
||||
return false, fserrors.NoRetryError(err)
|
||||
}
|
||||
}
|
||||
case 401:
|
||||
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
retry = true
|
||||
@@ -826,11 +789,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopesWithoutSitePermission
|
||||
} else {
|
||||
oauthConfig.Scopes = scopesWithSitePermission
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
@@ -868,19 +826,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
})
|
||||
|
||||
// Get rootID
|
||||
var rootID = opt.RootFolderID
|
||||
if rootID == "" {
|
||||
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root: %w", err)
|
||||
}
|
||||
rootID = rootInfo.GetID()
|
||||
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get root: %w", err)
|
||||
}
|
||||
if rootID == "" {
|
||||
if rootInfo.GetID() == "" {
|
||||
return nil, errors.New("failed to get root: ID was empty")
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, rootID, f)
|
||||
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
@@ -888,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = tempF.dirCache.FindRoot(ctx, false)
|
||||
|
||||
@@ -136,8 +136,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
|
||||
func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// Output the data as little endian bytes
|
||||
ph := 0
|
||||
for i := 0; i < len(q.data)-1; i++ {
|
||||
d := q.data[i]
|
||||
for _, d := range q.data[:len(q.data)-1] {
|
||||
_ = h[ph+7] // bounds check
|
||||
h[ph+0] = byte(d >> (8 * 0))
|
||||
h[ph+1] = byte(d >> (8 * 1))
|
||||
|
||||
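
The checkSum hunk above serializes each 64-bit accumulator word into little-endian bytes with explicit shifts. A hedged sketch of the same byte layout for full words, using encoding/binary instead of manual shifting:

package main

import (
	"encoding/binary"
	"fmt"
)

// littleEndianBytes writes each 64-bit word into the output in little-endian
// order - the same layout the shift loop in the hunk above produces for
// complete words (the real hash also handles a trailing partial word).
func littleEndianBytes(words []uint64) []byte {
	out := make([]byte, 8*len(words))
	for i, w := range words {
		binary.LittleEndian.PutUint64(out[8*i:], w)
	}
	return out
}

func main() {
	fmt.Printf("% x\n", littleEndianBytes([]uint64{0x0102030405060708}))
	// prints: 08 07 06 05 04 03 02 01
}
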
@@ -2,6 +2,8 @@
|
||||
// object storage system.
|
||||
package pcloud
|
||||
|
||||
// FIXME implement ListR? /listfolder can do recursive lists
|
||||
|
||||
// FIXME cleanup returns login required?
|
||||
|
||||
// FIXME mime type? Fix overview if implement.
|
||||
@@ -25,7 +27,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
@@ -245,7 +246,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
||||
return nil, err
|
||||
}
|
||||
|
||||
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
|
||||
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
|
||||
if item.Name == leaf {
|
||||
info = item
|
||||
return true
|
||||
@@ -379,7 +380,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// Find the leaf in pathID
|
||||
found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
|
||||
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||
if item.Name == leaf {
|
||||
pathIDOut = item.ID
|
||||
return true
|
||||
@@ -445,16 +446,14 @@ type listAllFn func(*api.Item) bool
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/listfolder",
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
if recursive {
|
||||
opts.Parameters.Set("recursive", "1")
|
||||
}
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
|
||||
// FIXME can do recursive
|
||||
|
||||
var result api.ItemResult
|
||||
var resp *http.Response
|
||||
@@ -466,69 +465,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
|
||||
if err != nil {
|
||||
return found, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
var recursiveContents func(is []api.Item, path string)
|
||||
recursiveContents = func(is []api.Item, path string) {
|
||||
for i := range is {
|
||||
item := &is[i]
|
||||
if item.IsFolder {
|
||||
if filesOnly {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if directoriesOnly {
|
||||
continue
|
||||
}
|
||||
for i := range result.Metadata.Contents {
|
||||
item := &result.Metadata.Contents[i]
|
||||
if item.IsFolder {
|
||||
if filesOnly {
|
||||
continue
|
||||
}
|
||||
item.Name = path + f.opt.Enc.ToStandardName(item.Name)
|
||||
if fn(item) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
if recursive {
|
||||
recursiveContents(item.Contents, item.Name+"/")
|
||||
}
|
||||
}
|
||||
}
|
||||
recursiveContents(result.Metadata.Contents, "")
|
||||
return
|
||||
}
|
||||
|
||||
// listHelper iterates over all items from the directory
|
||||
// and calls the callback for each element.
|
||||
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
|
||||
remote := path.Join(dir, info.Name)
|
||||
if info.IsFolder {
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||
// FIXME more info from dir?
|
||||
iErr = callback(d)
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
if directoriesOnly {
|
||||
continue
|
||||
}
|
||||
iErr = callback(o)
|
||||
}
|
||||
if iErr != nil {
|
||||
return true
|
||||
item.Name = f.opt.Enc.ToStandardName(item.Name)
|
||||
if fn(item) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return iErr
|
||||
}
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
@@ -541,24 +495,36 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
|
||||
entries = append(entries, o)
|
||||
return nil
|
||||
})
|
||||
return entries, err
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
|
||||
return list.Add(o)
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
|
||||
remote := path.Join(dir, info.Name)
|
||||
if info.IsFolder {
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||
// FIXME more info from dir?
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return list.Flush()
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
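
The pcloud change above folds List and ListR into one lister; the ListR side buffers entries with walk.NewListRHelper before handing them to the callback. A minimal sketch of that pattern, assuming rclone's fs and walk packages are imported; listRecursively is a hypothetical stand-in for the backend's recursive /listfolder walk:

// Sketch only - assumes rclone's fs and walk packages;
// listRecursively is hypothetical and stands in for the recursive lister.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) error {
	list := walk.NewListRHelper(callback)
	err := f.listRecursively(ctx, dir, func(entry fs.DirEntry) error {
		return list.Add(entry) // buffers entries and sends them in batches
	})
	if err != nil {
		return err
	}
	return list.Flush() // deliver any entries still buffered
}
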
@@ -690,7 +656,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
|
||||
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
|
||||
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
|
||||
var resp *http.Response
|
||||
var result api.ItemResult
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -1171,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
opts.Parameters.Set("filename", leaf)
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("nopartial", "1")
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
|
||||
opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
|
||||
|
||||
// Special treatment for a 0 length upload. This doesn't work
|
||||
// with PUT even with Content-Length set (by setting
|
||||
|
||||
@@ -4,21 +4,16 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/putdotio/go-putio/putio"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
func checkStatusCode(resp *http.Response, expected ...int) error {
|
||||
for _, code := range expected {
|
||||
if resp.StatusCode == code {
|
||||
return nil
|
||||
}
|
||||
func checkStatusCode(resp *http.Response, expected int) error {
|
||||
if resp.StatusCode != expected {
|
||||
return &statusCodeError{response: resp}
|
||||
}
|
||||
return &statusCodeError{response: resp}
|
||||
return nil
|
||||
}
|
||||
|
||||
type statusCodeError struct {
|
||||
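
The hunk above generalizes checkStatusCode to accept any of several acceptable status codes instead of a single expected one. A hedged, self-contained version of that variadic check with an example call:

package main

import (
	"fmt"
	"net/http"
)

type statusCodeError struct{ response *http.Response }

func (e *statusCodeError) Error() string {
	return fmt.Sprintf("unexpected status code %d", e.response.StatusCode)
}

// checkStatusCode returns nil if the response status matches any of the
// expected codes, otherwise a *statusCodeError - as in the hunk above.
func checkStatusCode(resp *http.Response, expected ...int) error {
	for _, code := range expected {
		if resp.StatusCode == code {
			return nil
		}
	}
	return &statusCodeError{response: resp}
}

func main() {
	resp := &http.Response{StatusCode: 206}
	fmt.Println(checkStatusCode(resp, 200, 206)) // <nil>
	fmt.Println(checkStatusCode(resp, 201))      // unexpected status code 206
}
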
@@ -29,10 +24,8 @@ func (e *statusCodeError) Error() string {
|
||||
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
|
||||
}
|
||||
|
||||
// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
|
||||
// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
|
||||
func (e *statusCodeError) Temporary() bool {
|
||||
return e.response.StatusCode >= 500
|
||||
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
@@ -47,16 +40,6 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if perr, ok := err.(*putio.ErrorResponse); ok {
|
||||
err = &statusCodeError{response: perr.Response}
|
||||
}
|
||||
if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
|
||||
delay := defaultRateLimitSleep
|
||||
header := scerr.response.Header.Get("x-ratelimit-reset")
|
||||
if header != "" {
|
||||
if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
|
||||
delay = time.Until(time.Unix(resetTime+1, 0))
|
||||
}
|
||||
}
|
||||
return true, pacer.RetryAfterError(scerr, delay)
|
||||
}
|
||||
if fserrors.ShouldRetry(err) {
|
||||
return true, err
|
||||
}
|
||||
|
||||
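
The retry hunk above turns a 429 into a pacer retry delayed until the epoch second given in the x-ratelimit-reset header. A hedged sketch of just the delay calculation:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// retryDelay converts an x-ratelimit-reset header (epoch seconds) into a
// sleep duration, falling back to a default when the header is missing or
// malformed - the same calculation as in the hunk above.
func retryDelay(header string, fallback time.Duration) time.Duration {
	if header != "" {
		if resetTime, err := strconv.ParseInt(header, 10, 64); err == nil {
			return time.Until(time.Unix(resetTime+1, 0))
		}
	}
	return fallback
}

func main() {
	reset := time.Now().Add(30 * time.Second).Unix()
	fmt.Println(retryDelay(strconv.FormatInt(reset, 10), time.Minute))
}
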
@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := checkStatusCode(resp, 201); err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
if resp.StatusCode != 201 {
|
||||
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
|
||||
}
|
||||
location = resp.Header.Get("location")
|
||||
if location == "" {
|
||||
|
||||
@@ -241,13 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
|
||||
resp, err = o.fs.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
}
|
||||
if err := checkStatusCode(resp, 200, 206); err != nil {
|
||||
return shouldRetry(ctx, err)
|
||||
}
|
||||
return false, nil
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
|
||||
_ = resp.Body.Close()
|
||||
|
||||
@@ -33,9 +33,8 @@ const (
|
||||
rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultChunkSize = 48 * fs.Mebi
|
||||
defaultRateLimitSleep = 60 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
419
backend/s3/s3.go
@@ -58,7 +58,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Digital Ocean, Dreamhost, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, RackCorp, SeaweedFS, and Tencent COS",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Options: []fs.Option{{
|
||||
@@ -75,9 +75,6 @@ func init() {
|
||||
}, {
|
||||
Value: "Ceph",
|
||||
Help: "Ceph Object Storage",
|
||||
}, {
|
||||
Value: "ChinaMobile",
|
||||
Help: "China Mobile Ecloud Elastic Object Storage (EOS)",
|
||||
}, {
|
||||
Value: "DigitalOcean",
|
||||
Help: "Digital Ocean Spaces",
|
||||
@@ -87,9 +84,6 @@ func init() {
|
||||
}, {
|
||||
Value: "IBMCOS",
|
||||
Help: "IBM COS S3",
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
@@ -108,9 +102,6 @@ func init() {
|
||||
}, {
|
||||
Value: "StackPath",
|
||||
Help: "StackPath Object Storage",
|
||||
}, {
|
||||
Value: "Storj",
|
||||
Help: "Storj (S3 Compatible Gateway)",
|
||||
}, {
|
||||
Value: "TencentCOS",
|
||||
Help: "Tencent Cloud Object Storage (COS)",
|
||||
@@ -297,7 +288,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,RackCorp,Scaleway,Storj,TencentCOS",
|
||||
Provider: "!AWS,Alibaba,RackCorp,Scaleway,TencentCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -309,102 +300,6 @@ func init() {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
}, {
|
||||
// ChinaMobile endpoints: https://ecloud.10086.cn/op-help-center/doc/article/24534
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.",
|
||||
Provider: "ChinaMobile",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eos-wuxi-1.cmecloud.cn",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nEast China (Suzhou)",
|
||||
}, {
|
||||
Value: "eos-jinan-1.cmecloud.cn",
|
||||
Help: "East China (Jinan)",
|
||||
}, {
|
||||
Value: "eos-ningbo-1.cmecloud.cn",
|
||||
Help: "East China (Hangzhou)",
|
||||
}, {
|
||||
Value: "eos-shanghai-1.cmecloud.cn",
|
||||
Help: "East China (Shanghai-1)",
|
||||
}, {
|
||||
Value: "eos-zhengzhou-1.cmecloud.cn",
|
||||
Help: "Central China (Zhengzhou)",
|
||||
}, {
|
||||
Value: "eos-hunan-1.cmecloud.cn",
|
||||
Help: "Central China (Changsha-1)",
|
||||
}, {
|
||||
Value: "eos-zhuzhou-1.cmecloud.cn",
|
||||
Help: "Central China (Changsha-2)",
|
||||
}, {
|
||||
Value: "eos-guangzhou-1.cmecloud.cn",
|
||||
Help: "South China (Guangzhou-2)",
|
||||
}, {
|
||||
Value: "eos-dongguan-1.cmecloud.cn",
|
||||
Help: "South China (Guangzhou-3)",
|
||||
}, {
|
||||
Value: "eos-beijing-1.cmecloud.cn",
|
||||
Help: "North China (Beijing-1)",
|
||||
}, {
|
||||
Value: "eos-beijing-2.cmecloud.cn",
|
||||
Help: "North China (Beijing-2)",
|
||||
}, {
|
||||
Value: "eos-beijing-4.cmecloud.cn",
|
||||
Help: "North China (Beijing-3)",
|
||||
}, {
|
||||
Value: "eos-huhehaote-1.cmecloud.cn",
|
||||
Help: "North China (Huhehaote)",
|
||||
}, {
|
||||
Value: "eos-chengdu-1.cmecloud.cn",
|
||||
Help: "Southwest China (Chengdu)",
|
||||
}, {
|
||||
Value: "eos-chongqing-1.cmecloud.cn",
|
||||
Help: "Southwest China (Chongqing)",
|
||||
}, {
|
||||
Value: "eos-guiyang-1.cmecloud.cn",
|
||||
Help: "Southwest China (Guiyang)",
|
||||
}, {
|
||||
Value: "eos-xian-1.cmecloud.cn",
|
||||
Help: "Nouthwest China (Xian)",
|
||||
}, {
|
||||
Value: "eos-yunnan.cmecloud.cn",
|
||||
Help: "Yunnan China (Kunming)",
|
||||
}, {
|
||||
Value: "eos-yunnan-2.cmecloud.cn",
|
||||
Help: "Yunnan China (Kunming-2)",
|
||||
}, {
|
||||
Value: "eos-tianjin-1.cmecloud.cn",
|
||||
Help: "Tianjin China (Tianjin)",
|
||||
}, {
|
||||
Value: "eos-jilin-1.cmecloud.cn",
|
||||
Help: "Jilin China (Changchun)",
|
||||
}, {
|
||||
Value: "eos-hubei-1.cmecloud.cn",
|
||||
Help: "Hubei China (Xiangyan)",
|
||||
}, {
|
||||
Value: "eos-jiangxi-1.cmecloud.cn",
|
||||
Help: "Jiangxi China (Nanchang)",
|
||||
}, {
|
||||
Value: "eos-gansu-1.cmecloud.cn",
|
||||
Help: "Gansu China (Lanzhou)",
|
||||
}, {
|
||||
Value: "eos-shanxi-1.cmecloud.cn",
|
||||
Help: "Shanxi China (Taiyuan)",
|
||||
}, {
|
||||
Value: "eos-liaoning-1.cmecloud.cn",
|
||||
Help: "Liaoning China (Shenyang)",
|
||||
}, {
|
||||
Value: "eos-hebei-1.cmecloud.cn",
|
||||
Help: "Hebei China (Shijiazhuang)",
|
||||
}, {
|
||||
Value: "eos-fujian-1.cmecloud.cn",
|
||||
Help: "Fujian China (Xiamen)",
|
||||
}, {
|
||||
Value: "eos-guangxi-1.cmecloud.cn",
|
||||
Help: "Guangxi China (Nanning)",
|
||||
}, {
|
||||
Value: "eos-anhui-1.cmecloud.cn",
|
||||
Help: "Anhui China (Huainan)",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for IBM COS S3 API.\n\nSpecify if using an IBM COS On Premise.",
|
||||
@@ -702,20 +597,6 @@ func init() {
|
||||
Value: "s3.eu-central-1.stackpathstorage.com",
|
||||
Help: "EU Endpoint",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint of the Shared Gateway.",
|
||||
Provider: "Storj",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "gateway.eu1.storjshare.io",
|
||||
Help: "EU1 Shared Gateway",
|
||||
}, {
|
||||
Value: "gateway.us1.storjshare.io",
|
||||
Help: "US1 Shared Gateway",
|
||||
}, {
|
||||
Value: "gateway.ap1.storjshare.io",
|
||||
Help: "Asia-Pacific Shared Gateway",
|
||||
}},
|
||||
}, {
|
||||
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
|
||||
Name: "endpoint",
|
||||
@@ -845,7 +726,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,ChinaMobile,Scaleway,StackPath,Storj,RackCorp",
|
||||
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,RackCorp",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -866,18 +747,6 @@ func init() {
|
||||
Value: "localhost:8333",
|
||||
Help: "SeaweedFS S3 localhost",
|
||||
Provider: "SeaweedFS",
|
||||
}, {
|
||||
Value: "s3.us-east-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud US East 1 (Virginia)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.us-west-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud US West 1 (California)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi US East endpoint",
|
||||
@@ -892,11 +761,7 @@ func init() {
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-northeast-1.wasabisys.com",
|
||||
Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-northeast-2.wasabisys.com",
|
||||
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
|
||||
Help: "Wasabi AP Northeast endpoint",
|
||||
Provider: "Wasabi",
|
||||
}},
|
||||
}, {
|
||||
@@ -979,101 +844,6 @@ func init() {
|
||||
Value: "us-gov-west-1",
|
||||
Help: "AWS GovCloud (US) Region",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must match endpoint.\n\nUsed when creating buckets only.",
|
||||
Provider: "ChinaMobile",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "wuxi1",
|
||||
Help: "East China (Suzhou)",
|
||||
}, {
|
||||
Value: "jinan1",
|
||||
Help: "East China (Jinan)",
|
||||
}, {
|
||||
Value: "ningbo1",
|
||||
Help: "East China (Hangzhou)",
|
||||
}, {
|
||||
Value: "shanghai1",
|
||||
Help: "East China (Shanghai-1)",
|
||||
}, {
|
||||
Value: "zhengzhou1",
|
||||
Help: "Central China (Zhengzhou)",
|
||||
}, {
|
||||
Value: "hunan1",
|
||||
Help: "Central China (Changsha-1)",
|
||||
}, {
|
||||
Value: "zhuzhou1",
|
||||
Help: "Central China (Changsha-2)",
|
||||
}, {
|
||||
Value: "guangzhou1",
|
||||
Help: "South China (Guangzhou-2)",
|
||||
}, {
|
||||
Value: "dongguan1",
|
||||
Help: "South China (Guangzhou-3)",
|
||||
}, {
|
||||
Value: "beijing1",
|
||||
Help: "North China (Beijing-1)",
|
||||
}, {
|
||||
Value: "beijing2",
|
||||
Help: "North China (Beijing-2)",
|
||||
}, {
|
||||
Value: "beijing4",
|
||||
Help: "North China (Beijing-3)",
|
||||
}, {
|
||||
Value: "huhehaote1",
|
||||
Help: "North China (Huhehaote)",
|
||||
}, {
|
||||
Value: "chengdu1",
|
||||
Help: "Southwest China (Chengdu)",
|
||||
}, {
|
||||
Value: "chongqing1",
|
||||
Help: "Southwest China (Chongqing)",
|
||||
}, {
|
||||
Value: "guiyang1",
|
||||
Help: "Southwest China (Guiyang)",
|
||||
}, {
|
||||
Value: "xian1",
|
||||
Help: "Nouthwest China (Xian)",
|
||||
}, {
|
||||
Value: "yunnan",
|
||||
Help: "Yunnan China (Kunming)",
|
||||
}, {
|
||||
Value: "yunnan2",
|
||||
Help: "Yunnan China (Kunming-2)",
|
||||
}, {
|
||||
Value: "tianjin1",
|
||||
Help: "Tianjin China (Tianjin)",
|
||||
}, {
|
||||
Value: "jilin1",
|
||||
Help: "Jilin China (Changchun)",
|
||||
}, {
|
||||
Value: "hubei1",
|
||||
Help: "Hubei China (Xiangyan)",
|
||||
}, {
|
||||
Value: "jiangxi1",
|
||||
Help: "Jiangxi China (Nanchang)",
|
||||
}, {
|
||||
Value: "gansu1",
|
||||
Help: "Gansu China (Lanzhou)",
|
||||
}, {
|
||||
Value: "shanxi1",
|
||||
Help: "Shanxi China (Taiyuan)",
|
||||
}, {
|
||||
Value: "liaoning1",
|
||||
Help: "Liaoning China (Shenyang)",
|
||||
}, {
|
||||
Value: "hebei1",
|
||||
Help: "Hebei China (Shijiazhuang)",
|
||||
}, {
|
||||
Value: "fujian1",
|
||||
Help: "Fujian China (Xiamen)",
|
||||
}, {
|
||||
Value: "guangxi1",
|
||||
Help: "Guangxi China (Nanning)",
|
||||
}, {
|
||||
Value: "anhui1",
|
||||
Help: "Anhui China (Huainan)",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
|
||||
@@ -1240,7 +1010,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS,Alibaba,ChinaMobile,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,TencentCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1250,8 +1020,10 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
|
||||
Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
Provider: "!Storj",
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
|
||||
If the ACL is set as the string "unset" then rclone won't set the ACL
|
||||
header so it will use the default of the cloud provider.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
@@ -1275,11 +1047,11 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
|
||||
}, {
|
||||
Value: "bucket-owner-read",
|
||||
Help: "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
|
||||
Provider: "!IBMCOS,ChinaMobile",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "bucket-owner-full-control",
|
||||
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
|
||||
Provider: "!IBMCOS,ChinaMobile",
|
||||
Provider: "!IBMCOS",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
|
||||
@@ -1328,7 +1100,7 @@ isn't set then "acl" is used instead.`,
|
||||
}, {
|
||||
Name: "server_side_encryption",
|
||||
Help: "The server-side encryption algorithm used when storing this object in S3.",
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Provider: "AWS,Ceph,Minio",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
@@ -1336,14 +1108,13 @@ isn't set then "acl" is used instead.`,
|
||||
Value: "AES256",
|
||||
Help: "AES256",
|
||||
}, {
|
||||
Value: "aws:kms",
|
||||
Help: "aws:kms",
|
||||
Provider: "!ChinaMobile",
|
||||
Value: "aws:kms",
|
||||
Help: "aws:kms",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_algorithm",
|
||||
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Provider: "AWS,Ceph,Minio",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -1366,7 +1137,7 @@ isn't set then "acl" is used instead.`,
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Provider: "AWS,Ceph,Minio",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -1378,7 +1149,7 @@ isn't set then "acl" is used instead.`,
|
||||
|
||||
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
|
||||
`,
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Provider: "AWS,Ceph,Minio",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -1412,9 +1183,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
}, {
|
||||
Value: "INTELLIGENT_TIERING",
|
||||
Help: "Intelligent-Tiering storage class",
|
||||
}, {
|
||||
Value: "GLACIER_IR",
|
||||
Help: "Glacier Instant Retrieval storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
|
||||
@@ -1434,24 +1202,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://ecloud.10086.cn/op-help-center/doc/article/24495
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in ChinaMobile.",
|
||||
Provider: "ChinaMobile",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode",
|
||||
}, {
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
|
||||
Name: "storage_class",
|
||||
@@ -1776,14 +1526,6 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
|
||||
This is usually set to a CloudFront CDN URL as AWS S3 offers
|
||||
cheaper egress for data downloaded through the CloudFront network.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_multipart_etag",
|
||||
Help: `Whether to use ETag in multipart uploads for verification
|
||||
|
||||
This should be true, false or left unset to use the default for the provider.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
},
|
||||
}})
|
||||
}
|
||||
@@ -1848,7 +1590,6 @@ type Options struct {
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
DisableHTTP2 bool `config:"disable_http2"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
UseMultipartEtag fs.Tristate `config:"use_multipart_etag"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -2152,21 +1893,16 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
// No quirks
|
||||
case "Alibaba":
|
||||
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
|
||||
// No quirks
|
||||
case "Ceph":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
case "ChinaMobile":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
case "DigitalOcean":
|
||||
urlEncodeListings = false
|
||||
case "Dreamhost":
|
||||
@@ -2175,18 +1911,13 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
case "LyveCloud":
|
||||
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
|
||||
case "Minio":
|
||||
virtualHostStyle = false
|
||||
case "Netease":
|
||||
listObjectsV2 = false // untested
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
case "RackCorp":
|
||||
// No quirks
|
||||
useMultipartEtag = false // untested
|
||||
case "Scaleway":
|
||||
// Scaleway can only have 1000 parts in an upload
|
||||
if opt.MaxUploadParts > 1000 {
|
||||
@@ -2197,32 +1928,23 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
case "StackPath":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
case "Storj":
|
||||
// Force chunk size to >= 64 MiB
|
||||
if opt.ChunkSize < 64*fs.Mebi {
|
||||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
case "TencentCOS":
|
||||
listObjectsV2 = false // untested
|
||||
useMultipartEtag = false // untested
|
||||
listObjectsV2 = false // untested
|
||||
case "Wasabi":
|
||||
// No quirks
|
||||
case "Other":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
default:
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
}
|
||||
|
||||
// Path Style vs Virtual Host style
|
||||
@@ -2244,12 +1966,6 @@ func setQuirks(opt *Options) {
|
||||
opt.ListVersion = 1
|
||||
}
|
||||
}
|
||||
|
||||
// Set the correct use multipart Etag for error checking if not manually set
|
||||
if !opt.UseMultipartEtag.Valid {
|
||||
opt.UseMultipartEtag.Valid = true
|
||||
opt.UseMultipartEtag.Value = useMultipartEtag
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@@ -2277,6 +1993,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.ACL == "" {
|
||||
opt.ACL = "private"
|
||||
}
|
||||
if opt.ACL == "unset" {
|
||||
opt.ACL = ""
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
}
|
||||
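
The NewFs hunk above maps acl = "unset" to an empty string so later requests skip the ACL header entirely and the provider default applies. A hedged sketch of that resolution; resolveACL is a hypothetical helper, not rclone API:

package main

import "fmt"

// resolveACL mirrors the hunk above: empty means the "private" default,
// and the literal "unset" means "send no ACL header at all".
func resolveACL(configured string) (acl string, send bool) {
	switch configured {
	case "":
		return "private", true
	case "unset":
		return "", false // leave req.ACL nil so the provider default applies
	default:
		return configured, true
	}
}

func main() {
	fmt.Println(resolveACL(""))      // private true
	fmt.Println(resolveACL("unset")) // "" false
}
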
@@ -2350,11 +2069,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
if opt.Provider == "Storj" {
|
||||
f.features.Copy = nil
|
||||
f.features.SetTier = false
|
||||
f.features.GetTier = false
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
return f, nil
|
||||
}
|
||||
@@ -2818,7 +2532,9 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||
return f.cache.Create(bucket, func() error {
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &bucket,
|
||||
ACL: &f.opt.BucketACL,
|
||||
}
|
||||
if f.opt.BucketACL != "" {
|
||||
req.ACL = &f.opt.BucketACL
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
@@ -2883,7 +2599,9 @@ func pathEscape(s string) string {
|
||||
// method
|
||||
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
|
||||
req.Bucket = &dstBucket
|
||||
req.ACL = &f.opt.ACL
|
||||
if f.opt.ACL != "" {
|
||||
req.ACL = &f.opt.ACL
|
||||
}
|
||||
req.Key = &dstPath
|
||||
source := pathEscape(path.Join(srcBucket, srcPath))
|
||||
req.CopySource = &source
|
||||
@@ -3501,6 +3219,9 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.LastModified == nil {
|
||||
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
|
||||
}
|
||||
o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
|
||||
return nil
|
||||
}
|
||||
@@ -3530,7 +3251,6 @@ func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *t
|
||||
o.storageClass = aws.StringValue(storageClass)
|
||||
if lastModified == nil {
|
||||
o.lastModified = time.Now()
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
} else {
|
||||
o.lastModified = *lastModified
|
||||
}
|
||||
@@ -3611,7 +3331,11 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contentLength := &resp.ContentLength
|
||||
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to parse content length from string %s, %v", resp.Header.Get("Content-Length"), err)
|
||||
}
|
||||
contentLength := &size
|
||||
if resp.Header.Get("Content-Range") != "" {
|
||||
var contentRange = resp.Header.Get("Content-Range")
|
||||
slash := strings.IndexRune(contentRange, '/')
|
||||
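
The download-URL hunk above reads the size from the Content-Length header and then checks Content-Range, presumably to prefer the total after the "/" for ranged responses (that continuation is cut off in the hunk, so it is an assumption here). A hedged sketch of that header parsing:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sizeFromHeaders takes Content-Length, but if a Content-Range such as
// "bytes 0-999/12345" is present, uses the total after the slash instead.
// Assumption: this matches the intent of the truncated hunk above.
func sizeFromHeaders(contentLength, contentRange string) (int64, error) {
	size, err := strconv.ParseInt(contentLength, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("bad Content-Length %q: %w", contentLength, err)
	}
	if contentRange != "" {
		if slash := strings.IndexRune(contentRange, '/'); slash >= 0 {
			if total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64); err == nil {
				size = total
			}
		}
	}
	return size, nil
}

func main() {
	fmt.Println(sizeFromHeaders("1000", "bytes 0-999/12345")) // 12345 <nil>
}
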
@@ -3702,7 +3426,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.LastModified == nil {
|
||||
fs.Logf(o, "Failed to read last modified: %v", err)
|
||||
}
|
||||
// read size from ContentLength or ContentRange
|
||||
size := resp.ContentLength
|
||||
if resp.ContentRange != nil {
|
||||
@@ -3725,7 +3451,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, err error) {
|
||||
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
|
||||
f := o.fs
|
||||
|
||||
// make concurrency machinery
|
||||
@@ -3772,7 +3498,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return etag, fmt.Errorf("multipart upload failed to initialise: %w", err)
|
||||
return fmt.Errorf("multipart upload failed to initialise: %w", err)
|
||||
}
|
||||
uid := cout.UploadId
|
||||
|
||||
@@ -3801,21 +3527,8 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
partsMu sync.Mutex // to protect parts
|
||||
parts []*s3.CompletedPart
|
||||
off int64
|
||||
md5sMu sync.Mutex
|
||||
md5s []byte
|
||||
)
|
||||
|
||||
addMd5 := func(md5binary *[md5.Size]byte, partNum int64) {
|
||||
md5sMu.Lock()
|
||||
defer md5sMu.Unlock()
|
||||
start := partNum * md5.Size
|
||||
end := start + md5.Size
|
||||
if extend := end - int64(len(md5s)); extend > 0 {
|
||||
md5s = append(md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
for partNum := int64(1); !finished; partNum++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
tokens.Get()
|
||||
@@ -3845,7 +3558,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
finished = true
|
||||
} else if err != nil {
|
||||
free()
|
||||
return etag, fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
return fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
@@ -3858,7 +3571,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
|
||||
// create checksum of buffer for integrity checking
|
||||
md5sumBinary := md5.Sum(buf)
|
||||
addMd5(&md5sumBinary, partNum-1)
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -3900,7 +3612,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return etag, err
|
||||
return err
|
||||
}
|
||||
|
||||
// sort the completed parts by part number
|
||||
@@ -3921,11 +3633,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return etag, fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
return fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
}
|
||||
hashOfHashes := md5.Sum(md5s)
|
||||
etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
|
||||
return etag, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update the Object from in with modTime and size
|
||||
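
The uploadMultipart hunks above accumulate the binary MD5 of every part and finish by hashing the concatenation, producing the "<md5-of-md5s>-<parts>" ETag that is later compared against the HEAD response. A hedged, standalone sketch of that calculation:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// multipartETag computes an S3-style multipart ETag: MD5 over the
// concatenated binary MD5s of each part, then "-<number of parts>",
// matching the hashOfHashes calculation in the hunks above.
func multipartETag(parts [][]byte) string {
	md5s := make([]byte, 0, md5.Size*len(parts))
	for _, part := range parts {
		sum := md5.Sum(part)
		md5s = append(md5s, sum[:]...)
	}
	hashOfHashes := md5.Sum(md5s)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{[]byte("part one"), []byte("part two")}))
}
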
@@ -3951,20 +3661,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
var md5sum string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
hash, err := src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(hash) {
|
||||
hashBytes, err := hex.DecodeString(hash)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = &md5sumBase64
|
||||
metadata[metaMD5Hash] = &md5sum
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3974,13 +3683,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &bucketPath,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
req.ContentMD5 = &md5sumBase64
|
||||
if o.fs.opt.ACL != "" {
|
||||
req.ACL = &o.fs.opt.ACL
|
||||
}
|
||||
if md5sum != "" {
|
||||
req.ContentMD5 = &md5sum
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
req.RequestPayer = aws.String(s3.RequestPayerRequester)
|
||||
@@ -4034,9 +3745,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
var resp *http.Response // response from PUT
|
||||
var wantETag string // Multipart upload Etag to check
|
||||
if multipart {
|
||||
wantETag, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
err = o.uploadMultipart(ctx, &req, size, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -4098,7 +3808,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// so make up the object as best we can assuming it got
|
||||
// uploaded properly. If size < 0 then we need to do the HEAD.
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
o.md5 = md5sumHex
|
||||
o.md5 = md5sum
|
||||
o.bytes = size
|
||||
o.lastModified = time.Now()
|
||||
o.meta = req.Metadata
|
||||
@@ -4116,18 +3826,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
head, err := o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(head.ETag, head.ContentLength, head.LastModified, head.Metadata, head.ContentType, head.StorageClass)
|
||||
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
|
||||
gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
|
||||
if wantETag != gotETag {
|
||||
return fmt.Errorf("multipart upload corrupted: Etag differ: expecting %s but got %s", wantETag, gotETag)
|
||||
}
|
||||
fs.Debugf(o, "Multipart upload Etag: %s OK", wantETag)
|
||||
}
|
||||
err = o.readMetaData(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -42,8 +42,7 @@ const (
|
||||
hashCommandNotSupported = "none"
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
keepAliveInterval = time.Minute // send keepalives every this long while running commands
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -60,13 +59,11 @@ func init() {
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SSH username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "SSH username, leave blank for current username, " + currentUser + ".",
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SSH port number.",
|
||||
Default: 22,
|
||||
Name: "port",
|
||||
Help: "SSH port, leave blank to use default (22).",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "SSH password, leave blank to use ssh-agent.",
|
||||
@@ -155,11 +152,11 @@ different. This issue affects among others Synology NAS boxes.
|
||||
|
||||
Shared folders can be found in directories representing volumes
|
||||
|
||||
rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory
|
||||
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory
|
||||
|
||||
Home directory can be found in a shared folder called "home"
|
||||
|
||||
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
|
||||
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_modtime",
|
||||
@@ -342,32 +339,6 @@ func (c *conn) wait() {
|
||||
c.err <- c.sshClient.Conn.Wait()
|
||||
}
|
||||
|
||||
// Send a keepalive over the ssh connection
|
||||
func (c *conn) sendKeepAlive() {
|
||||
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Failed to send keep alive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Send keepalives every interval over the ssh connection until done is closed
|
||||
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
|
||||
done = make(chan struct{})
|
||||
go func() {
|
||||
t := time.NewTicker(interval)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
c.sendKeepAlive()
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return done
|
||||
}
|
||||
|
||||
// Closes the connection
|
||||
func (c *conn) close() error {
|
||||
sftpErr := c.sftpClient.Close()
|
||||
@@ -1127,9 +1098,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
|
||||
}
|
||||
defer f.putSftpConnection(&c, err)
|
||||
|
||||
// Send keepalives while the connection is open
|
||||
defer close(c.sendKeepAlives(keepAliveInterval))
|
||||
|
||||
session, err := c.sshClient.NewSession()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("run: get SFTP session: %w", err)
|
||||
@@ -1142,12 +1110,10 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
|
||||
fs.Debugf(f, "Running remote command: %s", cmd)
|
||||
err = session.Run(cmd)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
|
||||
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
|
||||
}
|
||||
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
|
||||
|
||||
return stdout.Bytes(), nil
|
||||
}
|
||||
@@ -1189,8 +1155,8 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
changed := false
|
||||
md5Works := checkHash([]string{"md5sum", "md5 -r", "rclone md5sum"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
|
||||
sha1Works := checkHash([]string{"sha1sum", "sha1 -r", "rclone sha1sum"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
|
||||
md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
|
||||
sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
|
||||
|
||||
if changed {
|
||||
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
|
||||
@@ -1264,6 +1230,8 @@ func (o *Object) Remote() string {
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
o.fs.addSession() // Show session in use
|
||||
defer o.fs.removeSession()
|
||||
if o.fs.opt.DisableHashCheck {
|
||||
return "", nil
|
||||
}
|
||||
@@ -1287,16 +1255,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
c, err := o.fs.getSftpConnection(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
|
||||
}
|
||||
session, err := c.sshClient.NewSession()
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
|
||||
}
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
escapedPath := shellEscape(o.path())
|
||||
if o.fs.opt.PathOverride != "" {
|
||||
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
|
||||
}
|
||||
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
|
||||
err = session.Run(hashCmd + " " + escapedPath)
|
||||
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
|
||||
_ = session.Close()
|
||||
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
|
||||
return "", nil
|
||||
}
|
||||
|
||||
_ = session.Close()
|
||||
b := stdout.Bytes()
|
||||
fs.Debugf(nil, "sftp output = %q", b)
|
||||
str := parseHash(b)
|
||||
fs.Debugf(nil, "sftp hash = %q", str)
|
||||
if r == hash.MD5 {
|
||||
o.md5sum = &str
|
||||
} else if r == hash.SHA1 {
|
||||
|
||||
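
The Hash hunk above runs the remote hash command (e.g. md5sum) and feeds its stdout through parseHash. A hedged sketch of what such parsing typically does - taking the first whitespace-separated field, which for md5sum/sha1sum output is the digest; this is an assumption about parseHash, whose body is not shown here:

package main

import (
	"fmt"
	"strings"
)

// firstField returns the first whitespace-separated token of a hash
// command's output, e.g. the digest from "d41d8cd9... filename".
// Assumption: rclone's parseHash behaves similarly; its body isn't shown above.
func firstField(out []byte) string {
	fields := strings.Fields(string(out))
	if len(fields) == 0 {
		return ""
	}
	return fields[0]
}

func main() {
	fmt.Println(firstField([]byte("d41d8cd98f00b204e9800998ecf8427e  empty.txt")))
}
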
@@ -754,34 +754,22 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
var total, objects int64
|
||||
if f.rootContainer != "" {
|
||||
var container swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
container, _, err = f.c.Container(ctx, f.rootContainer)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container info failed: %w", err)
|
||||
}
|
||||
total = container.Bytes
|
||||
objects = container.Count
|
||||
} else {
|
||||
var containers []swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(ctx, nil)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container listing failed: %w", err)
|
||||
}
|
||||
for _, c := range containers {
|
||||
total += c.Bytes
|
||||
objects += c.Count
|
||||
}
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
var containers []swift.Container
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
containers, err = f.c.ContainersAll(ctx, nil)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("container listing failed: %w", err)
|
||||
}
|
||||
usage = &fs.Usage{
|
||||
var total, objects int64
|
||||
for _, c := range containers {
|
||||
total += c.Bytes
|
||||
objects += c.Count
|
||||
}
|
||||
usage := &fs.Usage{
|
||||
Used: fs.NewUsageValue(total), // bytes in use
|
||||
Objects: fs.NewUsageValue(objects), // objects in use
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Package storj provides an interface to Storj decentralized object storage.
|
||||
package storj
|
||||
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
|
||||
package tardigrade
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -31,17 +31,16 @@ const (
|
||||
)
|
||||
|
||||
var satMap = map[string]string{
|
||||
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
|
||||
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
|
||||
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
|
||||
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
|
||||
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
|
||||
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "storj",
|
||||
Description: "Storj Decentralized Cloud Storage",
|
||||
Aliases: []string{"tardigrade"},
|
||||
Name: "tardigrade",
|
||||
Description: "Tardigrade Decentralized Cloud Storage",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
provider, _ := m.Get(fs.ConfigProvider)
|
||||
@@ -85,9 +84,10 @@ func init() {
|
||||
},
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose an authentication method.",
|
||||
Default: existingProvider,
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose an authentication method.",
|
||||
Required: true,
|
||||
Default: existingProvider,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "existing",
|
||||
Help: "Use an existing access grant.",
|
||||
@@ -99,21 +99,23 @@ func init() {
|
||||
{
|
||||
Name: "access_grant",
|
||||
Help: "Access grant.",
|
||||
Required: false,
|
||||
Provider: "existing",
|
||||
},
|
||||
{
|
||||
Name: "satellite_address",
|
||||
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
|
||||
Required: false,
|
||||
Provider: newProvider,
|
||||
Default: "us-central-1.storj.io",
|
||||
Default: "us-central-1.tardigrade.io",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-central-1.storj.io",
|
||||
Value: "us-central-1.tardigrade.io",
|
||||
Help: "US Central 1",
|
||||
}, {
|
||||
Value: "europe-west-1.storj.io",
|
||||
Value: "europe-west-1.tardigrade.io",
|
||||
Help: "Europe West 1",
|
||||
}, {
|
||||
Value: "asia-east-1.storj.io",
|
||||
Value: "asia-east-1.tardigrade.io",
|
||||
Help: "Asia East 1",
|
||||
},
|
||||
},
|
||||
@@ -121,11 +123,13 @@ func init() {
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "API key.",
|
||||
Required: false,
|
||||
Provider: newProvider,
|
||||
},
|
||||
{
|
||||
Name: "passphrase",
|
||||
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
|
||||
Required: false,
|
||||
Provider: newProvider,
|
||||
},
|
||||
},
|
||||
@@ -141,7 +145,7 @@ type Options struct {
|
||||
Passphrase string `config:"passphrase"`
|
||||
}
|
||||
|
||||
// Fs represents a remote to Storj
|
||||
// Fs represents a remote to Tardigrade
|
||||
type Fs struct {
|
||||
name string // the name of the remote
|
||||
root string // root of the filesystem
|
||||
@@ -159,12 +163,11 @@ var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
)
|
||||
|
||||
// NewFs creates a filesystem backed by Storj.
|
||||
// NewFs creates a filesystem backed by Tardigrade.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
|
||||
// Setup filesystem and connection to Storj
|
||||
// Setup filesystem and connection to Tardigrade
|
||||
root = norm.NFC.String(root)
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
@@ -185,24 +188,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
if f.opts.Access != "" {
|
||||
access, err = uplink.ParseAccess(f.opts.Access)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
|
||||
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
}
|
||||
|
||||
serializedAccess, err := access.Serialize()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
}
|
||||
|
||||
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: access: %w", err)
|
||||
return nil, fmt.Errorf("tardigrade: access: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
if bucketName != "" && bucketPath != "" {
|
||||
_, err = project.StatBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return f, fmt.Errorf("storj: bucket: %w", err)
|
||||
return f, fmt.Errorf("tardigrade: bucket: %w", err)
|
||||
}
|
||||
|
||||
object, err := project.StatObject(ctx, bucketName, bucketPath)
|
||||
@@ -260,7 +263,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// connect opens a connection to Storj.
|
||||
// connect opens a connection to Tardigrade.
|
||||
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||
fs.Debugf(f, "connecting...")
|
||||
defer fs.Debugf(f, "connected: %+v", err)
|
||||
@@ -271,7 +274,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||
|
||||
project, err = cfg.OpenProject(ctx, f.access)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: project: %w", err)
|
||||
return nil, fmt.Errorf("tardigrade: project: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
@@ -680,43 +683,3 @@ func newPrefix(prefix string) string {
|
||||
|
||||
return prefix + "/"
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Move parameters
|
||||
srcBucket, srcKey := bucket.Split(srcObj.absolute)
|
||||
dstBucket, dstKey := f.absolute(remote)
|
||||
options := uplink.MoveObjectOptions{}
|
||||
|
||||
// Do the move
|
||||
err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
// Make sure destination bucket exists
|
||||
_, err := f.project.EnsureBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
|
||||
}
|
||||
// And try again
|
||||
err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rename object failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Read the new object
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package storj
|
||||
package tardigrade
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"storj.io/uplink"
|
||||
)
|
||||
|
||||
// Object describes a Storj object
|
||||
// Object describes a Tardigrade object
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
|
||||
@@ -32,7 +32,7 @@ type Object struct {
|
||||
// Check the interfaces are satisfied.
|
||||
var _ fs.Object = &Object{}
|
||||
|
||||
// newObjectFromUplink creates a new object from a Storj uplink object.
|
||||
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
|
||||
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
|
||||
// Attempt to use the modified time from the metadata. Otherwise
|
||||
// fallback to the server time.
|
||||
@@ -1,20 +1,20 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Test Storj filesystem interface
|
||||
package storj_test
|
||||
// Test Tardigrade filesystem interface
|
||||
package tardigrade_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/storj"
|
||||
"github.com/rclone/rclone/backend/tardigrade"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestStorj:",
|
||||
NilObject: (*storj.Object)(nil),
|
||||
RemoteName: "TestTardigrade:",
|
||||
NilObject: (*tardigrade.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build plan9
|
||||
// +build plan9
|
||||
|
||||
package storj
|
||||
package tardigrade
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -85,10 +84,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
err := o.Update(ctx, readers[i], src, options...)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
|
||||
if len(entries) > 1 {
|
||||
// Drain the input buffer to allow other uploads to continue
|
||||
_, _ = io.Copy(ioutil.Discard, readers[i])
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs[i] = fs.ErrorNotAFile
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -34,21 +33,25 @@ func init() {
|
||||
Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "action_policy",
|
||||
Help: "Policy to choose upstream on ACTION category.",
|
||||
Default: "epall",
|
||||
Name: "action_policy",
|
||||
Help: "Policy to choose upstream on ACTION category.",
|
||||
Required: true,
|
||||
Default: "epall",
|
||||
}, {
|
||||
Name: "create_policy",
|
||||
Help: "Policy to choose upstream on CREATE category.",
|
||||
Default: "epmfs",
|
||||
Name: "create_policy",
|
||||
Help: "Policy to choose upstream on CREATE category.",
|
||||
Required: true,
|
||||
Default: "epmfs",
|
||||
}, {
|
||||
Name: "search_policy",
|
||||
Help: "Policy to choose upstream on SEARCH category.",
|
||||
Default: "ff",
|
||||
Name: "search_policy",
|
||||
Help: "Policy to choose upstream on SEARCH category.",
|
||||
Required: true,
|
||||
Default: "ff",
|
||||
}, {
|
||||
Name: "cache_time",
|
||||
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
|
||||
Default: 120,
|
||||
Name: "cache_time",
|
||||
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
|
||||
Required: true,
|
||||
Default: 120,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -487,10 +490,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
|
||||
}
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
||||
if len(upstreams) > 1 {
|
||||
// Drain the input buffer to allow other uploads to continue
|
||||
_, _ = io.Copy(ioutil.Discard, readers[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
objs[i] = u.WrapObject(o)
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -18,12 +20,19 @@ import (
|
||||
)
|
||||
|
||||
// MakeTestDirs makes directories in /tmp for testing
|
||||
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
|
||||
func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
|
||||
for i := 1; i <= n; i++ {
|
||||
dir := t.TempDir()
|
||||
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
|
||||
require.NoError(t, err)
|
||||
dirs = append(dirs, dir)
|
||||
}
|
||||
return dirs
|
||||
clean = func() {
|
||||
for _, dir := range dirs {
|
||||
err := os.RemoveAll(dir)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
return dirs, clean
|
||||
}
|
||||
|
||||
func (f *Fs) TestInternalReadOnly(t *testing.T) {
|
||||
@@ -86,7 +95,8 @@ func TestMoveCopy(t *testing.T) {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
ctx := context.Background()
|
||||
dirs := MakeTestDirs(t, 1)
|
||||
dirs, clean := MakeTestDirs(t, 1)
|
||||
defer clean()
|
||||
fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
|
||||
f, err := fs.NewFs(ctx, fsString)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -27,7 +27,8 @@ func TestStandard(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnion"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -48,7 +49,8 @@ func TestRO(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
|
||||
name := "TestUnionRO"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -69,7 +71,8 @@ func TestNC(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
|
||||
name := "TestUnionNC"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -90,7 +93,8 @@ func TestPolicy1(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy1"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -111,7 +115,8 @@ func TestPolicy2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -132,7 +137,8 @@ func TestPolicy3(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
dirs := union.MakeTestDirs(t, 3)
|
||||
dirs, clean := union.MakeTestDirs(t, 3)
|
||||
defer clean()
|
||||
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
|
||||
name := "TestUnionPolicy3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -89,7 +91,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
|
||||
return nil, err
|
||||
}
|
||||
f.RootFs = rFs
|
||||
rootString := fspath.JoinRootPath(remote, root)
|
||||
rootString := path.Join(remote, filepath.ToSlash(root))
|
||||
myFs, err := cache.Get(ctx, rootString)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, err
|
||||
|
||||
@@ -66,11 +66,6 @@ func init() {
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
@@ -84,9 +79,8 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Token string `config:"token"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
Token string `config:"token"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote yandex
|
||||
@@ -636,7 +630,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
}
|
||||
}
|
||||
//delete directory
|
||||
return f.delete(ctx, root, f.opt.HardDelete)
|
||||
return f.delete(ctx, root, false)
|
||||
}
|
||||
|
||||
// Rmdir deletes the container
|
||||
@@ -1147,7 +1141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
|
||||
return o.fs.delete(ctx, o.filePath(), false)
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/winfsp/cgofuse.git
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
|
||||
docker images
|
||||
docker push rclone/xgo-cgofuse
|
||||
|
||||
@@ -52,7 +52,6 @@ var (
|
||||
var osarches = []string{
|
||||
"windows/386",
|
||||
"windows/amd64",
|
||||
"windows/arm64",
|
||||
"darwin/amd64",
|
||||
"darwin/arm64",
|
||||
"linux/386",
|
||||
@@ -86,13 +85,6 @@ var archFlags = map[string][]string{
|
||||
"arm-v7": {"GOARM=7"},
|
||||
}
|
||||
|
||||
// Map Go architectures to NFPM architectures
|
||||
// Any missing are passed straight through
|
||||
var goarchToNfpm = map[string]string{
|
||||
"arm": "arm6",
|
||||
"arm-v7": "arm7",
|
||||
}
|
||||
|
||||
// runEnv - run a shell command with env
|
||||
func runEnv(args, env []string) error {
|
||||
if *debug {
|
||||
@@ -175,15 +167,11 @@ func buildDebAndRpm(dir, version, goarch string) []string {
|
||||
pkgVersion := version[1:]
|
||||
pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
|
||||
pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
|
||||
nfpmArch, ok := goarchToNfpm[goarch]
|
||||
if !ok {
|
||||
nfpmArch = goarch
|
||||
}
|
||||
|
||||
// Make nfpm.yaml from the template
|
||||
substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
|
||||
"Version": pkgVersion,
|
||||
"Arch": nfpmArch,
|
||||
"Arch": goarch,
|
||||
})
|
||||
|
||||
// build them
|
||||
@@ -389,7 +377,7 @@ func compileArch(version, goos, goarch, dir string) bool {
|
||||
artifacts := []string{buildZip(dir)}
|
||||
// build a .deb and .rpm if appropriate
|
||||
if goos == "linux" {
|
||||
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
|
||||
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
|
||||
}
|
||||
if *copyAs != "" {
|
||||
for _, artifact := range artifacts {
|
||||
|
||||
@@ -24,7 +24,6 @@ docs = [
|
||||
"overview.md",
|
||||
"flags.md",
|
||||
"docker.md",
|
||||
"bisync.md",
|
||||
|
||||
# Keep these alphabetical by full name
|
||||
"fichier.md",
|
||||
@@ -53,7 +52,6 @@ docs = [
|
||||
"mailru.md",
|
||||
"mega.md",
|
||||
"memory.md",
|
||||
"netstorage.md",
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
"opendrive.md",
|
||||
@@ -65,9 +63,8 @@ docs = [
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
"sftp.md",
|
||||
"storj.md",
|
||||
"sugarsync.md",
|
||||
"tardigrade.md", # stub only to redirect to storj.md
|
||||
"tardigrade.md",
|
||||
"uptobox.md",
|
||||
"union.md",
|
||||
"webdav.md",
|
||||
|
||||
@@ -41,7 +41,7 @@ You can discover what commands a backend implements by using
|
||||
rclone backend help <backendname>
|
||||
|
||||
You can also discover information about the backend using (see
|
||||
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
|
||||
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
|
||||
for more info).
|
||||
|
||||
rclone backend features remote:
|
||||
@@ -55,7 +55,7 @@ Pass arguments to the backend by placing them on the end of the line
|
||||
rclone backend cleanup remote:path file1 file2 file3
|
||||
|
||||
Note to run these commands on a running backend then see
|
||||
[backend/command](/rc/#backend-command) in the rc docs.
|
||||
[backend/command](/rc/#backend/command) in the rc docs.
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 1e6, command, args)
|
||||
@@ -149,7 +149,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
|
||||
info on how to pass options and arguments.
|
||||
|
||||
These can be run on a running backend using the rc command
|
||||
[backend/command](/rc/#backend-command).
|
||||
[backend/command](/rc/#backend/command).
|
||||
|
||||
`, name)
|
||||
for _, cmd := range cmds {
|
||||
|
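The two help texts above point at the `backend/command` rc call for running backend commands against an already running rclone. A rough sketch of the same call made in-process through the librclone bindings follows; the remote name, the "noop" command and the echo option are placeholders, and the parameter names (command, fs, arg, opt) are assumed from the rc documentation rather than taken from this diff.

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/librclone/librclone"
)

func main() {
	librclone.Initialize()
	defer librclone.Finalize()

	// Roughly the in-process equivalent of
	//   rclone backend noop remote: file1 -o echo=yes
	// issued as an rc call; "remote:" and "noop" are placeholders.
	body := `{"command":"noop","fs":"remote:","arg":["file1"],"opt":{"echo":"yes"}}`
	out, status := librclone.RPC("backend/command", body)
	fmt.Println(status, out)
}
```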
||||
@@ -13,7 +13,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/winfsp/cgofuse/fuse"
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/winfsp/cgofuse/fuse"
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
@@ -168,7 +168,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
|
||||
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
|
||||
|
||||
// Create options
|
||||
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
|
||||
options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
|
||||
fs.Debugf(f, "Mounting with options: %q", options)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
|
||||
@@ -10,17 +10,11 @@
|
||||
package cmount
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest/testy"
|
||||
"github.com/rclone/rclone/vfs/vfstest"
|
||||
)
|
||||
|
||||
func TestMount(t *testing.T) {
|
||||
// Disable tests under macOS and the CI since they are locking up
|
||||
if runtime.GOOS == "darwin" {
|
||||
testy.SkipUnreliable(t)
|
||||
}
|
||||
vfstest.RunTests(t, false, mount)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package hashsum
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -25,11 +26,11 @@ var (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
AddHashsumFlags(cmdFlags)
|
||||
AddHashFlags(cmdFlags)
|
||||
}
|
||||
|
||||
// AddHashsumFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
|
||||
func AddHashsumFlags(cmdFlags *pflag.FlagSet) {
|
||||
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
|
||||
func AddHashFlags(cmdFlags *pflag.FlagSet) {
|
||||
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
|
||||
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
|
||||
flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
|
||||
@@ -40,7 +41,7 @@ func AddHashsumFlags(cmdFlags *pflag.FlagSet) {
|
||||
func GetHashsumOutput(filename string) (out *os.File, close func(), err error) {
|
||||
out, err = os.Create(filename)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to open output file %v: %w", filename, err)
|
||||
err = fmt.Errorf("Failed to open output file %v: %w", filename, err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -54,32 +55,6 @@ func GetHashsumOutput(filename string) (out *os.File, close func(), err error) {
|
||||
return out, close, nil
|
||||
}
|
||||
|
||||
// CreateFromStdinArg checks args and produces hashsum from standard input if it is requested
|
||||
func CreateFromStdinArg(ht hash.Type, args []string, startArg int) (bool, error) {
|
||||
var stdinArg bool
|
||||
if len(args) == startArg {
|
||||
// Missing arg: Always read from stdin
|
||||
stdinArg = true
|
||||
} else if len(args) > startArg && args[startArg] == "-" {
|
||||
// Special arg: Read from stdin only if there is data available
|
||||
if fi, _ := os.Stdin.Stat(); fi.Mode()&os.ModeCharDevice == 0 {
|
||||
stdinArg = true
|
||||
}
|
||||
}
|
||||
if !stdinArg {
|
||||
return false, nil
|
||||
}
|
||||
if HashsumOutfile == "" {
|
||||
return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, nil)
|
||||
}
|
||||
output, close, err := GetHashsumOutput(HashsumOutfile)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
defer close()
|
||||
return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, output)
|
||||
}
|
||||
|
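CreateFromStdinArg above hands the actual hashing to operations.HashSumStream. A minimal standalone sketch of that call, assuming only what the diff shows (hash type chosen by name, base64 disabled, and a nil output file sending the sum to stdout):

```
package main

import (
	"log"
	"os"

	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// Pick the hash by name, as the hashsum command does with its first argument.
	var ht hash.Type
	if err := ht.Set("md5"); err != nil {
		log.Fatal(err)
	}
	// Hash everything read from stdin; passing nil for the output file mirrors
	// the CreateFromStdinArg call above and writes the sum to stdout.
	if err := operations.HashSumStream(ht, false, os.Stdin, nil); err != nil {
		log.Fatal(err)
	}
}
```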
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "hashsum <hash> remote:path",
|
||||
Short: `Produces a hashsum file for all the objects in the path.`,
|
||||
@@ -93,11 +68,6 @@ not supported by the remote, no hash will be returned. With the
|
||||
download flag, the file will be downloaded from the remote and
|
||||
hashed locally enabling any hash for any remote.
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
|
||||
Run without a hash to see the list of all supported hashes, e.g.
|
||||
|
||||
$ rclone hashsum
|
||||
@@ -113,6 +83,8 @@ Note that hash names are case insensitive and values are output in lower case.
|
||||
if len(args) == 0 {
|
||||
fmt.Print(hash.HelpString(0))
|
||||
return nil
|
||||
} else if len(args) == 1 {
|
||||
return errors.New("need hash type and remote")
|
||||
}
|
||||
var ht hash.Type
|
||||
err := ht.Set(args[0])
|
||||
@@ -120,10 +92,8 @@ Note that hash names are case insensitive and values are output in lower case.
|
||||
fmt.Println(hash.HelpString(0))
|
||||
return err
|
||||
}
|
||||
if found, err := CreateFromStdinArg(ht, args, 1); found {
|
||||
return err
|
||||
}
|
||||
fsrc := cmd.NewFsSrc(args[1:])
|
||||
|
||||
cmd.Run(false, false, command, func() error {
|
||||
if ChecksumFile != "" {
|
||||
fsum, sumFile := cmd.NewFsFile(ChecksumFile)
|
||||
|
||||
23
cmd/help.go
@@ -165,7 +165,7 @@ func runRoot(cmd *cobra.Command, args []string) {
|
||||
// setupRootCommand sets default usage, help, and error handling for
|
||||
// the root command.
|
||||
//
|
||||
// Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go
|
||||
// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
|
||||
func setupRootCommand(rootCmd *cobra.Command) {
|
||||
ci := fs.GetConfig(context.Background())
|
||||
// Add global flags
|
||||
@@ -329,29 +329,12 @@ func showBackend(name string) {
|
||||
if opt.IsPassword {
|
||||
fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
|
||||
}
|
||||
fmt.Printf("Properties:\n\n")
|
||||
fmt.Printf("- Config: %s\n", opt.Name)
|
||||
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
|
||||
if opt.Provider != "" {
|
||||
fmt.Printf("- Provider: %s\n", opt.Provider)
|
||||
}
|
||||
fmt.Printf("- Type: %s\n", opt.Type())
|
||||
defaultValue := opt.GetValue()
|
||||
// Default value and Required are related: Required means option must
|
||||
// have a value, but if there is a default then a value does not have
|
||||
// to be explicitly set and then Required makes no difference.
|
||||
if defaultValue != "" {
|
||||
fmt.Printf("- Default: %s\n", quoteString(defaultValue))
|
||||
} else {
|
||||
fmt.Printf("- Required: %v\n", opt.Required)
|
||||
}
|
||||
// List examples / possible choices
|
||||
fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
|
||||
if len(opt.Examples) > 0 {
|
||||
if opt.Exclusive {
|
||||
fmt.Printf("- Choices:\n")
|
||||
} else {
|
||||
fmt.Printf("- Examples:\n")
|
||||
}
|
||||
fmt.Printf("- Examples:\n")
|
||||
for _, ex := range opt.Examples {
|
||||
fmt.Printf(" - %s\n", quoteString(ex.Value))
|
||||
for _, line := range strings.Split(ex.Help, "\n") {
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
hashsum.AddHashsumFlags(cmdFlags)
|
||||
hashsum.AddHashFlags(cmdFlags)
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -27,17 +27,9 @@ By default, the hash is requested from the remote. If MD5 is
|
||||
not supported by the remote, no hash will be returned. With the
|
||||
download flag, the file will be downloaded from the remote and
|
||||
hashed locally enabling MD5 for any remote.
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found {
|
||||
return err
|
||||
}
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
if hashsum.ChecksumFile != "" {
|
||||
@@ -54,6 +46,5 @@ as a relative path).
|
||||
defer close()
|
||||
return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
|
||||
})
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
|
||||
|
||||
f := VFS.Fs()
|
||||
fs.Debugf(f, "Mounting on %q", mountpoint)
|
||||
c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
|
||||
c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
@@ -25,10 +25,11 @@ func init() {
|
||||
// mountOptions configures the options from the command line flags
|
||||
//
|
||||
// man mount.fuse for more info and note the -o flag for other options
|
||||
func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
|
||||
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
|
||||
device := f.Name() + ":" + f.Root()
|
||||
mountOpts = &fuse.MountOptions{
|
||||
AllowOther: fsys.opt.AllowOther,
|
||||
FsName: opt.DeviceName,
|
||||
FsName: device,
|
||||
Name: "rclone",
|
||||
DisableXAttrs: true,
|
||||
Debug: fsys.opt.DebugFUSE,
|
||||
@@ -119,7 +120,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
|
||||
if runtime.GOOS == "darwin" {
|
||||
opts = append(opts,
|
||||
// VolumeName sets the volume name shown in Finder.
|
||||
fmt.Sprintf("volname=%s", opt.VolumeName),
|
||||
fmt.Sprintf("volname=%s", device),
|
||||
|
||||
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
|
||||
// prefix "com.apple.". This disables persistent Finder state and
|
||||
@@ -166,7 +167,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
|
||||
//mOpts.Debug = mountlib.DebugFUSE
|
||||
|
||||
//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
|
||||
mountOpts := mountOptions(fsys, f, opt)
|
||||
mountOpts := mountOptions(fsys, f)
|
||||
|
||||
// FIXME fill out
|
||||
opts := fusefs.Options{
|
||||
|
||||
@@ -65,10 +65,10 @@ at all, then 1 PiB is set as both the total and the free size.
|
||||
To run rclone @ on Windows, you will need to
|
||||
download and install [WinFsp](http://www.secfs.net/winfsp/).
|
||||
|
||||
[WinFsp](https://github.com/winfsp/winfsp) is an open-source
|
||||
[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
|
||||
Windows File System Proxy which makes it easy to write user space file
|
||||
systems for Windows. It provides a FUSE emulation layer which rclone
|
||||
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
|
||||
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
|
||||
Both of these packages are by Bill Zissimopoulos who was very helpful
|
||||
during the implementation of rclone @ for Windows.
|
||||
|
||||
@@ -218,7 +218,7 @@ from Microsoft's Sysinternals suite, which has option |-s| to start
|
||||
processes as the SYSTEM account. Another alternative is to run the mount
|
||||
command from a Windows Scheduled Task, or a Windows Service, configured
|
||||
to run as the SYSTEM account. A third alternative is to use the
|
||||
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture)).
|
||||
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture)).
|
||||
Note that when running rclone as another user, it will not use
|
||||
the configuration file from your profile unless you tell it to
|
||||
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
|
||||
|
||||
@@ -40,7 +40,6 @@ type Options struct {
|
||||
ExtraOptions []string
|
||||
ExtraFlags []string
|
||||
AttrTimeout time.Duration // how long the kernel caches attribute for
|
||||
DeviceName string
|
||||
VolumeName string
|
||||
NoAppleDouble bool
|
||||
NoAppleXattr bool
|
||||
@@ -126,7 +125,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
|
||||
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
|
||||
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
|
||||
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
|
||||
// Windows and OSX
|
||||
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
|
||||
// OSX only
|
||||
@@ -237,7 +235,6 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
|
||||
return nil, err
|
||||
}
|
||||
m.SetVolumeName(m.MountOpt.VolumeName)
|
||||
m.SetDeviceName(m.MountOpt.DeviceName)
|
||||
|
||||
// Start background task if --daemon is specified
|
||||
if m.MountOpt.Daemon {
|
||||
|
||||
@@ -16,16 +16,11 @@ import (
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/fstest/testy"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRc(t *testing.T) {
|
||||
// Disable tests under macOS and the CI since they are locking up
|
||||
if runtime.GOOS == "darwin" {
|
||||
testy.SkipUnreliable(t)
|
||||
}
|
||||
ctx := context.Background()
|
||||
configfile.Install()
|
||||
mount := rc.Calls.Get("mount/mount")
|
||||
@@ -35,14 +30,19 @@ func TestRc(t *testing.T) {
|
||||
getMountTypes := rc.Calls.Get("mount/types")
|
||||
assert.NotNil(t, getMountTypes)
|
||||
|
||||
localDir := t.TempDir()
|
||||
err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
|
||||
localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(localDir) }()
|
||||
err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
|
||||
require.NoError(t, err)
|
||||
|
||||
mountPoint := t.TempDir()
|
||||
mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
|
||||
require.NoError(t, err)
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows requires the mount point not to exist
|
||||
require.NoError(t, os.RemoveAll(mountPoint))
|
||||
} else {
|
||||
defer func() { _ = os.RemoveAll(mountPoint) }()
|
||||
}
|
||||
|
||||
out, err := getMountTypes.Fn(ctx, nil)
|
||||
|
||||
@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
|
||||
// SetVolumeName with sensible default
|
||||
func (m *MountPoint) SetVolumeName(vol string) {
|
||||
if vol == "" {
|
||||
vol = fs.ConfigString(m.Fs)
|
||||
vol = m.Fs.Name() + ":" + m.Fs.Root()
|
||||
}
|
||||
m.MountOpt.SetVolumeName(vol)
|
||||
}
|
||||
@@ -102,11 +102,3 @@ func (o *Options) SetVolumeName(vol string) {
|
||||
}
|
||||
o.VolumeName = vol
|
||||
}
|
||||
|
||||
// SetDeviceName with sensible default
|
||||
func (m *MountPoint) SetDeviceName(dev string) {
|
||||
if dev == "" {
|
||||
dev = fs.ConfigString(m.Fs)
|
||||
}
|
||||
m.MountOpt.DeviceName = dev
|
||||
}
|
||||
|
||||
Binary file not shown. (Before: 1.4 KiB, After: 20 KiB)
Binary file not shown. (Before: 724 B, After: 5.6 KiB)
@@ -23,7 +23,6 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/testy"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -304,10 +303,6 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
|
||||
}
|
||||
|
||||
func testMountAPI(t *testing.T, sockAddr string) {
|
||||
// Disable tests under macOS and linux in the CI since they are locking up
|
||||
if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
|
||||
testy.SkipUnreliable(t)
|
||||
}
|
||||
if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
|
||||
t.Skip("Test requires working mount command")
|
||||
}
|
||||
|
||||
@@ -16,10 +16,7 @@ import (
|
||||
)
|
||||
|
||||
// Help describes the options for the serve package
|
||||
var Help = `
|
||||
#### Template
|
||||
|
||||
--template allows a user to specify a custom markup template for http
|
||||
var Help = `--template allows a user to specify a custom markup template for http
|
||||
and webdav serve functions. The server exports the following markup
|
||||
to be used within the template to serve pages:
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
|
||||
flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
|
||||
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
|
||||
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
|
||||
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
|
||||
|
||||
@@ -16,7 +16,6 @@ TestFichier:
|
||||
TestFTP:
|
||||
TestGoogleCloudStorage:
|
||||
TestHubic:
|
||||
TestNetStorage:
|
||||
TestOneDrive:
|
||||
TestPcloud:
|
||||
TestQingStor:
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -111,7 +113,14 @@ func TestResticHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir := t.TempDir()
|
||||
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure the tempdir is properly removed
|
||||
defer func() {
|
||||
err := os.RemoveAll(tempdir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
// globally set append-only mode
|
||||
prev := appendOnly
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -33,7 +35,14 @@ func TestResticPrivateRepositories(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir := t.TempDir()
|
||||
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure the tempdir is properly removed
|
||||
defer func() {
|
||||
err := os.RemoveAll(tempdir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
// globally set private-repos mode & test user
|
||||
prev := privateRepos
|
||||
|
||||
@@ -100,17 +100,6 @@ be used with sshd via ~/.ssh/authorized_keys, for example:
|
||||
|
||||
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
|
||||
|
||||
On the client you need to set "--transfers 1" when using --stdio.
|
||||
Otherwise multiple instances of the rclone server are started by OpenSSH
|
||||
which can lead to "corrupted on transfer" errors. This is the case because
|
||||
the client chooses indiscriminately which server to send commands to while
|
||||
the servers all have different views of the state of the filing system.
|
||||
|
||||
The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from beeing
|
||||
used. Omitting "restrict" and using --sftp-path-override to enable
|
||||
checksumming is possible but less secure and you could use the SFTP server
|
||||
provided by OpenSSH in this case.
|
||||
|
||||
` + vfs.Help + proxy.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
var f fs.Fs
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
hashsum.AddHashsumFlags(cmdFlags)
|
||||
hashsum.AddHashFlags(cmdFlags)
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -27,20 +27,9 @@ By default, the hash is requested from the remote. If SHA-1 is
|
||||
not supported by the remote, no hash will be returned. With the
|
||||
download flag, the file will be downloaded from the remote and
|
||||
hashed locally enabling SHA-1 for any remote.
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
|
||||
This command can also hash data received on STDIN, if not passing
|
||||
a remote:path.
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if found, err := hashsum.CreateFromStdinArg(hash.SHA1, args, 0); found {
|
||||
return err
|
||||
}
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
if hashsum.ChecksumFile != "" {
|
||||
@@ -57,6 +46,5 @@ a remote:path.
|
||||
defer close()
|
||||
return operations.HashLister(context.Background(), hash.SHA1, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
|
||||
})
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
|
||||
TestDrive:testInfo \
|
||||
TestDropbox:testInfo \
|
||||
TestGoogleCloudStorage:rclone-testinfo \
|
||||
TestnStorage:testInfo \
|
||||
TestOneDrive:testInfo \
|
||||
TestS3:rclone-testinfo \
|
||||
TestSftp:testInfo \
|
||||
|
||||
@@ -5,7 +5,6 @@ package makefiles
|
||||
import (
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -17,9 +16,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -32,51 +29,37 @@ var (
|
||||
minFileNameLength = 4
|
||||
maxFileNameLength = 12
|
||||
seed = int64(1)
|
||||
zero = false
|
||||
sparse = false
|
||||
ascii = false
|
||||
pattern = false
|
||||
chargen = false
|
||||
|
||||
// Globals
|
||||
randSource *rand.Rand
|
||||
source io.Reader
|
||||
directoriesToCreate int
|
||||
totalDirectories int
|
||||
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
|
||||
)
|
||||
|
||||
func init() {
|
||||
test.Command.AddCommand(makefilesCmd)
|
||||
makefilesFlags := makefilesCmd.Flags()
|
||||
flags.IntVarP(makefilesFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
|
||||
flags.IntVarP(makefilesFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
|
||||
flags.IntVarP(makefilesFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
|
||||
flags.FVarP(makefilesFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
|
||||
flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
|
||||
flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
|
||||
flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")
|
||||
|
||||
test.Command.AddCommand(makefileCmd)
|
||||
makefileFlags := makefileCmd.Flags()
|
||||
|
||||
// Common flags to makefiles and makefile
|
||||
for _, f := range []*pflag.FlagSet{makefilesFlags, makefileFlags} {
|
||||
flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
|
||||
flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00")
|
||||
flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)")
|
||||
flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only")
|
||||
flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern")
|
||||
flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with a ASCII chargen pattern")
|
||||
}
|
||||
test.Command.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.IntVarP(cmdFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
|
||||
flags.IntVarP(cmdFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
|
||||
flags.IntVarP(cmdFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
|
||||
flags.FVarP(cmdFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
|
||||
flags.FVarP(cmdFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
|
||||
flags.IntVarP(cmdFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
|
||||
flags.IntVarP(cmdFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")
|
||||
flags.Int64VarP(cmdFlags, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
|
||||
}
|
||||
|
||||
var makefilesCmd = &cobra.Command{
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "makefiles <dir>",
|
||||
Short: `Make a random file hierarchy in a directory`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
commonInit()
|
||||
if seed == 0 {
|
||||
seed = time.Now().UnixNano()
|
||||
fs.Logf(nil, "Using random seed = %d", seed)
|
||||
}
|
||||
randSource = rand.New(rand.NewSource(seed))
|
||||
outputDirectory := args[0]
|
||||
directoriesToCreate = numberOfFiles / averageFilesPerDirectory
|
||||
averageSize := (minFileSize + maxFileSize) / 2
|
||||
@@ -90,130 +73,13 @@ var makefilesCmd = &cobra.Command{
|
||||
totalBytes := int64(0)
|
||||
for i := 0; i < numberOfFiles; i++ {
|
||||
dir := dirs[randSource.Intn(len(dirs))]
|
||||
size := int64(minFileSize)
|
||||
if maxFileSize > minFileSize {
|
||||
size += randSource.Int63n(int64(maxFileSize - minFileSize))
|
||||
}
|
||||
writeFile(dir, fileName(), size)
|
||||
totalBytes += size
|
||||
totalBytes += writeFile(dir, fileName())
|
||||
}
|
||||
dt := time.Since(start)
|
||||
fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
|
||||
fs.Logf(nil, "Written %viB in %v at %viB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
|
||||
},
|
||||
}
|
||||
|
||||
var makefileCmd = &cobra.Command{
|
||||
Use: "makefile <size> [<file>]+ [flags]",
|
||||
Short: `Make files with random contents of the size given`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1e6, command, args)
|
||||
commonInit()
|
||||
var size fs.SizeSuffix
|
||||
err := size.Set(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse size %q: %v", args[0], err)
|
||||
}
|
||||
start := time.Now()
|
||||
fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size)
|
||||
totalBytes := int64(0)
|
||||
for _, filePath := range args[1:] {
|
||||
dir := filepath.Dir(filePath)
|
||||
name := filepath.Base(filePath)
|
||||
writeFile(dir, name, int64(size))
|
||||
totalBytes += int64(size)
|
||||
}
|
||||
dt := time.Since(start)
|
||||
fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
|
||||
},
|
||||
}
|
||||
|
||||
func bool2int(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// common initialisation for makefiles and makefile
|
||||
func commonInit() {
|
||||
if seed == 0 {
|
||||
seed = time.Now().UnixNano()
|
||||
fs.Logf(nil, "Using random seed = %d", seed)
|
||||
}
|
||||
randSource = rand.New(rand.NewSource(seed))
|
||||
if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 {
|
||||
log.Fatal("Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
|
||||
}
|
||||
switch {
|
||||
case zero, sparse:
|
||||
source = zeroReader{}
|
||||
case ascii:
|
||||
source = asciiReader{}
|
||||
case pattern:
|
||||
source = readers.NewPatternReader(math.MaxInt64)
|
||||
case chargen:
|
||||
source = &chargenReader{}
|
||||
default:
|
||||
source = randSource
|
||||
}
|
||||
if minFileSize > maxFileSize {
|
||||
maxFileSize = minFileSize
|
||||
}
|
||||
}
|
||||
|
||||
type zeroReader struct{}
|
||||
|
||||
// Read a chunk of zeroes
|
||||
func (zeroReader) Read(p []byte) (n int, err error) {
|
||||
for i := range p {
|
||||
p[i] = 0
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
type asciiReader struct{}
|
||||
|
||||
// Read a chunk of printable ASCII characters
|
||||
func (asciiReader) Read(p []byte) (n int, err error) {
|
||||
n, err = randSource.Read(p)
|
||||
for i := range p[:n] {
|
||||
p[i] = (p[i] % (0x7F - 0x20)) + 0x20
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
type chargenReader struct {
|
||||
start byte // offset from startChar to start line with
|
||||
written byte // chars in line so far
|
||||
}
|
||||
|
||||
// Read a chunk of printable ASCII characters in chargen format
|
||||
func (r *chargenReader) Read(p []byte) (n int, err error) {
|
||||
const (
|
||||
startChar = 0x20 // ' '
|
||||
endChar = 0x7E // '~' inclusive
|
||||
charsPerLine = 72
|
||||
)
|
||||
for i := range p {
|
||||
if r.written >= charsPerLine {
|
||||
r.start++
|
||||
if r.start > endChar-startChar {
|
||||
r.start = 0
|
||||
}
|
||||
p[i] = '\n'
|
||||
r.written = 0
|
||||
} else {
|
||||
c := r.start + r.written + startChar
|
||||
if c > endChar {
|
||||
c -= endChar - startChar + 1
|
||||
}
|
||||
p[i] = c
|
||||
r.written++
|
||||
}
|
||||
}
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// fileName creates a unique random file or directory name
|
||||
func fileName() (name string) {
|
||||
for {
|
||||
@@ -268,7 +134,7 @@ func (d *dir) list(path string, output []string) []string {
|
||||
}
|
||||
|
||||
// writeFile writes a random file at dir/name
|
||||
func writeFile(dir, name string, size int64) {
|
||||
func writeFile(dir, name string) int64 {
|
||||
err := file.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directory %q: %v", dir, err)
|
||||
@@ -278,11 +144,8 @@ func writeFile(dir, name string, size int64) {
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to open file %q: %v", path, err)
|
||||
}
|
||||
if sparse {
|
||||
err = fd.Truncate(size)
|
||||
} else {
|
||||
_, err = io.CopyN(fd, source, size)
|
||||
}
|
||||
size := randSource.Int63n(int64(maxFileSize-minFileSize)) + int64(minFileSize)
|
||||
_, err = io.CopyN(fd, randSource, size)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
|
||||
}
|
||||
@@ -291,4 +154,5 @@ func writeFile(dir, name string, size int64) {
|
||||
log.Fatalf("Failed to close file %q: %v", path, err)
|
||||
}
|
||||
fs.Infof(path, "Written file size %v", fs.SizeSuffix(size))
|
||||
return size
|
||||
}
|
||||
|
||||
@@ -5,13 +5,11 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -65,32 +63,13 @@ then add the ` + "`--localtime`" + ` flag.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f, remote := newFsDst(args)
|
||||
f, fileName := cmd.NewFsFile(args[0])
|
||||
cmd.Run(true, false, command, func() error {
|
||||
return Touch(context.Background(), f, remote)
|
||||
return Touch(context.Background(), f, fileName)
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
// newFsDst creates a new dst fs from the arguments.
|
||||
//
|
||||
// The returned fs will never point to a file. It will point to the
|
||||
// parent directory of specified path, and is returned together with
|
||||
// the basename of file or directory, except if argument is only a
|
||||
// remote name. Similar to cmd.NewFsDstFile, but without raising fatal
|
||||
// when name of file or directory is empty (e.g. "remote:" or "remote:path/").
|
||||
func newFsDst(args []string) (f fs.Fs, remote string) {
|
||||
root, remote, err := fspath.Split(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Parsing %q failed: %v", args[0], err)
|
||||
}
|
||||
if root == "" {
|
||||
root = "."
|
||||
}
|
||||
f = cmd.NewFsDir([]string{root})
|
||||
return f, remote
|
||||
}
|
||||
|
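A short sketch of the behaviour described in the comment above, using hypothetical remote paths; it assumes the same package, since newFsDst is unexported:

```
// exampleNewFsDst illustrates how newFsDst splits its argument (remotes are hypothetical).
func exampleNewFsDst() {
	// A path with a leaf: the returned Fs is rooted at the parent directory.
	f, leaf := newFsDst([]string{"remote:photos/2021/img.jpg"})
	fs.Debugf(f, "rooted at remote:photos/2021, leaf=%q", leaf) // leaf == "img.jpg"

	// Only a remote name: leaf is empty and, unlike cmd.NewFsDstFile, no fatal error is raised.
	f, leaf = newFsDst([]string{"remote:"})
	fs.Debugf(f, "rooted at remote:, leaf=%q", leaf) // leaf == ""
}
```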
||||
// parseTimeArgument parses a timestamp string according to specific layouts
|
||||
func parseTimeArgument(timeString string) (time.Time, error) {
|
||||
layout := defaultLayout
|
||||
@@ -128,51 +107,47 @@ func createEmptyObject(ctx context.Context, remote string, modTime time.Time, f
|
||||
}
|
||||
|
||||
// Touch create new file or change file modification time.
|
||||
func Touch(ctx context.Context, f fs.Fs, remote string) error {
|
||||
func Touch(ctx context.Context, f fs.Fs, fileName string) error {
|
||||
t, err := timeOfTouch()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(nil, "Touch time %v", t)
|
||||
file, err := f.NewObject(ctx, remote)
|
||||
file, err := f.NewObject(ctx, fileName)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrorObjectNotFound) {
|
||||
// Touching non-existent path, possibly creating it as a new file
|
||||
if remote == "" {
|
||||
fs.Logf(f, "Not touching empty directory")
|
||||
return nil
|
||||
}
|
||||
// Touch single non-existent file
|
||||
if notCreateNewFile {
|
||||
fs.Logf(f, "Not touching non-existent file due to --no-create")
|
||||
return nil
|
||||
}
|
||||
if recursive {
|
||||
// For consistency, --recursive never creates new files.
|
||||
fs.Logf(f, "Not touching non-existent file due to --recursive")
|
||||
return nil
|
||||
}
|
||||
if operations.SkipDestructive(ctx, f, "touch (create)") {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(f, "Touching (creating) %q", remote)
|
||||
if err = createEmptyObject(ctx, remote, t, f); err != nil {
|
||||
fs.Debugf(f, "Touching (creating)")
|
||||
if err = createEmptyObject(ctx, fileName, t, f); err != nil {
|
||||
return fmt.Errorf("failed to touch (create): %w", err)
|
||||
}
|
||||
}
|
||||
if errors.Is(err, fs.ErrorIsDir) {
|
||||
// Touching existing directory
|
||||
if recursive {
|
||||
fs.Debugf(f, "Touching recursively files in directory %q", remote)
|
||||
return operations.TouchDir(ctx, f, remote, t, true)
|
||||
// Touch existing directory, recursive
|
||||
fs.Debugf(nil, "Touching files in directory recursively")
|
||||
return operations.TouchDir(ctx, f, t, true)
|
||||
}
|
||||
fs.Debugf(f, "Touching non-recursively files in directory %q", remote)
|
||||
return operations.TouchDir(ctx, f, remote, t, false)
|
||||
// Touch existing directory without recursing
|
||||
fs.Debugf(nil, "Touching files in directory non-recursively")
|
||||
return operations.TouchDir(ctx, f, t, false)
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Touch single existing file
|
||||
if !operations.SkipDestructive(ctx, remote, "touch") {
|
||||
fs.Debugf(f, "Touching %q", remote)
|
||||
if !operations.SkipDestructive(ctx, fileName, "touch") {
|
||||
fs.Debugf(f, "Touching %q", fileName)
|
||||
err = file.SetModTime(ctx, t)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to touch: %w", err)
|
||||
|
||||
@@ -113,15 +113,6 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) {
|
||||
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported)
|
||||
}
|
||||
|
||||
func TestTouchEmptyName(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
err := Touch(context.Background(), r.Fremote, "")
|
||||
require.NoError(t, err)
|
||||
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.ModTimeNotSupported)
|
||||
}
|
||||
|
||||
func TestTouchEmptyDir(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
@@ -43,6 +43,7 @@ func init() {
|
||||
flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout")
|
||||
// Files
|
||||
flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
|
||||
flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
|
||||
flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
|
||||
// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
|
||||
// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
|
||||
|
||||
@@ -102,7 +102,8 @@ var envInitial []string
|
||||
// sets testConfig to testFolder/rclone.config.
|
||||
func createTestEnvironment(t *testing.T) {
|
||||
//Set temporary folder for config and test data
|
||||
tempFolder := t.TempDir()
|
||||
tempFolder, err := ioutil.TempDir("", "rclone_cmdtest_")
|
||||
require.NoError(t, err)
|
||||
testFolder = filepath.ToSlash(tempFolder)
|
||||
|
||||
// Set path to temporary config file
|
||||
|
||||
@@ -105,18 +105,15 @@ WebDAV or S3, that work out of the box.)

{{< provider_list >}}
{{< provider name="1Fichier" home="https://1fichier.com/" config="/fichier/" start="true">}}
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
{{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" note="#status">}}
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
{{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/s3/#scaleway" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
@@ -151,13 +148,12 @@ WebDAV or S3, that work out of the box.)
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}

@@ -33,7 +33,7 @@ First run:
This will guide you through an interactive setup process:

```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
@@ -99,11 +99,9 @@ Remote or path to alias.

Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path".

Properties:

- Config: remote
- Env Var: RCLONE_ALIAS_REMOTE
- Type: string
- Required: true
- Default: ""

{{< rem autogenerated options stop >}}

@@ -53,7 +53,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:

```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
r) Rename remote
c) Copy remote
@@ -168,12 +168,10 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_ACD_CLIENT_ID
- Type: string
- Required: false
- Default: ""

#### --acd-client-secret

@@ -181,12 +179,10 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_ACD_CLIENT_SECRET
- Type: string
- Required: false
- Default: ""

### Advanced options

@@ -196,12 +192,10 @@ Here are the advanced options specific to amazon cloud drive (Amazon Drive).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_ACD_TOKEN
- Type: string
- Required: false
- Default: ""

#### --acd-auth-url

@@ -209,12 +203,10 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_ACD_AUTH_URL
- Type: string
- Required: false
- Default: ""

#### --acd-token-url

@@ -222,23 +214,19 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_ACD_TOKEN_URL
- Type: string
- Required: false
- Default: ""

#### --acd-checkpoint

Checkpoint for internal polling (debug).

Properties:

- Config: checkpoint
- Env Var: RCLONE_ACD_CHECKPOINT
- Type: string
- Required: false
- Default: ""

#### --acd-upload-wait-per-gb

@@ -264,8 +252,6 @@ of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.

Properties:

- Config: upload_wait_per_gb
- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
- Type: Duration
@@ -284,8 +270,6 @@ To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.

Properties:

- Config: templink_threshold
- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
- Type: SizeSuffix
@@ -293,12 +277,10 @@ Properties:

#### --acd-encoding

The encoding for the backend.
This sets the encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_ACD_ENCODING
- Type: MultiEncoder

@@ -547,39 +547,3 @@ put them back in again.` >}}
* bbabich <bbabich@datamossa.com>
* David <dp.davide.palma@gmail.com>
* Borna Butkovic <borna@favicode.net>
* Fredric Arklid <fredric.arklid@consid.se>
* Andy Jackson <Andrew.Jackson@bl.uk>
* Sinan Tan <i@tinytangent.com>
* deinferno <14363193+deinferno@users.noreply.github.com>
* rsapkf <rsapkfff@pm.me>
* Will Holtz <wholtz@gmail.com>
* GGG KILLER <gggkiller2@gmail.com>
* Logeshwaran Murugesan <logeshwaran@testpress.in>
* Lu Wang <coolwanglu@gmail.com>
* Bumsu Hyeon <ksitht@gmail.com>
* Shmz Ozggrn <98463324+ShmzOzggrn@users.noreply.github.com>
* Kim <kim@jotta.no>
* Niels van de Weem <n.van.de.weem@smile.nl>
* Koopa <codingkoopa@gmail.com>
* Yunhai Luo <yunhai-luo@hotmail.com>
* Charlie Jiang <w@chariri.moe>
* Alain Nussbaumer <alain.nussbaumer@alleluia.ch>
* Vanessasaurus <814322+vsoch@users.noreply.github.com>
* Isaac Levy <isaac.r.levy@gmail.com>
* Gourav T <workflowautomation@protonmail.com>
* Paulo Martins <paulo.pontes.m@gmail.com>
* viveknathani <viveknathani2402@gmail.com>
* Eng Zer Jun <engzerjun@gmail.com>
* Abhiraj <abhiraj.official15@gmail.com>
* Márton Elek <elek@apache.org> <elek@users.noreply.github.com>
* Vincent Murphy <vdm@vdm.ie>
* ctrl-q <34975747+ctrl-q@users.noreply.github.com>
* Nil Alexandrov <nalexand@akamai.com>
* GuoXingbin <101376330+guoxingbin@users.noreply.github.com>
* Berkan Teber <berkan@berkanteber.com>
* Tobias Klauser <tklauser@distanz.ch>
* KARBOWSKI Piotr <piotr.karbowski@gmail.com>
* GH <geeklihui@foxmail.com>
* rafma0 <int.main@gmail.com>
* Adrien Rey-Jarthon <jobs@adrienjarthon.com>
* Nick Gooding <73336146+nickgooding@users.noreply.github.com>

@@ -19,7 +19,7 @@ configuration. For a remote called `remote`. First run:
This will guide you through an interactive setup process:

```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
@@ -81,14 +81,6 @@ key. It is stored using RFC3339 Format time with nanosecond
precision. The metadata is supplied during directory listings so
there is no overhead to using it.

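For reference, the RFC3339 nanosecond format mentioned above looks like this (a standalone illustration, not rclone code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Modification times like the one described above are stored as
	// RFC3339 strings with nanosecond precision.
	mtime := time.Date(2022, 3, 1, 12, 34, 56, 789000000, time.UTC)
	fmt.Println(mtime.Format(time.RFC3339Nano)) // 2022-03-01T12:34:56.789Z
}
```
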
### Performance

When uploading large files, increasing the value of
`--azureblob-upload-concurrency` will increase performance at the cost
of using more memory. The default of 16 is set quite conservatively to
use less memory. It may be necessary to raise it to 64 or higher to
fully utilize a 1 GBit/s link with a single file transfer.

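As a rough worked example of that memory trade-off (assuming the defaults mentioned in these docs: 4 MiB chunks, upload concurrency 16, and rclone's default of 4 transfers):

```go
package main

import "fmt"

func main() {
	const (
		transfers   = 4       // rclone's default --transfers (assumed)
		concurrency = 16      // --azureblob-upload-concurrency default
		chunkSize   = 4 << 20 // --azureblob-chunk-size default, 4 MiB
	)
	// Worst-case buffered upload data, per the note in the docs above.
	fmt.Printf("up to %d MiB held in memory\n", (transfers*concurrency*chunkSize)>>20)
}
```

With those defaults the bound works out to 256 MiB; raising the concurrency to 64 would quadruple it.
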
### Restricted filename characters

In addition to the [default restricted characters set](/overview/#restricted-characters)
@@ -166,12 +158,10 @@ Storage Account Name.

Leave blank to use SAS URL or Emulator.

Properties:

- Config: account
- Env Var: RCLONE_AZUREBLOB_ACCOUNT
- Type: string
- Required: false
- Default: ""

#### --azureblob-service-principal-file

@@ -187,12 +177,10 @@ Leave blank normally. Needed only if you want to use a service principal instead
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.


Properties:

- Config: service_principal_file
- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE
- Type: string
- Required: false
- Default: ""

#### --azureblob-key

@@ -200,12 +188,10 @@ Storage Account Key.

Leave blank to use SAS URL or Emulator.

Properties:

- Config: key
- Env Var: RCLONE_AZUREBLOB_KEY
- Type: string
- Required: false
- Default: ""

#### --azureblob-sas-url

@@ -213,12 +199,10 @@ SAS URL for container level access only.

Leave blank if using account/key or Emulator.

Properties:

- Config: sas_url
- Env Var: RCLONE_AZUREBLOB_SAS_URL
- Type: string
- Required: false
- Default: ""

#### --azureblob-use-msi

@@ -233,8 +217,6 @@ the user-assigned identity will be used by default. If the resource has multiple
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.

Properties:

- Config: use_msi
- Env Var: RCLONE_AZUREBLOB_USE_MSI
- Type: bool
@@ -246,8 +228,6 @@ Uses local storage emulator if provided as 'true'.

Leave blank if using real azure storage endpoint.

Properties:

- Config: use_emulator
- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR
- Type: bool
@@ -263,12 +243,10 @@ Object ID of the user-assigned MSI to use, if any.

Leave blank if msi_client_id or msi_mi_res_id specified.

Properties:

- Config: msi_object_id
- Env Var: RCLONE_AZUREBLOB_MSI_OBJECT_ID
- Type: string
- Required: false
- Default: ""

#### --azureblob-msi-client-id

@@ -276,12 +254,10 @@ Object ID of the user-assigned MSI to use, if any.

Leave blank if msi_object_id or msi_mi_res_id specified.

Properties:

- Config: msi_client_id
- Env Var: RCLONE_AZUREBLOB_MSI_CLIENT_ID
- Type: string
- Required: false
- Default: ""

#### --azureblob-msi-mi-res-id

@@ -289,12 +265,10 @@ Azure resource ID of the user-assigned MSI to use, if any.

Leave blank if msi_client_id or msi_object_id specified.

Properties:

- Config: msi_mi_res_id
- Env Var: RCLONE_AZUREBLOB_MSI_MI_RES_ID
- Type: string
- Required: false
- Default: ""

#### --azureblob-endpoint

@@ -302,65 +276,32 @@ Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_AZUREBLOB_ENDPOINT
- Type: string
- Required: false
- Default: ""

#### --azureblob-upload-cutoff

Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF
- Type: string
- Required: false
- Default: ""

#### --azureblob-chunk-size

Upload chunk size.
Upload chunk size (<= 100 MiB).

Note that this is stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.

Properties:
"--transfers" chunks stored at once in memory.

- Config: chunk_size
- Env Var: RCLONE_AZUREBLOB_CHUNK_SIZE
- Type: SizeSuffix
- Default: 4Mi

#### --azureblob-upload-concurrency

Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.

In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.

Properties:

- Config: upload_concurrency
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CONCURRENCY
- Type: int
- Default: 16

#### --azureblob-list-chunk

Size of blob list.
@@ -373,8 +314,6 @@ minutes per megabyte on average, it will time out (
). This can be used to limit the number of blob items to return, to
avoid the time out.

Properties:

- Config: list_chunk
- Env Var: RCLONE_AZUREBLOB_LIST_CHUNK
- Type: int
@@ -395,12 +334,10 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".

Properties:

- Config: access_tier
- Env Var: RCLONE_AZUREBLOB_ACCESS_TIER
- Type: string
- Required: false
- Default: ""

#### --azureblob-archive-tier-delete

@@ -419,8 +356,6 @@ replacement. This has the potential for data loss if the upload fails
archive tier blobs early may be chargeable.


Properties:

- Config: archive_tier_delete
- Env Var: RCLONE_AZUREBLOB_ARCHIVE_TIER_DELETE
- Type: bool
@@ -435,8 +370,6 @@ uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.

Properties:

- Config: disable_checksum
- Env Var: RCLONE_AZUREBLOB_DISABLE_CHECKSUM
- Type: bool
@@ -449,8 +382,6 @@ How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.

Properties:

- Config: memory_pool_flush_time
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_FLUSH_TIME
- Type: Duration
@@ -460,8 +391,6 @@ Properties:

Whether to use mmap buffers in internal memory pool.

Properties:

- Config: memory_pool_use_mmap
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_USE_MMAP
- Type: bool
@@ -469,12 +398,10 @@ Properties:

#### --azureblob-encoding

The encoding for the backend.
This sets the encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_AZUREBLOB_ENCODING
- Type: MultiEncoder
@@ -484,12 +411,10 @@ Properties:

Public access level of a container: blob or container.

Properties:

- Config: public_access
- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
- Type: string
- Required: false
- Default: ""
- Examples:
- ""
- The container and its blobs can be accessed only with an authorized request.
@@ -503,8 +428,6 @@ Properties:

If set, do not do HEAD before GET when getting objects.

Properties:

- Config: no_head_object
- Env Var: RCLONE_AZUREBLOB_NO_HEAD_OBJECT
- Type: bool

@@ -23,7 +23,7 @@ recommended method. See below for further details on generating and using
an Application Key.

```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
@@ -329,30 +329,24 @@ Here are the standard options specific to b2 (Backblaze B2).

Account ID or Application Key ID.

Properties:

- Config: account
- Env Var: RCLONE_B2_ACCOUNT
- Type: string
- Required: true
- Default: ""

#### --b2-key

Application Key.

Properties:

- Config: key
- Env Var: RCLONE_B2_KEY
- Type: string
- Required: true
- Default: ""

#### --b2-hard-delete

Permanently delete files on remote removal, otherwise hide files.

Properties:

- Config: hard_delete
- Env Var: RCLONE_B2_HARD_DELETE
- Type: bool
@@ -368,12 +362,10 @@ Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_B2_ENDPOINT
- Type: string
- Required: false
- Default: ""

#### --b2-test-mode

@@ -389,12 +381,10 @@ below will cause b2 to return specific errors:
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).

Properties:

- Config: test_mode
- Env Var: RCLONE_B2_TEST_MODE
- Type: string
- Required: false
- Default: ""

#### --b2-versions

@@ -403,8 +393,6 @@ Include old versions in directory listings.
Note that when using this no file write operations are permitted,
so you can't upload files or delete them.

Properties:

- Config: versions
- Env Var: RCLONE_B2_VERSIONS
- Type: bool
@@ -418,8 +406,6 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657 GiB (== 5 GB).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_B2_UPLOAD_CUTOFF
- Type: SizeSuffix
@@ -434,8 +420,6 @@ copied in chunks of this size.

The minimum is 0 and the maximum is 4.6 GiB.

Properties:

- Config: copy_cutoff
- Env Var: RCLONE_B2_COPY_CUTOFF
- Type: SizeSuffix
@@ -452,8 +436,6 @@ might be a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.

Properties:

- Config: chunk_size
- Env Var: RCLONE_B2_CHUNK_SIZE
- Type: SizeSuffix
@@ -468,8 +450,6 @@ uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.

Properties:

- Config: disable_checksum
- Env Var: RCLONE_B2_DISABLE_CHECKSUM
- Type: bool
@@ -486,20 +466,10 @@ If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.

The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".

Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")

Properties:

- Config: download_url
- Env Var: RCLONE_B2_DOWNLOAD_URL
- Type: string
- Required: false
- Default: ""

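For illustration, this is how a request URL is assembled from that template (the bucket and object path below are made up; the host is the example from the docs above):

```go
package main

import "fmt"

func main() {
	downloadURL := "https://mysubdomain.mydomain.tld" // --b2-download-url: protocol, no trailing slash
	bucket := "my-bucket"                             // hypothetical bucket name
	objectPath := "backups/2022/photo.jpg"            // hypothetical object path
	// rclone requests "{download_url}/file/{bucket_name}/{path}".
	fmt.Printf("%s/file/%s/%s\n", downloadURL, bucket, objectPath)
}
```
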
#### --b2-download-auth-duration

@@ -508,8 +478,6 @@ Time before the authorization token will expire in s or suffix ms|s|m|h|d.

The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.

Properties:

- Config: download_auth_duration
- Env Var: RCLONE_B2_DOWNLOAD_AUTH_DURATION
- Type: Duration
@@ -521,8 +489,6 @@ How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.

Properties:

- Config: memory_pool_flush_time
- Env Var: RCLONE_B2_MEMORY_POOL_FLUSH_TIME
- Type: Duration
@@ -532,8 +498,6 @@ Properties:

Whether to use mmap buffers in internal memory pool.

Properties:

- Config: memory_pool_use_mmap
- Env Var: RCLONE_B2_MEMORY_POOL_USE_MMAP
- Type: bool
@@ -541,12 +505,10 @@ Properties:

#### --b2-encoding

The encoding for the backend.
This sets the encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_B2_ENCODING
- Type: MultiEncoder

Some files were not shown because too many files have changed in this diff.