Mirror of https://github.com/rclone/rclone.git (synced 2026-01-04 17:43:50 +00:00)

Compare commits: v1.59.2 ... fix-webdav (1 commit)
| Author | SHA1 | Date |
|---|---|---|
|  | dfc5b0460b |  |

.github/workflows/build.yml (vendored, 24 lines changed)
@@ -245,6 +245,10 @@ jobs:
with:
go-version: 1.18.x

# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true

- name: Go module cache
uses: actions/cache@v2
with:
@@ -267,29 +271,27 @@ jobs:
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .

- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -297,12 +299,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .

- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
@@ -310,12 +312,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .

- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -323,7 +325,7 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .

- name: Upload artifacts
run: |
@@ -4,8 +4,8 @@ linters:
enable:
- deadcode
- errcheck
#- goimports
#- revive
- goimports
- revive
- ineffassign
- structcheck
- varcheck
MANUAL.html (generated, 5166 lines)
File diff suppressed because it is too large

MANUAL.txt (generated, 5343 lines)
File diff suppressed because it is too large

README.md (19 lines changed)
@@ -42,9 +42,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -85,19 +83,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

### Virtual storage providers

These backends adapt or modify other storage providers

* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

* MD5/SHA-1 hashes checked at all times for file integrity
@@ -112,7 +97,7 @@ These backends adapt or modify other storage providers
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

## Installation & documentation

@@ -133,5 +118,5 @@ Please see the [rclone website](https://rclone.org/) for:
License
-------

This is free software under the terms of the MIT license (check the
This is free software under the terms of MIT the license (check the
[COPYING file](/COPYING) included in this package).
@@ -9,7 +9,6 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -21,7 +20,6 @@ import (
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
//} else {
} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -539,10 +539,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("chunk size: %w", err)
return nil, fmt.Errorf("azure: chunk size: %w", err)
}
if opt.ListChunkSize > maxListChunkSize {
return nil, fmt.Errorf("blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
return nil, fmt.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
@@ -551,12 +551,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.AccessTier == "" {
opt.AccessTier = string(defaultAccessTier)
} else if !validateAccessTier(opt.AccessTier) {
return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
return nil, fmt.Errorf("Azure Blob: Supported access tiers are %s, %s and %s",
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}

if !validatePublicAccess((opt.PublicAccess)) {
return nil, fmt.Errorf("supported public access level are %s and %s",
return nil, fmt.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}

@@ -598,13 +598,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, fmt.Errorf("failed to parse credentials: %w", err)
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}
var actualEmulatorEndpoint = emulatorBlobEndpoint
if opt.Endpoint != "" {
actualEmulatorEndpoint = opt.Endpoint
}
u, err = url.Parse(actualEmulatorEndpoint)
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
@@ -648,7 +644,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})

if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
return nil, fmt.Errorf("Failed to acquire MSI token: %w", err)
}

u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
@@ -683,7 +679,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
return nil, fmt.Errorf("failed to parse credentials: %w", err)
return nil, fmt.Errorf("Failed to parse credentials: %w", err)
}

u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
@@ -703,7 +699,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
return nil, errors.New("container name in SAS URL and container provided in command do not match")
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
}
containerURL := azblob.NewContainerURL(*u, pipeline)
f.cntURLcache[parts.ContainerName] = &containerURL
@@ -731,7 +727,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options)
serviceURL = azblob.NewServiceURL(*u, pipe)
default:
return nil, errors.New("no authentication method configured")
return nil, errors.New("No authentication method configured")
}
f.svcURL = &serviceURL
@@ -1297,6 +1293,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}

func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}

return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
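getMemoryPool, shown in the hunk above, reuses the Fs's long-lived buffer pool when the requested size equals the configured chunk size and builds a throwaway pool for anything else. A minimal sketch of the same pattern using only the standard library (sync.Pool stands in for rclone's lib/pool; the size constant and names are illustrative assumptions, not rclone's API):

```go
package main

import "sync"

const defaultChunkSize = 4 << 20 // assumed value, purely for the sketch

// newBufPool returns a sync.Pool handing out byte slices of a fixed size.
func newBufPool(size int) *sync.Pool {
	return &sync.Pool{New: func() any { return make([]byte, size) }}
}

// sharedPool is the long-lived pool for the common chunk size.
var sharedPool = newBufPool(defaultChunkSize)

// getPool mirrors the getMemoryPool pattern: reuse the shared pool for the
// default chunk size, build a transient pool for any other size.
func getPool(size int) *sync.Pool {
	if size == defaultChunkSize {
		return sharedPool
	}
	return newBufPool(size)
}

func main() {
	p := getPool(defaultChunkSize)
	buf := p.Get().([]byte)
	defer p.Put(buf)
	_ = buf // use the buffer for one upload chunk
}
```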
@@ -1328,7 +1337,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
data, err := base64.StdEncoding.DecodeString(o.md5)
if err != nil {
return "", fmt.Errorf("failed to decode Content-MD5: %q: %w", o.md5, err)
return "", fmt.Errorf("Failed to decode Content-MD5: %q: %w", o.md5, err)
}
return hex.EncodeToString(data), nil
}
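The Hash hunk above converts the base64-encoded Content-MD5 that the blob service returns into the lowercase hex form rclone reports. A small self-contained illustration of that conversion (the input value here is just an example):

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// Content-MD5 as it appears in blob properties, base64-encoded (example value).
	b64 := "sQqNsWTgdUEFt6mb5y4/5Q=="
	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		panic(err)
	}
	// rclone-style lowercase hex representation of the same 16 MD5 bytes.
	fmt.Println(hex.EncodeToString(raw))
}
```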
@@ -1518,7 +1527,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
return nil, fmt.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
@@ -1676,14 +1685,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}

uploadParts := maxUploadParts
uploadParts := int64(maxUploadParts)
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(partSize),
@@ -1743,7 +1752,7 @@ func (o *Object) AccessTier() azblob.AccessTierType {
// SetTier performs changing object tier
func (o *Object) SetTier(tier string) error {
if !validateAccessTier(tier) {
return fmt.Errorf("tier %s not supported by Azure Blob Storage", tier)
return fmt.Errorf("Tier %s not supported by Azure Blob Storage", tier)
}

// Check if current tier already matches with desired tier
@@ -1754,12 +1763,12 @@ func (o *Object) SetTier(tier string) error {
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{}, azblob.RehydratePriorityStandard)
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(ctx, err)
})

if err != nil {
return fmt.Errorf("failed to set Blob Tier: %w", err)
return fmt.Errorf("Failed to set Blob Tier: %w", err)
}

// Set access tier on local object also, this typically
@@ -119,7 +119,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {

b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
@@ -130,7 +130,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
}

return result, nil
@@ -280,7 +280,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return "B2 root"
return fmt.Sprintf("B2 root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("B2 bucket %s", f.rootBucket)
@@ -1205,7 +1205,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
return time.Since(time.Time(timestamp)).Hours() > 24
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}

// Delete Config.Transfers in parallel
@@ -1482,9 +1485,13 @@ func (o *Object) Size() int64 {
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (e.g. Cyberduck) use this
func cleanSHA1(sha1 string) string {
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
return strings.TrimPrefix(strings.ToLower(sha1), unverified)
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}

// decodeMetaDataRaw sets the metadata from the data passed in
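The cleanSHA1 hunk above is behaviourally a no-op: strings.TrimPrefix returns its input unchanged when the prefix is absent, so the compact one-liner and the expanded HasPrefix form agree. A quick check with illustrative values:

```go
package main

import (
	"fmt"
	"strings"
)

// cleanSHA1 lowercases the hash and drops the "unverified:" prefix if present,
// matching the one-line variant shown in the diff.
func cleanSHA1(sha1 string) string {
	const unverified = "unverified:"
	return strings.TrimPrefix(strings.ToLower(sha1), unverified)
}

func main() {
	fmt.Println(cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B")) // prefix stripped, lowercased
	fmt.Println(cleanSHA1("3f786850e387550fdab836ed7e6dc881de23001b"))            // already clean, returned unchanged
}
```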
@@ -97,7 +97,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
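The two Calculator calls above show that the helper's signature differs between the two sides of this compare, but the intent is the same: pick a chunk size that keeps the upload within the provider's maximum part count. A rough sketch of that arithmetic, not rclone's chunksize package, just the idea, with made-up names and values:

```go
package main

import "fmt"

// chunkSizeFor doubles the starting chunk size until size fits in at most maxParts chunks.
func chunkSizeFor(size, maxParts, startChunkSize int64) int64 {
	chunkSize := startChunkSize
	for size/chunkSize >= maxParts {
		chunkSize *= 2
	}
	return chunkSize
}

func main() {
	const MiB int64 = 1 << 20
	// e.g. a 1 TiB upload with a 10,000 part limit and a 96 MiB starting chunk size
	fmt.Println(chunkSizeFor(1<<40, 10000, 96*MiB) / MiB) // prints 192
}
```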
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.EqualFold(srcPath, dstPath) {
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
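The pair of lines above swaps a strings.ToLower comparison for strings.EqualFold. The two agree for ASCII paths, but EqualFold avoids allocating two lowered copies and uses Unicode case folding rather than simple lowercasing; a small demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	a, b := "Bucket/Path/File.TXT", "bucket/path/file.txt"
	fmt.Println(strings.EqualFold(a, b))                  // true
	fmt.Println(strings.ToLower(a) == strings.ToLower(b)) // true, but builds two lowered copies

	// Folding that plain lowercasing misses: U+017F LATIN SMALL LETTER LONG S vs "s".
	fmt.Println(strings.EqualFold("\u017f", "s"))                  // true
	fmt.Println(strings.ToLower("\u017f") == strings.ToLower("s")) // false
}
```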
backend/cache/cache.go (vendored, 2 lines changed)

@@ -1128,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return fmt.Errorf("unknown object type %T", entry)
return fmt.Errorf("Unknown object type %T", entry)
}
}
backend/cache/cache_test.go (vendored, 2 lines changed)

@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}
backend/cache/plex.go (vendored, 2 lines changed)

@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %w", err)
return fmt.Errorf("failed to obtain token: %v", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {
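Several hunks in this compare (plex.go above, and storage_persistent.go below) differ only in whether fmt.Errorf uses %w or %v. The distinction matters to callers: %w keeps the original error in the chain so errors.Is and errors.As still match it, while %v flattens it into plain text. A minimal illustration with a made-up sentinel error:

```go
package main

import (
	"errors"
	"fmt"
)

var errAuth = errors.New("authentication failed")

func main() {
	wrapped := fmt.Errorf("failed to obtain token: %w", errAuth)
	flattened := fmt.Errorf("failed to obtain token: %v", errAuth)

	fmt.Println(errors.Is(wrapped, errAuth))   // true: %w keeps errAuth in the error chain
	fmt.Println(errors.Is(flattened, errAuth)) // false: %v only keeps its message text
}
```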
backend/cache/storage_memory.go (vendored, 5 lines changed)

@@ -76,7 +76,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {

// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
for key := range m.db.Items() {
var items map[string]cache.Item

items = m.db.Items()
for key := range items {
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {
backend/cache/storage_persistent.go (vendored, 19 lines changed)

@@ -250,7 +250,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return fmt.Errorf("error during unmarshalling obj: %w", err)
return fmt.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return fmt.Errorf("missing cached dir: %v", cachedDir)
@@ -456,7 +456,10 @@ func (b *Persistent) HasEntry(remote string) bool {

return fmt.Errorf("couldn't find object (%v)", remote)
})
return err == nil
if err == nil {
return true
}
return false
}

// HasChunk confirms the existence of a single chunk of an object
@@ -551,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -901,16 +904,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -966,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return fmt.Errorf("pending upload not updated: %w", err)
return fmt.Errorf("pending upload not updated %v", err)
}

return nil
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")

func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {

newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)

dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)

@@ -35,7 +35,6 @@ func TestIntegration(t *testing.T) {
"MimeType",
"GetTier",
"SetTier",
"Metadata",
},
UnimplementableFsMethods: []string{
"PublicLink",
@@ -54,7 +53,6 @@ func TestIntegration(t *testing.T) {
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}
@@ -1,992 +0,0 @@
|
||||
// Package combine implements a backend to combine multiple remotes in a directory tree
|
||||
package combine
|
||||
|
||||
/*
|
||||
Have API to add/remove branches in the combine
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "combine",
|
||||
Description: "Combine several remotes into one",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "upstreams",
|
||||
Help: `Upstreams for combining
|
||||
|
||||
These should be in the form
|
||||
|
||||
dir=remote:path dir2=remote2:path
|
||||
|
||||
Where before the = is specified the root directory and after is the remote to
|
||||
put there.
|
||||
|
||||
Embedded spaces can be added using quotes
|
||||
|
||||
"dir=remote:path with space" "dir2=remote2:path with space"
|
||||
|
||||
`,
|
||||
Required: true,
|
||||
Default: fs.SpaceSepList(nil),
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Upstreams fs.SpaceSepList `config:"upstreams"`
|
||||
}
|
||||
|
||||
// Fs represents a combine of upstreams
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
hashSet hash.Set // common hashes
|
||||
when time.Time // directory times
|
||||
upstreams map[string]*upstream // map of upstreams
|
||||
}
|
||||
|
||||
// adjustment stores the info to add a prefix to a path or chop characters off
|
||||
type adjustment struct {
|
||||
root string
|
||||
rootSlash string
|
||||
mountpoint string
|
||||
mountpointSlash string
|
||||
}
|
||||
|
||||
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
|
||||
//
|
||||
// mountpoint is the point the upstream is mounted and root is the combine root
|
||||
func newAdjustment(root, mountpoint string) (a adjustment) {
|
||||
return adjustment{
|
||||
root: root,
|
||||
rootSlash: root + "/",
|
||||
mountpoint: mountpoint,
|
||||
mountpointSlash: mountpoint + "/",
|
||||
}
|
||||
}
|
||||
|
||||
var errNotUnderRoot = errors.New("file not under root")
|
||||
|
||||
// do makes the adjustment on s, mapping an upstream path into a combine path
|
||||
func (a *adjustment) do(s string) (string, error) {
|
||||
absPath := join(a.mountpoint, s)
|
||||
if a.root == "" {
|
||||
return absPath, nil
|
||||
}
|
||||
if absPath == a.root {
|
||||
return "", nil
|
||||
}
|
||||
if !strings.HasPrefix(absPath, a.rootSlash) {
|
||||
return "", errNotUnderRoot
|
||||
}
|
||||
return absPath[len(a.rootSlash):], nil
|
||||
}
|
||||
|
||||
// undo makes the adjustment on s, mapping a combine path into an upstream path
|
||||
func (a *adjustment) undo(s string) (string, error) {
|
||||
absPath := join(a.root, s)
|
||||
if absPath == a.mountpoint {
|
||||
return "", nil
|
||||
}
|
||||
if !strings.HasPrefix(absPath, a.mountpointSlash) {
|
||||
return "", errNotUnderRoot
|
||||
}
|
||||
return absPath[len(a.mountpointSlash):], nil
|
||||
}
|
||||
|
||||
// upstream represents an upstream Fs
|
||||
type upstream struct {
|
||||
f fs.Fs
|
||||
parent *Fs
|
||||
dir string // directory the upstream is mounted
|
||||
pathAdjustment adjustment // how to fiddle with the path
|
||||
}
|
||||
|
||||
// Create an upstream from the directory it is mounted on and the remote
|
||||
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
|
||||
uFs, err := cache.Get(ctx, remote)
|
||||
if err == fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
|
||||
}
|
||||
u := &upstream{
|
||||
f: uFs,
|
||||
parent: f,
|
||||
dir: dir,
|
||||
pathAdjustment: newAdjustment(f.root, dir),
|
||||
}
|
||||
cache.PinUntilFinalized(u.f, u)
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
|
||||
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Backward compatible to old config
|
||||
if len(opt.Upstreams) == 0 {
|
||||
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
|
||||
}
|
||||
for _, u := range opt.Upstreams {
|
||||
if strings.HasPrefix(u, name+":") {
|
||||
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
|
||||
}
|
||||
}
|
||||
isDir := false
|
||||
for strings.HasSuffix(root, "/") {
|
||||
root = root[:len(root)-1]
|
||||
isDir = true
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
|
||||
when: time.Now(),
|
||||
}
|
||||
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, upstream := range opt.Upstreams {
|
||||
upstream := upstream
|
||||
g.Go(func() (err error) {
|
||||
equal := strings.IndexRune(upstream, '=')
|
||||
if equal < 0 {
|
||||
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
|
||||
}
|
||||
dir, remote := upstream[:equal], upstream[equal+1:]
|
||||
if dir == "" {
|
||||
return fmt.Errorf("empty dir in upstream definition %q", upstream)
|
||||
}
|
||||
if remote == "" {
|
||||
return fmt.Errorf("empty remote in upstream definition %q", upstream)
|
||||
}
|
||||
if strings.ContainsRune(dir, '/') {
|
||||
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
|
||||
}
|
||||
u, err := f.newUpstream(gCtx, dir, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mu.Lock()
|
||||
if _, found := f.upstreams[dir]; found {
|
||||
err = fmt.Errorf("duplicate directory name %q", dir)
|
||||
} else {
|
||||
f.upstreams[dir] = u
|
||||
}
|
||||
mu.Unlock()
|
||||
return err
|
||||
})
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// check features
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(u.f) {
|
||||
canMove = false
|
||||
}
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// Enable ListR when upstreams either support ListR or is local
|
||||
// But not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().ListR != nil {
|
||||
features.ListR = f.ListR
|
||||
} else if !u.f.Features().IsLocal {
|
||||
features.ListR = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Purge when any upstreams support it
|
||||
if features.Purge == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Purge != nil {
|
||||
features.Purge = f.Purge
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable Shutdown when any upstreams support it
|
||||
if features.Shutdown == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().Shutdown != nil {
|
||||
features.Shutdown = f.Shutdown
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable DirCacheFlush when any upstreams support it
|
||||
if features.DirCacheFlush == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().DirCacheFlush != nil {
|
||||
features.DirCacheFlush = f.DirCacheFlush
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Enable ChangeNotify when any upstreams support it
|
||||
if features.ChangeNotify == nil {
|
||||
for _, u := range f.upstreams {
|
||||
if u.f.Features().ChangeNotify != nil {
|
||||
features.ChangeNotify = f.ChangeNotify
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f.features = features
|
||||
|
||||
// Get common intersection of hashes
|
||||
var hashSet hash.Set
|
||||
var first = true
|
||||
for _, u := range f.upstreams {
|
||||
if first {
|
||||
hashSet = u.f.Hashes()
|
||||
first = false
|
||||
} else {
|
||||
hashSet = hashSet.Overlap(u.f.Hashes())
|
||||
}
|
||||
}
|
||||
f.hashSet = hashSet
|
||||
|
||||
// Check to see if the root is actually a file
|
||||
if f.root != "" && !isDir {
|
||||
_, err := f.NewObject(ctx, "")
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check to see if the root path is actually an existing file
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
// Adjust path adjustment to remove leaf
|
||||
for _, u := range f.upstreams {
|
||||
u.pathAdjustment = newAdjustment(f.root, u.dir)
|
||||
}
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Run a function over all the upstreams in parallel
|
||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
g.Go(func() (err error) {
|
||||
return fn(gCtx, u)
|
||||
})
|
||||
}
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
// join the elements together but unlike path.Join return empty string
|
||||
func join(elem ...string) string {
|
||||
result := path.Join(elem...)
|
||||
if result == "." {
|
||||
return ""
|
||||
}
|
||||
if len(result) > 0 && result[0] == '/' {
|
||||
result = result[1:]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// find the upstream for the remote passed in, returning the upstream and the adjusted path
|
||||
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
|
||||
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
|
||||
for _, u := range f.upstreams {
|
||||
uRemote, err = u.pathAdjustment.undo(remote)
|
||||
if err == nil {
|
||||
return u, uRemote, nil
|
||||
}
|
||||
}
|
||||
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("combine root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Rmdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return f.hashSet
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
// The root always exists
|
||||
if f.root == "" && dir == "" {
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.f.Mkdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// purge the upstream or fallback to a slow way
|
||||
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
||||
if do := u.f.Features().Purge; do != nil {
|
||||
err = do(ctx, dir)
|
||||
} else {
|
||||
err = operations.Purge(ctx, u.f, dir)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
if f.root == "" && dir == "" {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return u.purge(ctx, "")
|
||||
})
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return u.purge(ctx, uRemote)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
dstU, dstRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().Move
|
||||
useCopy := false
|
||||
if do == nil {
|
||||
do = dstU.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
useCopy = true
|
||||
}
|
||||
|
||||
o, err := do(ctx, srcObj.Object, dstRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If did Copy then remove the source object
|
||||
if useCopy {
|
||||
err = srcObj.Remove(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return dstU.newObject(o), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
dstU, dstURemote, err := f.findUpstream(dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
do := dstU.f.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
fs.Logf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
|
||||
return do(ctx, srcU.f, srcURemote, dstURemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var uChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
if do := u.f.Features().ChangeNotify; do != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
newPath, err := u.pathAdjustment.do(path)
|
||||
if err != nil {
|
||||
fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
|
||||
return
|
||||
}
|
||||
fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
|
||||
notifyFunc(newPath, entryType)
|
||||
}
|
||||
do(ctx, wrappedNotifyFunc, ch)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
for i := range ch {
|
||||
for _, c := range uChans {
|
||||
c <- i
|
||||
}
|
||||
}
|
||||
for _, c := range uChans {
|
||||
close(c)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
ctx := context.Background()
|
||||
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().DirCacheFlush; do != nil {
|
||||
do()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||
srcPath := src.Remote()
|
||||
u, uRemote, err := f.findUpstream(srcPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uSrc := operations.NewOverrideRemote(src, uRemote)
|
||||
var o fs.Object
|
||||
if stream {
|
||||
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
|
||||
} else {
|
||||
o, err = u.f.Put(ctx, in, uSrc, options...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, false, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, true, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
usage := &fs.Usage{
|
||||
Total: new(int64),
|
||||
Used: new(int64),
|
||||
Trashed: new(int64),
|
||||
Other: new(int64),
|
||||
Free: new(int64),
|
||||
Objects: new(int64),
|
||||
}
|
||||
for _, u := range f.upstreams {
|
||||
doAbout := u.f.Features().About
|
||||
if doAbout == nil {
|
||||
continue
|
||||
}
|
||||
usg, err := doAbout(ctx)
|
||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if usg.Total != nil && usage.Total != nil {
|
||||
*usage.Total += *usg.Total
|
||||
} else {
|
||||
usage.Total = nil
|
||||
}
|
||||
if usg.Used != nil && usage.Used != nil {
|
||||
*usage.Used += *usg.Used
|
||||
} else {
|
||||
usage.Used = nil
|
||||
}
|
||||
if usg.Trashed != nil && usage.Trashed != nil {
|
||||
*usage.Trashed += *usg.Trashed
|
||||
} else {
|
||||
usage.Trashed = nil
|
||||
}
|
||||
if usg.Other != nil && usage.Other != nil {
|
||||
*usage.Other += *usg.Other
|
||||
} else {
|
||||
usage.Other = nil
|
||||
}
|
||||
if usg.Free != nil && usage.Free != nil {
|
||||
*usage.Free += *usg.Free
|
||||
} else {
|
||||
usage.Free = nil
|
||||
}
|
||||
if usg.Objects != nil && usage.Objects != nil {
|
||||
*usage.Objects += *usg.Objects
|
||||
} else {
|
||||
usage.Objects = nil
|
||||
}
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Wraps entries for this upstream
|
||||
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
|
||||
for i, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
entries[i] = u.newObject(x)
|
||||
case fs.Directory:
|
||||
newDir := fs.NewDirCopy(ctx, x)
|
||||
newPath, err := u.pathAdjustment.do(newDir.Remote())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir.SetRemote(newPath)
|
||||
entries[i] = newDir
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown entry type %T", entry)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
|
||||
if f.root == "" && dir == "" {
|
||||
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||
for combineDir := range f.upstreams {
|
||||
d := fs.NewDir(combineDir, f.when)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries, err = u.f.List(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.wrapEntries(ctx, entries)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
|
||||
if f.root == "" && dir == "" {
|
||||
rootEntries, err := f.List(ctx, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = callback(rootEntries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mu sync.Mutex
|
||||
syncCallback := func(entries fs.DirEntries) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return callback(entries)
|
||||
}
|
||||
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
return f.ListR(ctx, u.dir, syncCallback)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wrapCallback := func(entries fs.DirEntries) error {
|
||||
entries, err := u.wrapEntries(ctx, entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(entries)
|
||||
}
|
||||
if do := u.f.Features().ListR; do != nil {
|
||||
err = do(ctx, uRemote, wrapCallback)
|
||||
} else {
|
||||
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
|
||||
}
|
||||
if err == fs.ErrorDirNotFound {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewObject creates a new remote combine file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
u, uRemote, err := f.findUpstream(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if uRemote == "" || strings.HasSuffix(uRemote, "/") {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
o, err := u.f.NewObject(ctx, uRemote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.newObject(o), nil
|
||||
}
|
||||
|
||||
// Precision is the greatest Precision of all upstreams
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
var greatestPrecision time.Duration
|
||||
for _, u := range f.upstreams {
|
||||
uPrecision := u.f.Precision()
|
||||
if uPrecision > greatestPrecision {
|
||||
greatestPrecision = uPrecision
|
||||
}
|
||||
}
|
||||
return greatestPrecision
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
|
||||
if do := u.f.Features().Shutdown; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Object describes a wrapped Object
|
||||
//
|
||||
// This is a wrapped Object which knows its path prefix
|
||||
type Object struct {
|
||||
fs.Object
|
||||
u *upstream
|
||||
}
|
||||
|
||||
func (u *upstream) newObject(o fs.Object) *Object {
|
||||
return &Object{
|
||||
Object: o,
|
||||
u: u,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.u.parent
|
||||
}
|
||||
|
||||
// String returns the remote path
|
||||
func (o *Object) String() string {
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
newPath, err := o.u.pathAdjustment.do(o.Object.String())
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Bad object: %v", err)
|
||||
return err.Error()
|
||||
}
|
||||
return newPath
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if known
|
||||
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
|
||||
if do, ok := o.Object.(fs.MimeTyper); ok {
|
||||
mimeType = do.MimeType(ctx)
|
||||
}
|
||||
return mimeType
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or
|
||||
// nil if it isn't wrapping anything
|
||||
func (o *Object) UnWrap() fs.Object {
|
||||
return o.Object
|
||||
}
|
||||
|
||||
// GetTier returns storage tier or class of the Object
|
||||
func (o *Object) GetTier() string {
|
||||
do, ok := o.Object.(fs.GetTierer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.GetTier()
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
do, ok := o.Object.(fs.IDer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.ID()
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := o.Object.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// SetTier performs changing storage tier of the Object if
|
||||
// multiple storage classes supported
|
||||
func (o *Object) SetTier(tier string) error {
|
||||
do, ok := o.Object.(fs.SetTierer)
|
||||
if !ok {
|
||||
return errors.New("underlying remote does not support SetTier")
|
||||
}
|
||||
return do.SetTier(tier)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
)
|
||||
@@ -1,94 +0,0 @@
package combine

import (
"fmt"
"testing"

"github.com/stretchr/testify/assert"
)

func TestAdjustmentDo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "mountpoint/path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "wrongpath/to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.do(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}

}

func TestAdjustmentUndo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "mountpoint/path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "path/to/file.txt",
},
{
root: "wrongmountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.undo(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}

}
@@ -1,81 +0,0 @@
// Test Combine filesystem interface
package combine_test

import (
"testing"

_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}

func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}

func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}

// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}
@@ -53,7 +53,7 @@ const (
Gzip = 2
)

var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")

// Register with Fs
func init() {
@@ -70,9 +70,6 @@ func init() {
Name: "compress",
Description: "Compress a remote",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to compress.",
@@ -183,9 +180,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
BucketBased: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -228,7 +222,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
// Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 {
return "", "", 0, errors.New("file name has no extension")
return "", "", 0, errors.New("File name has no extension")
}
extension = compressedFileName[extensionPos:]
nameWithSize := compressedFileName[:extensionPos]
@@ -237,11 +231,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
}
match := nameRegexp.FindStringSubmatch(nameWithSize)
if match == nil || len(match) != 3 {
return "", "", 0, errors.New("invalid filename")
return "", "", 0, errors.New("Invalid filename")
}
size, err := base64ToInt64(match[2])
if err != nil {
return "", "", 0, errors.New("could not decode size")
return "", "", 0, errors.New("Could not decode size")
}
return match[1], gzFileExt, size, nil
}
@@ -310,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -472,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name())
}()
if err != nil {
return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
}
if _, err = io.Copy(tempFile, in); err != nil {
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
@@ -726,7 +720,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("couldn't remove original object: %w", err)
return nil, fmt.Errorf("Could remove original object: %w", err)
}
}

@@ -735,7 +729,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
}
newObj.Object = wrapObj
}
@@ -1220,21 +1214,6 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.meta.MimeType
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
err := o.loadMetadataIfNotLoaded(ctx)
if err != nil {
return nil, err
}
do, ok := o.mo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1381,51 +1360,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", nil // cannot know the checksum
}

// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.src.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}

// MimeType returns the content type of the Object if
// known, or "" if not
func (o *ObjectInfo) MimeType(ctx context.Context) string {
do, ok := o.src.(fs.MimeTyper)
if !ok {
return ""
}
return do.MimeType(ctx)
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.src)
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.src.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}

// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.src.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1478,6 +1412,11 @@ var (
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

@@ -61,6 +61,5 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

@@ -96,7 +96,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate":
mode = NameEncryptionObfuscated
default:
err = fmt.Errorf("unknown file name encryption mode %q", s)
err = fmt.Errorf("Unknown file name encryption mode %q", s)
}
return mode, err
}
@@ -162,7 +162,7 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
case "base32768":
enc = base32768.SafeEncoding
default:
err = fmt.Errorf("unknown file name encoding mode %q", s)
err = fmt.Errorf("Unknown file name encoding mode %q", s)
}
return enc, err
}

@@ -28,9 +28,6 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -244,9 +241,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

return f, err
@@ -334,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory:
f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
return nil, fmt.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
@@ -1062,50 +1056,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil
}

// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}

// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *ObjectInfo) MimeType(ctx context.Context) string {
return ""
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.ObjectInfo)
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
@@ -1134,26 +1084,6 @@ func (o *Object) GetTier() string {
return do.GetTier()
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}

// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *Object) MimeType(ctx context.Context) string {
return ""
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1176,6 +1106,10 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
)

@@ -91,9 +91,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
src := f.newObjectInfo(oi, nonce)

// Test ObjectInfo methods
if !f.opt.NoDataEncryption {
assert.Equal(t, int64(outBuf.Len()), src.Size())
}
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())

@@ -4,7 +4,6 @@ package crypt_test
import (
"os"
"path/filepath"
"runtime"
"testing"

"github.com/rclone/rclone/backend/crypt"
@@ -47,7 +46,6 @@ func TestStandardBase32(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -69,7 +67,6 @@ func TestStandardBase64(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -91,7 +88,6 @@ func TestStandardBase32768(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -113,7 +109,6 @@ func TestOff(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -122,9 +117,6 @@ func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
@@ -139,7 +131,6 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -148,9 +139,6 @@ func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
@@ -166,6 +154,5 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

@@ -8,11 +8,11 @@ import "errors"
|
||||
|
||||
// Errors Unpad can return
|
||||
var (
|
||||
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
|
||||
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
|
||||
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
|
||||
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
|
||||
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
|
||||
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
|
||||
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
|
||||
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
|
||||
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
|
||||
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
|
||||
)
|
||||
|
||||
// Pad buf using PKCS#7 to a multiple of n.
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
"mime"
"net/http"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -51,7 +50,6 @@ import (
drive_v2 "google.golang.org/api/drive/v2"
drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)

// Constants
@@ -278,7 +276,6 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -567,27 +564,6 @@ If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
}, {
Name: "resource_key",
Help: `Resource key for accessing a link-shared file.

If you need to access files shared with a link like this

https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing

Then you will need to use the first part "XXX" as the "root_folder_id"
and the second part "YYY" as the "resource_key" otherwise you will get
404 not found errors when trying to access the directory.

See: https://developers.google.com/drive/api/guides/resource-keys

This resource key requirement only applies to a subset of old files.

Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is no needed.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -649,7 +625,6 @@ type Options struct {
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -675,7 +650,6 @@ type Fs struct {
grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key
}

type baseObject struct {
@@ -827,7 +801,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
// If we need to list file inside those shared folders, we must search it without sharedWithMe
parentsQuery := bytes.NewBufferString("(")
var resourceKeys []string
for _, dirID := range dirIDs {
if dirID == "" {
continue
@@ -848,12 +821,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
if hasResourceKey {
resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
}
}
resourceKeysHeader := strings.Join(resourceKeys, ",")
if parentsQuery.Len() > 1 {
_ = parentsQuery.WriteByte(')')
query = append(query, parentsQuery.String())
@@ -917,7 +885,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
}
list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive && !f.opt.SharedWithMe {
if f.isTeamDrive {
list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive")
}
@@ -925,10 +893,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
// Add resource Keys if necessary
if resourceKeysHeader != "" {
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
}

fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

@@ -1188,16 +1152,15 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err

ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map),
name: name,
root: root,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}),
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
@@ -1211,13 +1174,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
f.svc, err = drive.New(f.client)
if err != nil {
return nil, fmt.Errorf("couldn't create Drive client: %w", err)
}

if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
@@ -1259,11 +1222,6 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e

f.dirCache = dircache.New(f.root, f.rootFolderID, f)

// If resource key is set then cache it for the root folder id
if f.opt.ResourceKey != "" {
f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
}

// Parse extensions
if f.opt.Extensions != "" {
if f.opt.ExportExtensions != defaultExportExtensions {
@@ -2061,7 +2019,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {

// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.ContainsRune(compositeID, shortcutSeparator)
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
}

// actualID returns an actual ID from a composite ID
@@ -2132,10 +2090,6 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
// cache the resource key for later lookups
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 {
@@ -2219,10 +2173,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,

exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" {
return nil, fmt.Errorf("no export format found for %q", importMimeType)
return nil, fmt.Errorf("No export format found for %q", importMimeType)
}
if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
}
}
}
@@ -2527,7 +2481,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.TrashedOnly {
return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
return f.purgeCheck(ctx, dir, false)
}
@@ -2993,12 +2947,12 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
return fmt.Errorf("drive: failed when making oauth client: %w", err)
}
f.client = oAuthClient
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
f.svc, err = drive.New(f.client)
if err != nil {
return fmt.Errorf("couldn't create Drive client: %w", err)
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return fmt.Errorf("couldn't create Drive v2 client: %w", err)
}
@@ -3287,7 +3241,7 @@ This will return a JSON list of objects like this

With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found and a combined drive.
drives found.

[My Drive]
type = alias
@@ -3297,15 +3251,10 @@ drives found and a combined drive.
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal charactes will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
be accessible with the aliases shown. This may require manual editing
of the names.

`,
}, {
Name: "untrash",
@@ -3353,12 +3302,6 @@ attempted if possible.

Use the -i flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
}}

// Command the backend to run a named command
@@ -3378,7 +3321,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = f.opt.ChunkSize.String()
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
}
return out, nil
case "set":
@@ -3395,11 +3338,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = f.opt.ChunkSize.String()
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := f.opt.ChunkSize.String()
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
@@ -3427,30 +3370,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil {
return nil, err
}
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok {
lines := []string{}
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := re.ReplaceAllString(drive.Name, "_")
for {
if _, found := names[name]; !found {
break
}
name += fmt.Sprintf("-%d", i)
}
names[name] = struct{}{}
for _, drive := range drives {
lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", name))
lines = append(lines, "type = alias")
lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
}
lines = append(lines, "")
lines = append(lines, "[AllDrives]")
lines = append(lines, "type = combine")
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
return lines, nil
}
return drives, nil
@@ -3473,10 +3400,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
}
return nil, nil
case "exportformats":
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
default:
return nil, fs.ErrorCommandNotFound
}
@@ -3526,6 +3449,12 @@ func (o *baseObject) Size() int64 {
return o.bytes
}

// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
@@ -3710,7 +3639,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
} else {
err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
}
}
if err != nil {

@@ -378,9 +378,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)

// Check objects
checkObjects := func() {
@@ -496,7 +496,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
require.NoError(t, err)

file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
_, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)

// validate sync/copy
const timeQuery = "(modifiedTime >= '"

@@ -304,9 +304,6 @@ outer:
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")

@@ -472,12 +472,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
args := team.NewMembersGetInfoArgs(members)

memberIds, err := f.team.MembersGetInfo(args)

if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}

cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
@@ -1199,7 +1197,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return
}
if len(listRes.Links) == 0 {
err = errors.New("sharing link already exists, but list came back empty")
err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
return
}
linkRes = listRes.Links[0]
@@ -1211,7 +1209,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata:
link = res.Url
default:
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
}
}
return
@@ -1435,7 +1433,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
}

if entryPath != "" {
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
notifyFunc(entryPath, entryType)
}
}
if !changeList.HasMore {
@@ -1697,9 +1695,6 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)

@@ -28,44 +28,25 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}

var errorRegex = regexp.MustCompile(`#\d{1,3}`)

func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
}
return code
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
// The list below is far from complete and should be expanded if we see any more error codes.
if err != nil {
switch parseFichierError(err) {
case 93:
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
}
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -487,7 +468,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("invalid UploadID")
return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{
@@ -529,7 +510,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("invalid UploadID")
return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{

@@ -294,7 +294,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID)

if !ok {
return nil, errors.New("cannot find dir in dircache")
return nil, errors.New("Cannot find dir in dircache")
}

return f.newObjectFromFile(ctx, path, file), nil

@@ -490,7 +490,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Root is a dir - cache its ID
f.dirCache.Put(f.root, info.ID)
}
//} else {
} else {
// Root is not found so a directory
}
}

@@ -45,7 +45,7 @@ const (
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
@@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
@@ -718,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
}

// Annoyingly FTP returns success for a directory which

@@ -44,7 +44,6 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
option "google.golang.org/api/option"

// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
@@ -307,15 +306,17 @@ rclone does if you know the bucket exists already.
Default: false,
Advanced: true,
}, {
Name: "decompress",
Help: `If set this will decompress gzip encoded objects.
Name: "download_compressed",
Help: `If set this will download compressed objects as-is.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files files as compressed objects.
set. Normally rclone will transparently decompress these files on
download. This means that rclone can't check the hash or the size of
the file as both of these refer to the compressed object.

If this flag is set then rclone will decompress these files with
If this flag is set then rclone will download files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.
can check the size and hash but the file contents will be compressed.
`,
Advanced: true,
Default: false,
@@ -342,7 +343,7 @@ type Options struct {
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
DownloadCompressed bool `config:"download_compressed"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -390,7 +391,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return "GCS root"
return fmt.Sprintf("GCS root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -523,7 +524,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -1034,9 +1035,12 @@ func (o *Object) setMetaData(info *storage.Object) {
}

// If gunzipping then size and md5sum are unknown
if o.gzipped && o.fs.opt.Decompress {
if o.gzipped && !o.fs.opt.DownloadCompressed {
o.bytes = -1
o.md5sum = ""
o.fs.warnCompressed.Do(func() {
fs.Logf(o.fs, "Decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-download-compressed to override")
})
}
}

@@ -1138,7 +1142,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && !o.fs.opt.Decompress {
if o.gzipped && o.fs.opt.DownloadCompressed {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
@@ -1146,9 +1150,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
o.fs.warnCompressed.Do(func() {
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
})
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response

@@ -37,7 +37,7 @@ func TestIntegration(t *testing.T) {
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)

@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries = f.uploaded[dir]
entries, _ = f.uploaded[dir]
return entries, nil
}

@@ -27,9 +27,6 @@ func init() {
Name: "hasher",
Description: "Better checksums for other remotes",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
@@ -161,11 +158,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

@@ -493,17 +485,6 @@ func (o *Object) MimeType(ctx context.Context) string {
return ""
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -526,5 +507,10 @@ var (
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

@@ -19,7 +19,7 @@ import (
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1}
o := fstests.PutTestContents(ctx, t, f, &item, data, true)
_, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o)
return o
}
@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary crypt remote
ctx := context.Background()
pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err)

@@ -33,7 +33,6 @@ func TestIntegration(t *testing.T) {
{Name: "TestHasher", Key: "remote", Value: tempDir},
}
opt.RemoteName = "TestHasher:"
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
}

@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName

@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

_, err = o.fs.client.Stat(realpath)
info, err := o.fs.client.Stat(realpath)
if err == nil {
err = o.fs.client.Remove(realpath)
if err != nil {
@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

info, err := o.fs.client.Stat(realpath)
info, err = o.fs.client.Stat(realpath)
if err != nil {
return err
}

@@ -1,81 +0,0 @@
package api

import (
    "encoding/json"
    "net/url"
    "path"
    "strings"
    "time"
)

// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
    HiDriveObjectNoMetadataFields            = []string{"name", "type"}
    HiDriveObjectWithMetadataFields          = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
    HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
    DirectoryContentFields                   = []string{"nmembers"}
)

// QueryParameters represents the parameters passed to an API-call.
type QueryParameters struct {
    url.Values
}

// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
    return &QueryParameters{url.Values{}}
}

// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file).
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
    directory, file := path.Split(path.Clean(filePath))
    p.Set("dir", path.Clean(directory))
    p.Set("name", file)
    // NOTE: It would be possible to switch to pid-based requests
    // by modifying this function.
}

// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
    p.Set("path", path.Clean(objectPath))
    // NOTE: It would be possible to switch to pid-based requests
    // by modifying this function.
}

// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error {
    valueAPI := Time(value)
    valueBytes, err := json.Marshal(&valueAPI)
    if err != nil {
        return err
    }
    p.Set(key, string(valueBytes))
    return nil
}

// AddList adds the given values as a list
// with each value separated by the separator.
// It appends to any existing values associated with key.
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
    original := p.Get(key)
    p.Set(key, strings.Join(values, separator))
    if original != "" {
        p.Set(key, original+separator+p.Get(key))
    }
}

// AddFields sets the appropriate parameter to access the given fields.
// The given fields will be appended to any other existing fields.
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
    modifiedFields := make([]string, len(fields))
    for i, field := range fields {
        modifiedFields[i] = prefix + field
    }
    p.AddList("fields", ",", modifiedFields...)
}
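To make the parameter helpers in the file above easier to follow, here is a small usage sketch; it assumes the api package exactly as removed in this hunk, and the path and sort values are illustrative only.

package main

import (
    "fmt"

    "github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
    p := api.NewQueryParameters()
    p.SetPath("/users/example/some/dir")
    // Request metadata fields for the members and the member count of the directory itself.
    p.AddFields("members.", api.HiDriveObjectWithMetadataFields...)
    p.AddFields("", api.DirectoryContentFields...)
    p.AddList("sort", ",", "name", "-mtime")
    // The embedded url.Values can then be passed as rest.Opts.Parameters for a GET on /dir.
    fmt.Println(p.Encode())
}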
@@ -1,135 +0,0 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api

import (
    "encoding/json"
    "fmt"
    "net/url"
    "strconv"
    "time"
)

// Time represents date and time information for the API.
type Time time.Time

// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
    secs := time.Time(*t).Unix()
    return []byte(strconv.FormatInt(secs, 10)), nil
}

// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
    secs, err := strconv.ParseInt(string(data), 10, 64)
    if err != nil {
        return err
    }
    *t = Time(time.Unix(secs, 0))
    return nil
}

// Error is returned from the API when things go wrong.
type Error struct {
    Code        json.Number `json:"code"`
    ContextInfo json.RawMessage
    Message     string `json:"msg"`
}

// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
    out := fmt.Sprintf("Error %q", e.Code.String())
    if e.Message != "" {
        out += ": " + e.Message
    }
    if e.ContextInfo != nil {
        out += fmt.Sprintf(" (%+v)", e.ContextInfo)
    }
    return out
}

// Check Error satisfies the error interface.
var _ error = (*Error)(nil)

// possible types for HiDriveObject
const (
    HiDriveObjectTypeDirectory = "dir"
    HiDriveObjectTypeFile      = "file"
    HiDriveObjectTypeSymlink   = "symlink"
)

// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
    Type         string `json:"type"`
    ID           string `json:"id"`
    ParentID     string `json:"parent_id"`
    Name         string `json:"name"`
    Path         string `json:"path"`
    Size         int64  `json:"size"`
    MemberCount  int64  `json:"nmembers"`
    ModifiedAt   Time   `json:"mtime"`
    ChangedAt    Time   `json:"ctime"`
    MetaHash     string `json:"mhash"`
    MetaOnlyHash string `json:"mohash"`
    NameHash     string `json:"nhash"`
    ContentHash  string `json:"chash"`
    IsTeamfolder bool   `json:"teamfolder"`
    Readable     bool   `json:"readable"`
    Writable     bool   `json:"writable"`
    Shareable    bool   `json:"shareable"`
    MIMEType     string `json:"mime_type"`
}

// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
    t := time.Time(i.ModifiedAt)
    if t.IsZero() {
        t = time.Time(i.ChangedAt)
    }
    return t
}

// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
    type objectAlias HiDriveObject
    defaultObject := objectAlias{
        Size:        -1,
        MemberCount: -1,
    }

    err := json.Unmarshal(data, &defaultObject)
    if err != nil {
        return err
    }
    name, err := url.PathUnescape(defaultObject.Name)
    if err == nil {
        defaultObject.Name = name
    }

    *i = HiDriveObject(defaultObject)
    return nil
}

// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
    TotalCount int64           `json:"nmembers"`
    Entries    []HiDriveObject `json:"members"`
}

// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
    type directoryContentAlias DirectoryContent
    defaultDirectoryContent := directoryContentAlias{
        TotalCount: -1,
    }

    err := json.Unmarshal(data, &defaultDirectoryContent)
    if err != nil {
        return err
    }

    *d = DirectoryContent(defaultDirectoryContent)
    return nil
}
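A short decoding sketch may help clarify the custom defaults introduced by UnmarshalJSON above; it assumes the api package exactly as removed in this hunk, and the JSON document is an invented example rather than an actual API response.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/rclone/rclone/backend/hidrive/api"
)

func main() {
    // "size" is deliberately omitted, so the default of -1 set by UnmarshalJSON shows up.
    data := []byte(`{"type":"dir","name":"photos","mtime":1656573462,"nmembers":3}`)

    var obj api.HiDriveObject
    if err := json.Unmarshal(data, &obj); err != nil {
        panic(err)
    }
    // Prints: dir photos -1 3, followed by the mtime converted from Unix seconds.
    fmt.Println(obj.Type, obj.Name, obj.Size, obj.MemberCount, obj.ModTime().UTC())
}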
@@ -1,888 +0,0 @@
package hidrive

// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package

// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).

import (
    "bytes"
    "context"
    "errors"
    "io"
    "net/http"
    "path"
    "strconv"
    "sync"
    "time"

    "github.com/rclone/rclone/backend/hidrive/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/ranges"
    "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/lib/rest"
    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"
)

const (
    // MaximumUploadBytes represents the maximum amount of bytes
    // a single upload-operation will support.
    MaximumUploadBytes = 2147483647 // = 2GiB - 1
    // iterationChunkSize represents the chunk size used to iterate directory contents.
    iterationChunkSize = 5000
)

var (
    // retryErrorCodes is a slice of error codes that we will always retry.
    retryErrorCodes = []int{
        429, // Too Many Requests
        500, // Internal Server Error
        502, // Bad Gateway
        503, // Service Unavailable
        504, // Gateway Timeout
        509, // Bandwidth Limit Exceeded
    }
    // ErrorFileExists is returned when a query tries to create a file
    // that already exists.
    ErrorFileExists = errors.New("destination file already exists")
)
|
||||
// MemberType represents the possible types of entries a directory can contain.
|
||||
type MemberType string
|
||||
|
||||
// possible values for MemberType
|
||||
const (
|
||||
AllMembers MemberType = "all"
|
||||
NoMembers MemberType = "none"
|
||||
DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
|
||||
FileMembers MemberType = api.HiDriveObjectTypeFile
|
||||
SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink
|
||||
)
|
||||
|
||||
// SortByField represents possible fields to sort entries of a directory by.
|
||||
type SortByField string
|
||||
|
||||
// possible values for SortByField
|
||||
const (
|
||||
descendingSort string = "-"
|
||||
SortByName SortByField = "name"
|
||||
SortByModTime SortByField = "mtime"
|
||||
SortByObjectType SortByField = "type"
|
||||
SortBySize SortByField = "size"
|
||||
SortByNameDescending SortByField = SortByField(descendingSort) + SortByName
|
||||
SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime
|
||||
SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
|
||||
SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize
|
||||
)
|
||||
|
||||
var (
|
||||
// Unsorted disables sorting and can therefore not be combined with other values.
|
||||
Unsorted = []SortByField{"none"}
|
||||
// DefaultSorted does not specify how to sort and
|
||||
// therefore implies the default sort order.
|
||||
DefaultSorted = []SortByField{}
|
||||
)
|
||||
|
||||
// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
|
||||
type CopyOrMoveOperationType int
|
||||
|
||||
// possible values for CopyOrMoveOperationType
|
||||
const (
|
||||
MoveOriginal CopyOrMoveOperationType = iota
|
||||
CopyOriginal
|
||||
CopyOriginalPreserveModTime
|
||||
)
|
||||
|
||||
// OnExistAction represents possible actions the API should take,
|
||||
// when a request tries to create a path that already exists.
|
||||
type OnExistAction string
|
||||
|
||||
// possible values for OnExistAction
|
||||
const (
|
||||
// IgnoreOnExist instructs the API not to execute
|
||||
// the request in case of a conflict, but to return an error.
|
||||
IgnoreOnExist OnExistAction = "ignore"
|
||||
// AutoNameOnExist instructs the API to automatically rename
|
||||
// any conflicting request-objects.
|
||||
AutoNameOnExist OnExistAction = "autoname"
|
||||
// OverwriteOnExist instructs the API to overwrite any conflicting files.
|
||||
// This can only be used, if the request operates on files directly.
|
||||
// (For example when moving/copying a file.)
|
||||
// For most requests this action will simply be ignored.
|
||||
OverwriteOnExist OnExistAction = "overwrite"
|
||||
)
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
|
||||
// It tries to expire/invalidate the token, if necessary.
|
||||
// It returns the err as a convenience.
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
|
||||
fs.Debugf(f, "Token might be invalid: %v", err)
|
||||
if f.tokenRenewer != nil {
|
||||
iErr := f.tokenRenewer.Expire()
|
||||
if iErr == nil {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// resolvePath resolves the given (relative) path and
|
||||
// returns a path suitable for API-calls.
|
||||
// This will consider the root-path of the fs and any needed prefixes.
|
||||
//
|
||||
// Any relative paths passed to functions that access these paths should
|
||||
// be resolved with this first!
|
||||
func (f *Fs) resolvePath(objectPath string) string {
|
||||
resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
|
||||
return resolved
|
||||
}
|
||||
|
||||
// iterateOverDirectory calls the given function callback
|
||||
// on each item found in a given directory.
|
||||
//
|
||||
// If callback ever returns true then this exits early with found = true.
|
||||
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
parameters.AddFields("members.", fields...)
|
||||
parameters.AddFields("", api.DirectoryContentFields...)
|
||||
parameters.Set("members", string(searchOnly))
|
||||
for _, v := range sortBy {
|
||||
// The explicit conversion is necessary for each element.
|
||||
parameters.AddList("sort", ",", string(v))
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, item := range result.Entries {
|
||||
item.Name = f.opt.Enc.ToStandardName(item.Name)
|
||||
if callback(&item) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
|
||||
}
|
||||
|
||||
// paginateDirectoryAccess executes requests specified via ctx and opts
|
||||
// which should produce api.DirectoryContent.
|
||||
// This will paginate the requests using limit starting at the given offset.
|
||||
//
|
||||
// The given function callback is called on each api.DirectoryContent found
|
||||
// along with any errors that occurred.
|
||||
// If callback ever returns true then this exits early with found = true.
|
||||
// If callback ever returns an error then this exits early with that error.
|
||||
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
|
||||
for {
|
||||
opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
|
||||
|
||||
var result api.DirectoryContent
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
found, err = callback(&result, err)
|
||||
if found || err != nil {
|
||||
return found, err
|
||||
}
|
||||
|
||||
offset += int64(len(result.Entries))
|
||||
if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// fetchMetadataForPath reads the metadata from the path.
|
||||
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.AddFields("", fields...)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/meta",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyOrMove copies or moves a directory or file
|
||||
// from the source-path to the destination-path.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: Use the explicit methods instead of directly invoking this method.
|
||||
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
|
||||
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.Set("src", source)
|
||||
parameters.Set("dst", destination)
|
||||
if onExist == AutoNameOnExist ||
|
||||
(onExist == OverwriteOnExist && !isDirectory) {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
endpoint := "/"
|
||||
if isDirectory {
|
||||
endpoint += "dir"
|
||||
} else {
|
||||
endpoint += "file"
|
||||
}
|
||||
switch operationType {
|
||||
case MoveOriginal:
|
||||
endpoint += "/move"
|
||||
case CopyOriginalPreserveModTime:
|
||||
parameters.Set("preserve_mtime", strconv.FormatBool(true))
|
||||
fallthrough
|
||||
case CopyOriginal:
|
||||
endpoint += "/copy"
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: endpoint,
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
|
||||
}
|
||||
|
||||
// copyFile copies the file at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: This operation will expand sparse areas in the content of the source-file
|
||||
// to blocks of 0-bytes in the destination-file.
|
||||
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveFile moves the file at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
//
|
||||
// NOTE: This operation may expand sparse areas in the content of the source-file
|
||||
// to blocks of 0-bytes in the destination-file.
|
||||
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
|
||||
}
|
||||
|
||||
// createDirectory creates the directory at the given path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The directory will only be created if its parent-directory exists.
|
||||
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
|
||||
// This returns fs.ErrorDirExists if the directory already exists.
|
||||
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
if onExist == AutoNameOnExist {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return nil, fs.ErrorDirExists
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// createDirectories creates the directory at the given path
|
||||
// along with any missing parent directories and
|
||||
// returns the resulting api-object (of the created directory) if successful.
|
||||
//
|
||||
// This returns fs.ErrorDirExists if the directory already exists.
|
||||
//
|
||||
// If an error occurs while the parent directories are being created,
|
||||
// any directories already created will NOT be deleted again.
|
||||
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
result, err := f.createDirectory(ctx, directory, onExist)
|
||||
if err == nil {
|
||||
return result, nil
|
||||
}
|
||||
if err != fs.ErrorDirNotFound {
|
||||
return nil, err
|
||||
}
|
||||
parentDirectory := path.Dir(directory)
|
||||
_, err = f.createDirectories(ctx, parentDirectory, onExist)
|
||||
if err != nil && err != fs.ErrorDirExists {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: Ignoring fs.ErrorDirExists does no harm,
|
||||
// since it does not mean the child directory cannot be created.
|
||||
return f.createDirectory(ctx, directory, onExist)
|
||||
}
|
||||
|
||||
// deleteDirectory deletes the directory at the given path.
|
||||
//
|
||||
// If recursive is false, the directory will only be deleted if it is empty.
|
||||
// If recursive is true, the directory will be deleted regardless of its content.
|
||||
// This returns fs.ErrorDirNotFound if the directory is not found.
|
||||
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
|
||||
// recursive is false.
|
||||
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(directory)
|
||||
parameters.Set("recursive", strconv.FormatBool(recursive))
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/dir",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case isHTTPError(err, 404):
|
||||
return fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// deleteObject deletes the object/file at the given path.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) deleteObject(ctx context.Context, path string) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/file",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if isHTTPError(err, 404) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// createFile creates a file at the given path
|
||||
// with the content of the io.ReadSeeker.
|
||||
// This guarantees that existing files will not be overwritten.
|
||||
// The maximum size of the content is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// This returns fs.ErrorDirNotFound
|
||||
// if the parent directory of the file is not found.
|
||||
// This returns ErrorFileExists if a file already exists at the specified path.
|
||||
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetFileInDirectory(path)
|
||||
if onExist == AutoNameOnExist {
|
||||
parameters.Set("on_exist", string(onExist))
|
||||
}
|
||||
|
||||
var err error
|
||||
if !modTime.IsZero() {
|
||||
err = parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
if _, err = content.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
case isHTTPError(err, 409):
|
||||
return nil, ErrorFileExists
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// overwriteFile updates the content of the file at the given path
|
||||
// with the content of the io.ReadSeeker.
|
||||
// If the file does not exist it will be created.
|
||||
// The maximum size of the content is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// This returns fs.ErrorDirNotFound
|
||||
// if the parent directory of the file is not found.
|
||||
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetFileInDirectory(path)
|
||||
|
||||
var err error
|
||||
if !modTime.IsZero() {
|
||||
err = parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
if _, err = content.Seek(0, io.SeekStart); err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// uploadFileChunked updates the content of the existing file at the given path
|
||||
// with the content of the io.Reader.
|
||||
// Returns the position of the last successfully written byte, stopping before the first failed write.
|
||||
// If nothing was written this will be 0.
|
||||
// Returns the resulting api-object if successful.
|
||||
//
|
||||
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
|
||||
// Therefore this can be used to upload files of any size efficiently.
|
||||
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: This method uses updateFileChunked and may create sparse files,
|
||||
// if the upload of a chunk fails unexpectedly.
|
||||
// See note about sparse files in patchFile.
|
||||
// If any of the uploads fail, the process will be aborted and
|
||||
// the first error that occurred will be returned.
|
||||
// This is not an atomic operation,
|
||||
// therefore if the upload fails the file may be partially modified.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
|
||||
okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
|
||||
|
||||
if err == nil {
|
||||
info, err = f.resizeFile(ctx, path, okSize, modTime)
|
||||
}
|
||||
return okSize, info, err
|
||||
}
|
||||
|
||||
// updateFileChunked updates the content of the existing file at the given path
|
||||
// starting at the given offset.
|
||||
// Returns the position of the last successfully written byte, stopping before the first failed write.
|
||||
// If nothing was written this will be 0.
|
||||
//
|
||||
// Replaces the file contents starting from the given byte offset
|
||||
// with the content of the io.Reader.
|
||||
// If the offset is beyond the file end, the file is extended up to the offset.
|
||||
//
|
||||
// The upload is done in multiple chunks of the given size in parallel.
|
||||
// Therefore this can be used to upload files of any size efficiently.
|
||||
// The number of parallel transfers is limited by transferLimit which should be larger than 0.
|
||||
//
|
||||
// NOTE: Because it is inefficient to set the modification time with every chunk,
|
||||
// setting it to a specific value must be done in a separate request
|
||||
// after this operation finishes.
|
||||
//
|
||||
// NOTE: This method uses patchFile and may create sparse files,
|
||||
// especially if the upload of a chunk fails unexpectedly.
|
||||
// See note about sparse files in patchFile.
|
||||
// If any of the uploads fail, the process will be aborted and
|
||||
// the first error that occurred will be returned.
|
||||
// This is not an atomic operation,
|
||||
// therefore if the upload fails the file may be partially modified.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
|
||||
var (
|
||||
okChunksMu sync.Mutex // protects the variables below
|
||||
okChunks []ranges.Range
|
||||
)
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
transferSemaphore := semaphore.NewWeighted(transferLimit)
|
||||
|
||||
var readErr error
|
||||
startMoreTransfers := true
|
||||
zeroTime := time.Time{}
|
||||
for chunk := uint64(0); startMoreTransfers; chunk++ {
|
||||
// Acquire semaphore to limit number of transfers in parallel.
|
||||
readErr = transferSemaphore.Acquire(gCtx, 1)
|
||||
if readErr != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Read a chunk of data.
|
||||
chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
|
||||
if bytesRead < chunkSize {
|
||||
startMoreTransfers = false
|
||||
}
|
||||
if readErr != nil || bytesRead <= 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Transfer the chunk.
|
||||
chunkOffset := uint64(chunkSize)*chunk + offset
|
||||
g.Go(func() error {
|
||||
// After this upload is done,
|
||||
// signal that another transfer can be started.
|
||||
defer transferSemaphore.Release(1)
|
||||
uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
|
||||
if uploadErr == nil {
|
||||
// Remember successfully written chunks.
|
||||
okChunksMu.Lock()
|
||||
okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
|
||||
okChunksMu.Unlock()
|
||||
fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
|
||||
} else {
|
||||
fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
|
||||
}
|
||||
return uploadErr
|
||||
})
|
||||
}
|
||||
|
||||
if readErr != nil {
|
||||
// Log the error in case it is later ignored because of an upload-error.
|
||||
fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
|
||||
}
|
||||
|
||||
err = g.Wait()
|
||||
|
||||
// Compute the first continuous range of the file content,
|
||||
// which does not contain any failed chunks.
|
||||
// Do not forget to add the file content up to the starting offset,
|
||||
// which is presumed to be already correct.
|
||||
rs := ranges.Ranges{}
|
||||
rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
|
||||
for _, chunkRange := range okChunks {
|
||||
rs.Insert(chunkRange)
|
||||
}
|
||||
if len(rs) > 0 && rs[0].Pos == 0 {
|
||||
okSize = uint64(rs[0].Size)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return okSize, err
|
||||
}
|
||||
if readErr != nil {
|
||||
return okSize, readErr
|
||||
}
|
||||
|
||||
return okSize, nil
|
||||
}
|
||||
|
||||
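The concurrency pattern in updateFileChunked above (an errgroup collecting upload errors while a weighted semaphore caps the number of chunks in flight) is easier to see in isolation. The following is a stripped-down sketch of that pattern only; uploadChunk is a hypothetical stand-in for the real patchFile call and no HiDrive API is involved.

package main

import (
    "context"
    "fmt"
    "io"
    "strings"

    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"
)

// uploadChunk stands in for a real per-chunk upload such as patchFile.
func uploadChunk(ctx context.Context, offset int64, chunk []byte) error {
    fmt.Printf("uploading %d bytes at offset %d\n", len(chunk), offset)
    return nil
}

func uploadChunked(ctx context.Context, r io.Reader, chunkSize int, transferLimit int64) error {
    g, gCtx := errgroup.WithContext(ctx)
    sem := semaphore.NewWeighted(transferLimit)

    var offset int64
    for {
        // Limit the number of uploads in flight.
        if err := sem.Acquire(gCtx, 1); err != nil {
            break // context cancelled, usually because an earlier upload failed
        }
        buf := make([]byte, chunkSize)
        n, readErr := io.ReadFull(r, buf)
        if n <= 0 {
            sem.Release(1)
            break
        }
        chunkOffset, chunk := offset, buf[:n]
        offset += int64(n)
        g.Go(func() error {
            defer sem.Release(1)
            return uploadChunk(gCtx, chunkOffset, chunk)
        })
        if readErr != nil {
            // io.EOF / io.ErrUnexpectedEOF: the short chunk above was the last one.
            break
        }
    }
    return g.Wait()
}

func main() {
    err := uploadChunked(context.Background(), strings.NewReader("some example content"), 8, 2)
    fmt.Println("done, err =", err)
}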
// patchFile updates the content of the existing file at the given path
|
||||
// starting at the given offset.
|
||||
//
|
||||
// Replaces the file contents starting from the given byte offset
|
||||
// with the content of the io.ReadSeeker.
|
||||
// If the offset is beyond the file end, the file is extended up to the offset.
|
||||
// The maximum size of the update is limited by MaximumUploadBytes.
|
||||
// The io.ReadSeeker should be resettable by seeking to its start.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: By extending the file up to the offset this may create sparse files,
|
||||
// which allocate less space on the file system than their apparent size indicates,
|
||||
// since holes between data chunks are "real" holes
|
||||
// and not regions made up of consecutive 0-bytes.
|
||||
// Subsequent operations (such as copying data)
|
||||
// usually expand the holes into regions of 0-bytes.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.Set("offset", strconv.FormatUint(offset, 10))
|
||||
|
||||
if !modTime.IsZero() {
|
||||
err := parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/file",
|
||||
Body: content,
|
||||
ContentType: "application/octet-stream",
|
||||
Parameters: parameters.Values,
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// Reset the reading index (in case this is a retry).
|
||||
_, err = content.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
if isHTTPError(err, 423) {
|
||||
return true, err
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if isHTTPError(err, 404) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// resizeFile updates the existing file at the given path to be of the given size
|
||||
// and returns the resulting api-object if successful.
|
||||
//
|
||||
// If the given size is smaller than the current filesize,
|
||||
// the file is cut/truncated at that position.
|
||||
// If the given size is larger, the file is extended up to that position.
|
||||
// If modTime is not the zero time instant,
|
||||
// it will be set as the file's modification time after the operation.
|
||||
//
|
||||
// NOTE: By extending the file this may create sparse files,
|
||||
// which allocate less space on the file system than their apparent size indicates,
|
||||
// since holes between data chunks are "real" holes
|
||||
// and not regions made up of consecutive 0-bytes.
|
||||
// Subsequent operations (such as copying data)
|
||||
// usually expand the holes into regions of 0-bytes.
|
||||
//
|
||||
// This returns fs.ErrorObjectNotFound if the object is not found.
|
||||
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
|
||||
parameters := api.NewQueryParameters()
|
||||
parameters.SetPath(path)
|
||||
parameters.Set("size", strconv.FormatUint(size, 10))
|
||||
|
||||
if !modTime.IsZero() {
|
||||
err := parameters.SetTime("mtime", modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file/truncate",
|
||||
Parameters: parameters.Values,
|
||||
}
|
||||
|
||||
var result api.HiDriveObject
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return &result, nil
|
||||
case isHTTPError(err, 404):
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// isHTTPError compares the numerical status code
|
||||
// of an api.Error to the given HTTP status.
|
||||
//
|
||||
// If the given error is not an api.Error or
|
||||
// a numerical status code could not be determined, this returns false.
|
||||
// Otherwise this returns whether the status code of the error is equal to the given status.
|
||||
func isHTTPError(err error, status int64) bool {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
errStatus, decodeErr := apiErr.Code.Int64()
|
||||
if decodeErr == nil && errStatus == status {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// createHiDriveScopes creates oauth-scopes
|
||||
// from the given user-role and access-permissions.
|
||||
//
|
||||
// If the arguments are empty, they will not be included in the result.
|
||||
func createHiDriveScopes(role string, access string) []string {
|
||||
switch {
|
||||
case role != "" && access != "":
|
||||
return []string{access + "," + role}
|
||||
case role != "":
|
||||
return []string{role}
|
||||
case access != "":
|
||||
return []string{access}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// cachedReader returns a version of the reader that caches its contents and
|
||||
// can therefore be reset using Seek.
|
||||
func cachedReader(reader io.Reader) io.ReadSeeker {
|
||||
bytesReader, ok := reader.(*bytes.Reader)
|
||||
if ok {
|
||||
return bytesReader
|
||||
}
|
||||
|
||||
repeatableReader, ok := reader.(*readers.RepeatableReader)
|
||||
if ok {
|
||||
return repeatableReader
|
||||
}
|
||||
|
||||
return readers.NewRepeatableReader(reader)
|
||||
}
|
||||
|
||||
// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
|
||||
// Returns a new io.Reader (chunkReader) for that chunk
|
||||
// and the number of bytes that have been read from reader.
|
||||
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
|
||||
// Unwrap any accounting from the input if present.
|
||||
reader, wrap := accounting.UnWrap(reader)
|
||||
|
||||
// Read a chunk of data.
|
||||
buffer := make([]byte, length)
|
||||
bytesRead, err = io.ReadFull(reader, buffer)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, bytesRead, err
|
||||
}
|
||||
// Truncate unused capacity.
|
||||
buffer = buffer[:bytesRead]
|
||||
|
||||
// Use wrap to put any accounting back for chunkReader.
|
||||
return wrap(bytes.NewReader(buffer)), bytesRead, nil
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,45 +0,0 @@
// Test HiDrive filesystem interface
package hidrive

import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
    name := "TestHiDrive"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize:       1,
            MaxChunkSize:       MaximumUploadBytes,
            CeilChunkSize:      nil,
            NeedMultipleChunks: false,
        },
    })
}

// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
    var old fs.SizeSuffix
    old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
    return old, nil
}

// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
    var old fs.SizeSuffix
    old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
    return old, nil
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
@@ -1,410 +0,0 @@
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash

import (
    "bytes"
    "crypto/sha1"
    "encoding"
    "encoding/binary"
    "errors"
    "fmt"
    "hash"
    "io"

    "github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)

const (
    // BlockSize of the checksum in bytes.
    BlockSize = 4096
    // Size of the checksum in bytes.
    Size = sha1.Size
    // sumsPerLevel is the number of checksums aggregated per level.
    sumsPerLevel = 256
)

var (
    // zeroSum is a special hash consisting of 20 null-bytes.
    // This will be the hash of any empty file (or ones containing only null-bytes).
    zeroSum = [Size]byte{}
    // ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
    ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
    // ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
    ErrorHashFull = errors.New("hash reached its capacity")
)
|
||||
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
|
||||
// It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
//
|
||||
// A pointer bytesInBlock to a counter needs to be supplied,
|
||||
// that is used to keep track how many bytes have been written to the writer already.
|
||||
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
|
||||
// that is used to keep track whether the block so far only consists of null-bytes.
|
||||
// The callback onBlockWritten is called whenever a full block has been written to the writer
|
||||
// and is given as input the number of bytes that still need to be written.
|
||||
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
|
||||
total := len(p)
|
||||
nullBytes := make([]byte, blockSize)
|
||||
for len(p) > 0 {
|
||||
toWrite := int(blockSize - *bytesInBlock)
|
||||
if toWrite > len(p) {
|
||||
toWrite = len(p)
|
||||
}
|
||||
c, err := writer.Write(p[:toWrite])
|
||||
*bytesInBlock += uint32(c)
|
||||
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
|
||||
// Discard data written through a reslice
|
||||
p = p[c:]
|
||||
if err != nil {
|
||||
return total - len(p), err
|
||||
}
|
||||
if *bytesInBlock == blockSize {
|
||||
err = onBlockWritten(len(p))
|
||||
if err != nil {
|
||||
return total - len(p), err
|
||||
}
|
||||
*bytesInBlock = 0
|
||||
*onlyNullBytesInBlock = true
|
||||
}
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
|
||||
// It is used to represent any level-n hash, except for level-0.
|
||||
type level struct {
|
||||
checksum [Size]byte // aggregated checksum of this level
|
||||
sumCount uint32 // number of sums contained in this level so far
|
||||
bytesInHasher uint32 // number of bytes written into hasher so far
|
||||
onlyNullBytesInHasher bool // whether the hasher only contains null-bytes so far
|
||||
hasher hash.Hash
|
||||
}
|
||||
|
||||
// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
|
||||
func NewLevel() hash.Hash {
|
||||
l := &level{}
|
||||
l.Reset()
|
||||
return l
|
||||
}
|
||||
|
||||
// Add takes a position-embedded SHA-1 checksum and adds it to the level.
|
||||
func (l *level) Add(sha1sum []byte) {
|
||||
var tmp uint
|
||||
var carry bool
|
||||
for i := Size - 1; i >= 0; i-- {
|
||||
tmp = uint(sha1sum[i]) + uint(l.checksum[i])
|
||||
if carry {
|
||||
tmp++
|
||||
}
|
||||
carry = tmp > 255
|
||||
l.checksum[i] = byte(tmp)
|
||||
}
|
||||
}
|
||||
|
||||
// IsFull returns whether the number of checksums added to this level reached its capacity.
|
||||
func (l *level) IsFull() bool {
|
||||
return l.sumCount >= sumsPerLevel
|
||||
}
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// Contrary to the specification from hash.Hash, this DOES return an error,
|
||||
// specifically ErrorHashFull if and only if IsFull() returns true.
|
||||
func (l *level) Write(p []byte) (n int, err error) {
|
||||
if l.IsFull() {
|
||||
return 0, ErrorHashFull
|
||||
}
|
||||
onBlockWritten := func(remaining int) error {
|
||||
if !l.onlyNullBytesInHasher {
|
||||
c, err := l.hasher.Write([]byte{byte(l.sumCount)})
|
||||
l.bytesInHasher += uint32(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Add(l.hasher.Sum(nil))
|
||||
}
|
||||
l.sumCount++
|
||||
l.hasher.Reset()
|
||||
if remaining > 0 && l.IsFull() {
|
||||
return ErrorHashFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (l *level) Sum(b []byte) []byte {
|
||||
return append(b, l.checksum[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (l *level) Reset() {
|
||||
l.checksum = zeroSum // clear the current checksum
|
||||
l.sumCount = 0
|
||||
l.bytesInHasher = 0
|
||||
l.onlyNullBytesInHasher = true
|
||||
l.hasher = sha1.New()
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (l *level) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (l *level) BlockSize() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// MarshalBinary encodes the hash into a binary form and returns the result.
|
||||
func (l *level) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, Size+4+4+1)
|
||||
copy(b, l.checksum[:])
|
||||
binary.BigEndian.PutUint32(b[Size:], l.sumCount)
|
||||
binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
|
||||
if l.onlyNullBytesInHasher {
|
||||
b[Size+4+4] = 1
|
||||
}
|
||||
encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = append(b, encodedHasher...)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
// The hash will replace its internal state accordingly.
|
||||
func (l *level) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < Size+4+4+1 {
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
copy(l.checksum[:], b)
|
||||
l.sumCount = binary.BigEndian.Uint32(b[Size:])
|
||||
l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
|
||||
switch b[Size+4+4] {
|
||||
case 0:
|
||||
l.onlyNullBytesInHasher = false
|
||||
case 1:
|
||||
l.onlyNullBytesInHasher = true
|
||||
default:
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
|
||||
return err
|
||||
}
|
||||
|
||||
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
|
||||
type hidriveHash struct {
|
||||
levels []*level // collection of level-hashes, one for each level starting at level-1
|
||||
lastSumWritten [Size]byte // the last checksum written to any of the levels
|
||||
bytesInBlock uint32 // bytes written into blockHash so far
|
||||
onlyNullBytesInBlock bool // whether the hasher only contains null-bytes so far
|
||||
blockHash hash.Hash
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the HiDrive checksum.
|
||||
func New() hash.Hash {
|
||||
h := &hidriveHash{}
|
||||
h.Reset()
|
||||
return h
|
||||
}
|
||||
|
||||
// aggregateToLevel writes the checksum to the level at the given index
|
||||
// and if necessary propagates any changes to levels above.
|
||||
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
|
||||
for i := index; ; i++ {
|
||||
if i >= len(h.levels) {
|
||||
h.levels = append(h.levels, NewLevel().(*level))
|
||||
}
|
||||
_, err := h.levels[i].Write(sum)
|
||||
copy(h.lastSumWritten[:], sum)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
|
||||
}
|
||||
if !h.levels[i].IsFull() {
|
||||
break
|
||||
}
|
||||
sum = h.levels[i].Sum(nil)
|
||||
h.levels[i].Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
func (h *hidriveHash) Write(p []byte) (n int, err error) {
|
||||
onBlockWritten := func(remaining int) error {
|
||||
var sum []byte
|
||||
if h.onlyNullBytesInBlock {
|
||||
sum = zeroSum[:]
|
||||
} else {
|
||||
sum = h.blockHash.Sum(nil)
|
||||
}
|
||||
h.blockHash.Reset()
|
||||
h.aggregateToLevel(0, sum)
|
||||
return nil
|
||||
}
|
||||
return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (h *hidriveHash) Sum(b []byte) []byte {
|
||||
// Save internal state.
|
||||
state, err := h.MarshalBinary()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
|
||||
}
|
||||
|
||||
if h.bytesInBlock > 0 {
|
||||
// Fill remainder of block with null-bytes.
|
||||
filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
|
||||
_, err = h.Write(filler)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
checksum := zeroSum
|
||||
for i := 0; i < len(h.levels); i++ {
|
||||
level := h.levels[i]
|
||||
if i < len(h.levels)-1 {
|
||||
// Aggregate non-empty non-final levels.
|
||||
if level.sumCount >= 1 {
|
||||
h.aggregateToLevel(i+1, level.Sum(nil))
|
||||
level.Reset()
|
||||
}
|
||||
} else {
|
||||
// Determine sum of final level.
|
||||
if level.sumCount > 1 {
|
||||
copy(checksum[:], level.Sum(nil))
|
||||
} else {
|
||||
// This is needed, otherwise there is no way to return
|
||||
// the non-position-embedded checksum.
|
||||
checksum = h.lastSumWritten
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Restore internal state.
|
||||
err = h.UnmarshalBinary(state)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
|
||||
}
|
||||
|
||||
return append(b, checksum[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (h *hidriveHash) Reset() {
|
||||
h.levels = nil
|
||||
h.lastSumWritten = zeroSum // clear the last written checksum
|
||||
h.bytesInBlock = 0
|
||||
h.onlyNullBytesInBlock = true
|
||||
h.blockHash = sha1.New()
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (h *hidriveHash) Size() int {
|
||||
return Size
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (h *hidriveHash) BlockSize() int {
|
||||
return BlockSize
|
||||
}
|
||||
|
||||
// MarshalBinary encodes the hash into a binary form and returns the result.
|
||||
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
|
||||
b := make([]byte, Size+4+1+8)
|
||||
copy(b, h.lastSumWritten[:])
|
||||
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
|
||||
if h.onlyNullBytesInBlock {
|
||||
b[Size+4] = 1
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
|
||||
for _, level := range h.levels {
|
||||
encodedLevel, err := level.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encodedLength := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
|
||||
b = append(b, encodedLength...)
|
||||
b = append(b, encodedLevel...)
|
||||
}
|
||||
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = append(b, encodedBlockHash...)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
// The hash will replace its internal state accordingly.
|
||||
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
|
||||
if len(b) < Size+4+1+8 {
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
copy(h.lastSumWritten[:], b)
|
||||
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
|
||||
switch b[Size+4] {
|
||||
case 0:
|
||||
h.onlyNullBytesInBlock = false
|
||||
case 1:
|
||||
h.onlyNullBytesInBlock = true
|
||||
default:
|
||||
return ErrorInvalidEncoding
|
||||
}
|
||||
|
||||
amount := binary.BigEndian.Uint64(b[Size+4+1:])
|
||||
h.levels = make([]*level, int(amount))
|
||||
offset := Size + 4 + 1 + 8
|
||||
for i := range h.levels {
|
||||
length := int(binary.BigEndian.Uint64(b[offset:]))
|
||||
offset += 8
|
||||
h.levels[i] = NewLevel().(*level)
|
||||
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
offset += length
|
||||
}
|
||||
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
|
||||
return err
|
||||
}
|
||||
|
||||
// Sum returns the HiDrive checksum of the data.
|
||||
func Sum(data []byte) [Size]byte {
|
||||
h := New().(*hidriveHash)
|
||||
_, _ = h.Write(data)
|
||||
var result [Size]byte
|
||||
copy(result[:], h.Sum(nil))
|
||||
return result
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied.
|
||||
var (
|
||||
_ hash.Hash = (*level)(nil)
|
||||
_ encoding.BinaryMarshaler = (*level)(nil)
|
||||
_ encoding.BinaryUnmarshaler = (*level)(nil)
|
||||
_ internal.LevelHash = (*level)(nil)
|
||||
_ hash.Hash = (*hidriveHash)(nil)
|
||||
_ encoding.BinaryMarshaler = (*hidriveHash)(nil)
|
||||
_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
|
||||
)
|
||||
@@ -1,395 +0,0 @@
|
||||
package hidrivehash_test
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash"
|
||||
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// helper functions to set up test-tables
|
||||
|
||||
func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
|
||||
return sum[:]
|
||||
}
|
||||
|
||||
func mustDecode(hexstring string) []byte {
|
||||
result, err := hex.DecodeString(hexstring)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var testTableLevelPositionEmbedded = []struct {
|
||||
ins [][]byte
|
||||
outs [][]byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
|
||||
sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
|
||||
sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
|
||||
},
|
||||
"documentation-v3.2rev27-example L0 (position-embedded)",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
|
||||
sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
|
||||
sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
|
||||
sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
|
||||
sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
|
||||
},
|
||||
"documentation-example L0 (position-embedded)",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
|
||||
sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
|
||||
sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
|
||||
},
|
||||
[][]byte{
|
||||
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
|
||||
sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
|
||||
sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
|
||||
},
|
||||
"documentation-example L1 (position-embedded)",
|
||||
},
|
||||
}
|
||||
|
||||
var testTableLevel = []struct {
|
||||
ins [][]byte
|
||||
outs [][]byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
|
||||
mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
|
||||
mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
|
||||
},
|
||||
"documentation-example L0",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
|
||||
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
|
||||
mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
|
||||
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
|
||||
},
|
||||
"documentation-example L1",
|
||||
},
|
||||
{
|
||||
[][]byte{
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
},
|
||||
[][]byte{
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
|
||||
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
|
||||
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
|
||||
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
|
||||
},
|
||||
"mixed-with-empties",
|
||||
},
|
||||
}
|
||||
|
||||
var testTable = []struct {
|
||||
data []byte
|
||||
// pattern describes how to use data to construct the hash-input.
|
||||
// For every entry n at even indices this repeats the data n times.
|
||||
// For every entry m at odd indices this repeats a null-byte m times.
|
||||
// The input-data is constructed by concatinating the results in order.
|
||||
pattern []int64
|
||||
out []byte
|
||||
name string
|
||||
}{
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64},
|
||||
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
|
||||
"documentation-example L0",
|
||||
},
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64 * 256},
|
||||
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
|
||||
"documentation-example L1",
|
||||
},
|
||||
{
|
||||
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
|
||||
[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
|
||||
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
|
||||
"documentation-example L2",
|
||||
},
|
||||
{
|
||||
[]byte("hello rclone\n"),
|
||||
[]int64{316},
|
||||
mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
|
||||
"not-block-aligned",
|
||||
},
|
||||
{
|
||||
[]byte("hello rclone\n"),
|
||||
[]int64{13, 4096 * 3, 4},
|
||||
mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
|
||||
"not-block-aligned-with-null-bytes",
|
||||
},
|
||||
{
|
||||
[]byte{},
|
||||
[]int64{},
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
"empty",
|
||||
},
|
||||
{
|
||||
[]byte{},
|
||||
[]int64{0, 4096 * 256 * 256},
|
||||
mustDecode("0000000000000000000000000000000000000000"),
|
||||
"null-bytes",
|
||||
},
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func TestLevelAdd(t *testing.T) {
|
||||
for _, test := range testTableLevelPositionEmbedded {
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for i := range test.ins {
|
||||
l.Add(test.ins[i])
|
||||
assert.Equal(t, test.outs[i], l.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelWrite(t *testing.T) {
|
||||
for _, test := range testTableLevel {
|
||||
l := hidrivehash.NewLevel()
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for i := range test.ins {
|
||||
l.Write(test.ins[i])
|
||||
assert.Equal(t, test.outs[i], l.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelIsFull(t *testing.T) {
|
||||
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
|
||||
l := hidrivehash.NewLevel()
|
||||
for i := 0; i < 256; i++ {
|
||||
assert.False(t, l.(internal.LevelHash).IsFull())
|
||||
written, err := l.Write(content[:])
|
||||
assert.Equal(t, len(content), written)
|
||||
if !assert.NoError(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
assert.True(t, l.(internal.LevelHash).IsFull())
|
||||
written, err := l.Write(content[:])
|
||||
assert.True(t, l.(internal.LevelHash).IsFull())
|
||||
assert.Equal(t, 0, written)
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
|
||||
}
|
||||
|
||||
func TestLevelReset(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
zeroHash := l.Sum(nil)
|
||||
_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotEqual(t, zeroHash, l.Sum(nil))
|
||||
l.Reset()
|
||||
assert.Equal(t, zeroHash, l.Sum(nil))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelSize(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
assert.Equal(t, 20, l.Size())
|
||||
}
|
||||
|
||||
func TestLevelBlockSize(t *testing.T) {
|
||||
l := hidrivehash.NewLevel()
|
||||
assert.Equal(t, 20, l.BlockSize())
|
||||
}
|
||||
|
||||
func TestLevelBinaryMarshaler(t *testing.T) {
|
||||
content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
l.Write(content[:10])
|
||||
encoded, err := l.MarshalBinary()
|
||||
if assert.NoError(t, err) {
|
||||
d := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
err = d.UnmarshalBinary(encoded)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, l.Sum(nil), d.Sum(nil))
|
||||
l.Write(content[10:])
|
||||
d.Write(content[10:])
|
||||
assert.Equal(t, l.Sum(nil), d.Sum(nil))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLevelInvalidEncoding(t *testing.T) {
|
||||
l := hidrivehash.NewLevel().(internal.LevelHash)
|
||||
err := l.UnmarshalBinary([]byte{})
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
type infiniteReader struct {
|
||||
source []byte
|
||||
offset int
|
||||
}
|
||||
|
||||
func (m *infiniteReader) Read(b []byte) (int, error) {
|
||||
count := copy(b, m.source[m.offset:])
|
||||
m.offset += count
|
||||
m.offset %= len(m.source)
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
|
||||
readers := make([]io.Reader, len(pattern))
|
||||
nullBytes := [4096]byte{}
|
||||
for i, n := range pattern {
|
||||
if i%2 == 0 {
|
||||
readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
|
||||
} else {
|
||||
readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
|
||||
}
|
||||
}
|
||||
reader := io.MultiReader(readers...)
|
||||
for {
|
||||
_, err := io.CopyN(writer, reader, chunkSize)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
for _, test := range testTable {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
|
||||
if assert.NoError(t, err) {
|
||||
normalSum := h.Sum(nil)
|
||||
assert.Equal(t, test.out, normalSum)
|
||||
// Test if different block-sizes produce differing results.
|
||||
for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
|
||||
t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := writeInChunks(h, blockSize, test.data, test.pattern)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, normalSum, h.Sum(nil))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
zeroHash := h.Sum(nil)
|
||||
_, err := h.Write([]byte{1})
|
||||
if assert.NoError(t, err) {
|
||||
assert.NotEqual(t, zeroHash, h.Sum(nil))
|
||||
h.Reset()
|
||||
assert.Equal(t, zeroHash, h.Sum(nil))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
assert.Equal(t, 20, h.Size())
|
||||
}
|
||||
|
||||
func TestBlockSize(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
assert.Equal(t, 4096, h.BlockSize())
|
||||
}
|
||||
|
||||
func TestBinaryMarshaler(t *testing.T) {
|
||||
for _, test := range testTable {
|
||||
h := hidrivehash.New()
|
||||
d := hidrivehash.New()
|
||||
half := len(test.pattern) / 2
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
|
||||
assert.NoError(t, err)
|
||||
encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if assert.NoError(t, err) {
|
||||
err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, h.Sum(nil), d.Sum(nil))
|
||||
err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
|
||||
assert.NoError(t, err)
|
||||
err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, h.Sum(nil), d.Sum(nil))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidEncoding(t *testing.T) {
|
||||
h := hidrivehash.New()
|
||||
err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
|
||||
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
|
||||
}
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
|
||||
content := []byte{1}
|
||||
h := hidrivehash.New()
|
||||
h.Write(content)
|
||||
sum := hidrivehash.Sum(content)
|
||||
assert.Equal(t, h.Sum(nil), sum[:])
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// LevelHash is an internal interface for level-hashes.
|
||||
type LevelHash interface {
|
||||
encoding.BinaryMarshaler
|
||||
encoding.BinaryUnmarshaler
|
||||
hash.Hash
|
||||
// Add takes a position-embedded checksum and adds it to the level.
|
||||
Add(sum []byte)
|
||||
// IsFull returns whether the number of checksums added to this level reached its capacity.
|
||||
IsFull() bool
|
||||
}
|
||||
@@ -35,11 +35,11 @@ var (
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "http",
|
||||
Description: "HTTP",
|
||||
Description: "http Connection",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
||||
Help: "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "headers",
|
||||
|
||||
@@ -138,7 +138,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
f.expires = expires
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -38,100 +38,6 @@ func init() {
|
||||
Name: "internetarchive",
|
||||
Description: "Internet Archive",
|
||||
NewFs: NewFs,
|
||||
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: map[string]fs.MetadataHelp{
|
||||
"name": {
|
||||
Help: "Full file path, without the bucket part",
|
||||
Type: "filename",
|
||||
Example: "backend/internetarchive/internetarchive.go",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"source": {
|
||||
Help: "The source of the file",
|
||||
Type: "string",
|
||||
Example: "original",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"mtime": {
|
||||
Help: "Time of last modification, managed by Rclone",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"size": {
|
||||
Help: "File size in bytes",
|
||||
Type: "decimal number",
|
||||
Example: "123456",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"md5": {
|
||||
Help: "MD5 hash calculated by Internet Archive",
|
||||
Type: "string",
|
||||
Example: "01234567012345670123456701234567",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"crc32": {
|
||||
Help: "CRC32 calculated by Internet Archive",
|
||||
Type: "string",
|
||||
Example: "01234567",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"sha1": {
|
||||
Help: "SHA1 hash calculated by Internet Archive",
|
||||
Type: "string",
|
||||
Example: "0123456701234567012345670123456701234567",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"format": {
|
||||
Help: "Name of format identified by Internet Archive",
|
||||
Type: "string",
|
||||
Example: "Comma-Separated Values",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"old_version": {
|
||||
Help: "Whether the file was replaced and moved by keep-old-version flag",
|
||||
Type: "boolean",
|
||||
Example: "true",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"viruscheck": {
|
||||
Help: "The last time viruscheck process was run for the file (?)",
|
||||
Type: "unixtime",
|
||||
Example: "1654191352",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"summation": {
|
||||
Help: "Check https://forum.rclone.org/t/31922 for how it is used",
|
||||
Type: "string",
|
||||
Example: "md5",
|
||||
ReadOnly: true,
|
||||
},
|
||||
|
||||
"rclone-ia-mtime": {
|
||||
Help: "Time of last modification, managed by Internet Archive",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z",
|
||||
},
|
||||
"rclone-mtime": {
|
||||
Help: "Time of last modification, managed by Rclone",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z",
|
||||
},
|
||||
"rclone-update-track": {
|
||||
Help: "Random value used by Rclone for tracking changes inside Internet Archive",
|
||||
Type: "string",
|
||||
Example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
},
|
||||
},
|
||||
Help: `Metadata fields provided by Internet Archive.
|
||||
If there are multiple values for a key, only the first one is returned.
|
||||
This is a limitation of Rclone, that supports one value per one key.
|
||||
|
||||
Owner is able to add custom keys. Metadata feature grabs all the keys including them.
|
||||
`,
|
||||
},
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
@@ -184,14 +90,6 @@ Only enable if you need to be guaranteed to be reflected after write operations.
|
||||
// maximum size of an item. this is constant across all items
|
||||
const iaItemMaxSize int64 = 1099511627776
|
||||
|
||||
// metadata keys that are not writeable
|
||||
var roMetadataKey = map[string]interface{}{
|
||||
// do not add mtime here, it's a documented exception
|
||||
"name": nil, "source": nil, "size": nil, "md5": nil,
|
||||
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
|
||||
"viruscheck": nil, "summation": nil,
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
@@ -224,7 +122,6 @@ type Object struct {
|
||||
md5 string // md5 hash of the file presented by the server
|
||||
sha1 string // sha1 hash of the file presented by the server
|
||||
crc32 string // crc32 of the file presented by the server
|
||||
rawData json.RawMessage
|
||||
}
|
||||
|
||||
// IAFile reprensents a subset of object in MetadataResponse.Files
|
||||
@@ -238,9 +135,6 @@ type IAFile struct {
|
||||
Md5 string `json:"md5"`
|
||||
Crc32 string `json:"crc32"`
|
||||
Sha1 string `json:"sha1"`
|
||||
Summation string `json:"summation"`
|
||||
|
||||
rawData json.RawMessage
|
||||
}
|
||||
|
||||
// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
|
||||
@@ -249,12 +143,6 @@ type MetadataResponse struct {
|
||||
ItemSize int64 `json:"item_size"`
|
||||
}
|
||||
|
||||
// MetadataResponseRaw is the form of MetadataResponse to deal with metadata
|
||||
type MetadataResponseRaw struct {
|
||||
Files []json.RawMessage `json:"files"`
|
||||
ItemSize int64 `json:"item_size"`
|
||||
}
|
||||
|
||||
// ModMetadataResponse represents response for amending metadata
|
||||
type ModMetadataResponse struct {
|
||||
// https://archive.org/services/docs/api/md-write.html#example
|
||||
@@ -338,10 +226,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
BucketBased: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
BucketBased: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
f.srv = rest.NewClient(fshttp.NewClient(ctx))
|
||||
@@ -422,17 +307,18 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
|
||||
}
|
||||
|
||||
// https://archive.org/services/docs/api/md-write.html
|
||||
// the following code might be useful for modifying metadata of an uploaded file
|
||||
patch := []map[string]string{
|
||||
var patch = []interface{}{
|
||||
// we should drop it first to clear all rclone-provided mtimes
|
||||
{
|
||||
"op": "remove",
|
||||
"path": "/rclone-mtime",
|
||||
}, {
|
||||
"op": "add",
|
||||
"path": "/rclone-mtime",
|
||||
"value": t.Format(time.RFC3339Nano),
|
||||
}}
|
||||
struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
}{"remove", "/rclone-mtime"},
|
||||
struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
Value string `json:"value"`
|
||||
}{"add", "/rclone-mtime", t.Format(time.RFC3339Nano)},
|
||||
}
|
||||
res, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -572,7 +458,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return "", err
|
||||
}
|
||||
bucket, bucketPath := f.split(remote)
|
||||
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
|
||||
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
@@ -760,7 +646,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// make a GET request to (frontend)/download/:item/:path
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
|
||||
Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
|
||||
Options: optionsFixed,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -799,23 +685,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", size)
|
||||
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
|
||||
}
|
||||
var mdata fs.Metadata
|
||||
mdata, err = fs.GetMetadataOptions(ctx, src, options)
|
||||
if err == nil && mdata != nil {
|
||||
for mk, mv := range mdata {
|
||||
mk = strings.ToLower(mk)
|
||||
if strings.HasPrefix(mk, "rclone-") {
|
||||
fs.LogPrintf(fs.LogLevelWarning, o, "reserved metadata key %s is about to set", mk)
|
||||
} else if _, ok := roMetadataKey[mk]; ok {
|
||||
fs.LogPrintf(fs.LogLevelWarning, o, "setting or modifying read-only key %s is requested, skipping", mk)
|
||||
continue
|
||||
} else if mk == "mtime" {
|
||||
// redirect to make it work
|
||||
mk = "rclone-mtime"
|
||||
}
|
||||
headers[fmt.Sprintf("x-amz-filemeta-%s", mk)] = mv
|
||||
}
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
var md5sumHex string
|
||||
@@ -893,34 +762,6 @@ func (o *Object) String() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Metadata returns all file metadata provided by Internet Archive
|
||||
func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
|
||||
if o.rawData == nil {
|
||||
return nil, nil
|
||||
}
|
||||
raw := make(map[string]json.RawMessage)
|
||||
err = json.Unmarshal(o.rawData, &raw)
|
||||
if err != nil {
|
||||
// fatal: json parsing failed
|
||||
return
|
||||
}
|
||||
for k, v := range raw {
|
||||
items, err := listOrString(v)
|
||||
if len(items) == 0 || err != nil {
|
||||
// skip: an entry failed to parse
|
||||
continue
|
||||
}
|
||||
m.Set(k, items[0])
|
||||
}
|
||||
// move the old mtime to an another key
|
||||
if v, ok := m["mtime"]; ok {
|
||||
m["rclone-ia-mtime"] = v
|
||||
}
|
||||
// overwrite with a correct mtime
|
||||
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
if resp != nil {
|
||||
for _, e := range retryErrorCodes {
|
||||
@@ -947,7 +788,7 @@ func (o *Object) split() (bucket, bucketPath string) {
|
||||
return o.fs.split(o.remote)
|
||||
}
|
||||
|
||||
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *MetadataResponse, err error) {
|
||||
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result MetadataResponse, err error) {
|
||||
var resp *http.Response
|
||||
// make a GET request to (frontend)/metadata/:item/
|
||||
opts := rest.Opts{
|
||||
@@ -955,15 +796,12 @@ func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *Metada
|
||||
Path: path.Join("/metadata/", bucket),
|
||||
}
|
||||
|
||||
var temp MetadataResponseRaw
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.front.CallJSON(ctx, &opts, nil, &temp)
|
||||
resp, err = f.front.CallJSON(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return temp.unraw()
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
// list up all files/directories without any filters
|
||||
@@ -1152,21 +990,15 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
|
||||
}
|
||||
|
||||
func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
|
||||
ret := &Object{
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
modTime: mtime,
|
||||
size: size,
|
||||
rawData: file.rawData,
|
||||
md5: file.Md5,
|
||||
crc32: file.Crc32,
|
||||
sha1: file.Sha1,
|
||||
}
|
||||
// hashes from _files.xml (where summation != "") is different from one in other files
|
||||
// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
|
||||
if file.Summation == "" {
|
||||
ret.md5 = file.Md5
|
||||
ret.crc32 = file.Crc32
|
||||
ret.sha1 = file.Sha1
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
|
||||
@@ -1213,23 +1045,6 @@ func (file IAFile) parseMtime() (mtime time.Time) {
|
||||
return mtime
|
||||
}
|
||||
|
||||
func (mrr *MetadataResponseRaw) unraw() (_ *MetadataResponse, err error) {
|
||||
var files []IAFile
|
||||
for _, raw := range mrr.Files {
|
||||
var parsed IAFile
|
||||
err = json.Unmarshal(raw, &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parsed.rawData = raw
|
||||
files = append(files, parsed)
|
||||
}
|
||||
return &MetadataResponse{
|
||||
Files: files,
|
||||
ItemSize: mrr.ItemSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func compareSize(a, b int64) bool {
|
||||
if a < 0 || b < 0 {
|
||||
// we won't compare if any of them is not known
|
||||
@@ -1291,5 +1106,4 @@ var (
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.Abouter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
)
|
||||
|
||||
@@ -46,9 +46,9 @@ const (
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Archive"
|
||||
jfsURL = "https://jfs.jottacloud.com/jfs/"
|
||||
rootURL = "https://jfs.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/"
|
||||
wwwURL = "https://www.jottacloud.com/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
cachePrefix = "rclone-jcmd5-"
|
||||
configDevice = "device"
|
||||
configMountpoint = "mountpoint"
|
||||
@@ -127,7 +127,7 @@ func init() {
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch config.State {
|
||||
case "":
|
||||
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
|
||||
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
|
||||
Value: "standard",
|
||||
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
|
||||
}, {
|
||||
@@ -145,7 +145,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
return fs.ConfigGoto(config.Result)
|
||||
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
|
||||
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
|
||||
case "standard_token":
|
||||
loginToken := config.Result
|
||||
m.Set(configClientID, defaultClientID)
|
||||
@@ -262,11 +262,7 @@ machines.`)
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
section of the official Jottacloud client. If you instead want to access the
|
||||
sync or the backup section, for example, you must choose yes.`)
|
||||
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
|
||||
case "choose_device_query":
|
||||
if config.Result != "true" {
|
||||
m.Set(configDevice, "")
|
||||
@@ -277,139 +273,43 @@ sync or the backup section, for example, you must choose yes.`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
|
||||
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.Set(configUsername, cust.Username)
|
||||
|
||||
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
|
||||
acc, err := getDriveInfo(ctx, srv, cust.Username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
deviceNames := make([]string, len(acc.Devices))
|
||||
for i, dev := range acc.Devices {
|
||||
if i > 0 && dev.Name == defaultDevice {
|
||||
// Insert the special Jotta device as first entry, making it the default choice.
|
||||
copy(deviceNames[1:i+1], deviceNames[0:i])
|
||||
deviceNames[0] = dev.Name
|
||||
} else {
|
||||
deviceNames[i] = dev.Name
|
||||
}
|
||||
}
|
||||
|
||||
help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used,
|
||||
which contains predefined mountpoints for archive, sync etc. All other devices
|
||||
are treated as backup devices by the official Jottacloud client. You may create
|
||||
a new by entering a unique name.`, defaultDevice)
|
||||
return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) {
|
||||
return deviceNames[i], ""
|
||||
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
|
||||
return acc.Devices[i].Name, ""
|
||||
})
|
||||
case "choose_device_result":
|
||||
device := config.Result
|
||||
m.Set(configDevice, device)
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
username, _ := m.Get(configUsername)
|
||||
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isNew := true
|
||||
for _, dev := range acc.Devices {
|
||||
if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite)
|
||||
device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead
|
||||
isNew = false
|
||||
break
|
||||
}
|
||||
}
|
||||
var dev *api.JottaDevice
|
||||
if isNew {
|
||||
fs.Debugf(nil, "Creating new device: %s", device)
|
||||
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
|
||||
if !isNew {
|
||||
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var help string
|
||||
if device == defaultDevice {
|
||||
// With built-in Jotta device the mountpoint choice is exclusive,
|
||||
// we do not want to risk any problems by creating new mountpoints on it.
|
||||
help = fmt.Sprintf(`The mountpoint to use on the built-in device %s.
|
||||
The standard setup is to use the %s mountpoint. Most other mountpoints
|
||||
have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint)
|
||||
return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
}
|
||||
help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s.
|
||||
You may create a new by entering a unique name.`, device)
|
||||
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
|
||||
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
case "choose_device_mountpoint":
|
||||
mountpoint := config.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
cust, err := getCustomerInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
device, _ := m.Get(configDevice)
|
||||
|
||||
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isNew := true
|
||||
for _, mnt := range dev.MountPoints {
|
||||
if strings.EqualFold(mnt.Name, mountpoint) {
|
||||
mountpoint = mnt.Name
|
||||
isNew = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isNew {
|
||||
if device == defaultDevice {
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
|
||||
}
|
||||
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
|
||||
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
|
||||
return fs.ConfigGoto("end")
|
||||
case "end":
|
||||
// All the config flows end up here in case we need to carry on with something
|
||||
@@ -432,17 +332,16 @@ type Options struct {
|
||||
|
||||
// Fs represents a remote jottacloud
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
fileEndpoint string
|
||||
allocateEndpoint string
|
||||
jfsSrv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
name string
|
||||
root string
|
||||
user string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpointURL string
|
||||
srv *rest.Client
|
||||
apiSrv *rest.Client
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a jottacloud object
|
||||
@@ -689,47 +588,15 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// createDevice makes a device
|
||||
func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: urlPathEscape(path),
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.Parameters.Set("type", "WORKSTATION")
|
||||
|
||||
_, err = srv.CallXML(ctx, &opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create device: %w", err)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// createMountPoint makes a mount point
|
||||
func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: urlPathEscape(path),
|
||||
}
|
||||
|
||||
_, err = srv.CallXML(ctx, &opts, nil, &info)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// setEndpoints generates the API endpoints
|
||||
func (f *Fs) setEndpoints() {
|
||||
// setEndpointURL generates the API endpoint URL
|
||||
func (f *Fs) setEndpointURL() {
|
||||
if f.opt.Device == "" {
|
||||
f.opt.Device = defaultDevice
|
||||
}
|
||||
if f.opt.Mountpoint == "" {
|
||||
f.opt.Mountpoint = defaultMountpoint
|
||||
}
|
||||
f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
|
||||
f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
|
||||
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
@@ -741,7 +608,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
|
||||
var result api.JottaFile
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -786,30 +653,13 @@ func urlPathEscape(in string) string {
|
||||
}
|
||||
|
||||
// filePathRaw returns an unescaped file path (f.root, file)
|
||||
// Optionally made absolute by prefixing with "/", typically required when used
|
||||
// as request parameter instead of the path (which is relative to some root url).
|
||||
func (f *Fs) filePathRaw(file string, absolute bool) string {
|
||||
prefix := ""
|
||||
if absolute {
|
||||
prefix = "/"
|
||||
}
|
||||
return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
|
||||
func (f *Fs) filePathRaw(file string) string {
|
||||
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
|
||||
}
|
||||
|
||||
// filePath returns an escaped file path (f.root, file)
|
||||
func (f *Fs) filePath(file string) string {
|
||||
return urlPathEscape(f.filePathRaw(file, false))
|
||||
}
|
||||
|
||||
// allocatePathRaw returns an unescaped allocate file path (f.root, file)
|
||||
// Optionally made absolute by prefixing with "/", typically required when used
|
||||
// as request parameter instead of the path (which is relative to some root url).
|
||||
func (f *Fs) allocatePathRaw(file string, absolute bool) string {
|
||||
prefix := ""
|
||||
if absolute {
|
||||
prefix = "/"
|
||||
}
|
||||
return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
|
||||
return urlPathEscape(f.filePathRaw(file))
|
||||
}
|
||||
|
||||
// Jottacloud requires the grant_type 'refresh_token' string
|
||||
@@ -842,12 +692,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
if ok {
|
||||
ver, err = strconv.Atoi(version)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("failed to parse config version")
|
||||
return nil, nil, errors.New("Failed to parse config version")
|
||||
}
|
||||
ok = (ver == configVersion) || (ver == legacyConfigVersion)
|
||||
}
|
||||
if !ok {
|
||||
return nil, nil, errors.New("outdated config - please reconfigure this backend")
|
||||
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
|
||||
}
|
||||
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
@@ -893,7 +743,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
// Create OAuth Client
|
||||
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to configure Jottacloud oauth client: %w", err)
|
||||
return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
|
||||
}
|
||||
return oAuthClient, ts, nil
|
||||
}
|
||||
@@ -919,7 +769,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
jfsSrv: rest.NewClient(oAuthClient).SetRoot(jfsURL),
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
@@ -929,7 +779,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: false,
|
||||
}).Fill(ctx, f)
|
||||
f.jfsSrv.SetErrorHandler(errorHandler)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
|
||||
f.features.ListR = nil
|
||||
}
|
||||
@@ -948,7 +798,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, err
|
||||
}
|
||||
f.user = cust.Username
|
||||
f.setEndpoints()
|
||||
f.setEndpointURL()
|
||||
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root actually an existing file
|
||||
@@ -1014,7 +864,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
|
||||
opts.Parameters.Set("mkDir", "true")
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &jf)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1043,7 +893,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
var resp *http.Response
|
||||
var result api.JottaFolder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -1180,7 +1030,7 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
|
||||
|
||||
if expected.Folders != actual.Folders ||
|
||||
expected.Files != actual.Files {
|
||||
return fmt.Errorf("invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
|
||||
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1201,7 +1051,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.Call(ctx, &opts)
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
}
|
||||
@@ -1251,6 +1101,9 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if f.opt.Device != "Jotta" {
|
||||
return nil, errors.New("upload not supported for devices other than Jotta")
|
||||
}
|
||||
o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
@@ -1260,7 +1113,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// chop off trailing / if it exists
|
||||
parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
|
||||
if strings.HasSuffix(dirPath, "/") {
|
||||
dirPath = dirPath[:len(dirPath)-1]
|
||||
}
|
||||
parent := path.Dir(dirPath)
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
}
|
||||
@@ -1308,7 +1164,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.Call(ctx, &opts)
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1361,7 +1217,7 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time,
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -1382,11 +1238,11 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
opts.Parameters.Set(method, f.filePathRaw(dest, true))
|
||||
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
|
||||
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1495,7 +1351,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.fileEndpoint, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
||||
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't move directory: %w", err)
|
||||
@@ -1520,7 +1376,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
var resp *http.Response
|
||||
var result api.JottaFile
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result)
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -1546,19 +1402,19 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return "", errors.New("couldn't create public link - no uri received")
|
||||
}
|
||||
if result.PublicSharePath != "" {
|
||||
webLink := joinPath(wwwURL, result.PublicSharePath)
|
||||
webLink := joinPath(baseURL, result.PublicSharePath)
|
||||
fs.Debugf(nil, "Web link: %s", webLink)
|
||||
} else {
|
||||
fs.Debugf(nil, "No web link received")
|
||||
}
|
||||
directLink := joinPath(wwwURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
|
||||
directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
|
||||
fs.Debugf(nil, "Direct link: %s", directLink)
|
||||
return directLink, nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
info, err := getDriveInfo(ctx, f.jfsSrv, f.user)
|
||||
info, err := getDriveInfo(ctx, f.srv, f.user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1761,7 +1617,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
opts.Parameters.Set("mode", "bin")
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.jfsSrv.Call(ctx, &opts)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1882,7 +1738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
Created: fileDate,
|
||||
Modified: fileDate,
|
||||
Md5: md5String,
|
||||
Path: o.fs.allocatePathRaw(o.remote, true),
|
||||
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
|
||||
}
|
||||
|
||||
// send it
|
||||
@@ -1952,7 +1808,7 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
|
||||
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -351,9 +351,9 @@ func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff
|
||||
}
|
||||
if f.mountID == "" {
|
||||
if opt.MountID == "" {
|
||||
return nil, errors.New("failed to find primary mount")
|
||||
return nil, errors.New("Failed to find primary mount")
|
||||
}
|
||||
return nil, errors.New("failed to find mount " + opt.MountID)
|
||||
return nil, errors.New("Failed to find mount " + opt.MountID)
|
||||
}
|
||||
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
|
||||
if err == nil && rootFile.Type != "dir" {
|
||||
|
||||
@@ -17,12 +17,8 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
var available, total, free int64
|
||||
root, e := syscall.UTF16PtrFromString(f.root)
|
||||
if e != nil {
|
||||
return nil, fmt.Errorf("failed to read disk usage: %w", e)
|
||||
}
|
||||
_, _, e1 := getFreeDiskSpace.Call(
|
||||
uintptr(unsafe.Pointer(root)),
|
||||
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
|
||||
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
|
||||
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
|
||||
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
|
||||
|
||||
@@ -42,22 +42,9 @@ func init() {
|
||||
Description: "Local Disk",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: `Depending on which OS is in use the local backend may return only some
|
||||
of the system metadata. Setting system metadata is supported on all
|
||||
OSes but setting user metadata is only supported on linux, freebsd,
|
||||
netbsd, macOS and Solaris. It is **not** supported on Windows yet
|
||||
([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).
|
||||
|
||||
User metadata is stored as extended attributes (which may not be
|
||||
supported by all file systems) under the "user.*" prefix.
|
||||
`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Default: false,
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
@@ -234,16 +221,15 @@ type Options struct {

// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -287,18 +273,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -923,7 +903,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", fmt.Errorf("hash: failed to open: %w", err)
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
return "", fmt.Errorf("hash: failed to read: %w", err)
@@ -957,22 +937,17 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}

// Set the atime and ltime of the object
func (o *Object) setTimes(atime, mtime time.Time) (err error) {
if o.translatedLink {
err = lChtimes(o.path, atime, mtime)
} else {
err = os.Chtimes(o.path, atime, mtime)
}
return err
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
err := o.setTimes(modTime, modTime)
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
if err != nil {
return err
}
@@ -1247,16 +1222,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
err = o.writeMetadata(meta)
if err != nil {
return fmt.Errorf("failed to set metadata: %w", err)
}

// ReRead info now that we have finished
return o.lstat()
}
@@ -1355,34 +1320,6 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata, err = o.getXattr()
if err != nil {
return nil, err
}
err = o.readMetadataFromFile(&metadata)
if err != nil {
return nil, err
}
return metadata, nil
}

// Write the metadata on the object
func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
err = o.setXattr(metadata)
if err != nil {
return err
}
err = o.writeMetadataToFile(metadata)
if err != nil {
return err
}
return err
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
@@ -1422,5 +1359,4 @@ var (
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
)

@@ -3,12 +3,10 @@ package local
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -231,138 +229,3 @@ func TestHashOnDelete(t *testing.T) {
|
||||
_, err = o.Hash(ctx, hash.MD5)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestMetadata(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "metafile.txt"
|
||||
when := time.Now()
|
||||
const dayLength = len("2001-01-01")
|
||||
whenRFC := when.Format(time.RFC3339Nano)
|
||||
r.WriteFile(filePath, "metadata file contents", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Get the object
|
||||
obj, err := f.NewObject(ctx, filePath)
|
||||
require.NoError(t, err)
|
||||
o := obj.(*Object)
|
||||
|
||||
features := f.Features()
|
||||
|
||||
var hasXID, hasAtime, hasBtime bool
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "freebsd", "netbsd", "linux":
|
||||
hasXID, hasAtime, hasBtime = true, true, true
|
||||
case "openbsd", "solaris":
|
||||
hasXID, hasAtime = true, true
|
||||
case "windows":
|
||||
hasAtime, hasBtime = true, true
|
||||
case "plan9", "js":
|
||||
// nada
|
||||
default:
|
||||
t.Errorf("No test cases for OS %q", runtime.GOOS)
|
||||
}
|
||||
|
||||
assert.True(t, features.ReadMetadata)
|
||||
assert.True(t, features.WriteMetadata)
|
||||
assert.Equal(t, xattrSupported, features.UserMetadata)
|
||||
|
||||
t.Run("Xattr", func(t *testing.T) {
|
||||
if !xattrSupported {
|
||||
t.Skip()
|
||||
}
|
||||
m, err := o.getXattr()
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, m)
|
||||
|
||||
inM := fs.Metadata{
|
||||
"potato": "chips",
|
||||
"cabbage": "soup",
|
||||
}
|
||||
err = o.setXattr(inM)
|
||||
require.NoError(t, err)
|
||||
|
||||
m, err = o.getXattr()
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, m)
|
||||
assert.Equal(t, inM, m)
|
||||
})
|
||||
|
||||
checkTime := func(m fs.Metadata, key string, when time.Time) {
|
||||
mt, ok := o.parseMetadataTime(m, key)
|
||||
assert.True(t, ok)
|
||||
dt := mt.Sub(when)
|
||||
precision := time.Second
|
||||
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
|
||||
}
|
||||
|
||||
checkInt := func(m fs.Metadata, key string, base int) int {
|
||||
value, ok := o.parseMetadataInt(m, key, base)
|
||||
assert.True(t, ok)
|
||||
return value
|
||||
}
|
||||
t.Run("Read", func(t *testing.T) {
|
||||
m, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, m)
|
||||
|
||||
// All OSes have these
|
||||
checkInt(m, "mode", 8)
|
||||
checkTime(m, "mtime", when)
|
||||
|
||||
assert.Equal(t, len(whenRFC), len(m["mtime"]))
|
||||
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
|
||||
|
||||
if hasAtime {
|
||||
checkTime(m, "atime", when)
|
||||
}
|
||||
if hasBtime {
|
||||
checkTime(m, "btime", when)
|
||||
}
|
||||
if hasXID {
|
||||
checkInt(m, "uid", 10)
|
||||
checkInt(m, "gid", 10)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Write", func(t *testing.T) {
|
||||
newAtimeString := "2011-12-13T14:15:16.999999999Z"
|
||||
newAtime := fstest.Time(newAtimeString)
|
||||
newMtimeString := "2011-12-12T14:15:16.999999999Z"
|
||||
newMtime := fstest.Time(newMtimeString)
|
||||
newBtimeString := "2011-12-11T14:15:16.999999999Z"
|
||||
newBtime := fstest.Time(newBtimeString)
|
||||
newM := fs.Metadata{
|
||||
"mtime": newMtimeString,
|
||||
"atime": newAtimeString,
|
||||
"btime": newBtimeString,
|
||||
// Can't test uid, gid without being root
|
||||
"mode": "0767",
|
||||
"potato": "wedges",
|
||||
}
|
||||
err := o.writeMetadata(newM)
|
||||
require.NoError(t, err)
|
||||
|
||||
m, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, m)
|
||||
|
||||
mode := checkInt(m, "mode", 8)
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
|
||||
}
|
||||
|
||||
checkTime(m, "mtime", newMtime)
|
||||
if hasAtime {
|
||||
checkTime(m, "atime", newAtime)
|
||||
}
|
||||
if haveSetBTime {
|
||||
checkTime(m, "btime", newBtime)
|
||||
}
|
||||
if xattrSupported {
|
||||
assert.Equal(t, "wedges", m["potato"])
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
@@ -11,8 +11,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
RemoteName: "",
NilObject: (*local.Object)(nil),
})
}

@@ -1,138 +0,0 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
const metadataTimeFormat = time.RFC3339Nano
|
||||
|
||||
// system metadata keys which this backend owns
|
||||
//
|
||||
// not all values supported on all OSes
|
||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
"mode": {
|
||||
Help: "File type and mode",
|
||||
Type: "octal, unix style",
|
||||
Example: "0100664",
|
||||
},
|
||||
"uid": {
|
||||
Help: "User ID of owner",
|
||||
Type: "decimal number",
|
||||
Example: "500",
|
||||
},
|
||||
"gid": {
|
||||
Help: "Group ID of owner",
|
||||
Type: "decimal number",
|
||||
Example: "500",
|
||||
},
|
||||
"rdev": {
|
||||
Help: "Device ID (if special file)",
|
||||
Type: "hexadecimal",
|
||||
Example: "1abc",
|
||||
},
|
||||
"atime": {
|
||||
Help: "Time of last access",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
},
|
||||
"mtime": {
|
||||
Help: "Time of last modification",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
},
|
||||
"btime": {
|
||||
Help: "Time of file birth (creation)",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
},
|
||||
}
|
||||
|
||||
// parse a time string from metadata with key
|
||||
func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
|
||||
value, ok := m[key]
|
||||
if ok {
|
||||
var err error
|
||||
t, err = time.Parse(metadataTimeFormat, value)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
return t, ok
|
||||
}
|
||||
|
||||
// parse am int from metadata with key and base
|
||||
func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
|
||||
value, ok := m[key]
|
||||
if ok {
|
||||
var err error
|
||||
result64, err := strconv.ParseInt(value, base, 64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
|
||||
ok = false
|
||||
}
|
||||
result = int(result64)
|
||||
}
|
||||
return result, ok
|
||||
}
|
||||
|
||||
// Write the metadata into the file
|
||||
//
|
||||
// It isn't possible to set the ctime and btime under Unix
|
||||
func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
||||
var err error
|
||||
atime, atimeOK := o.parseMetadataTime(m, "atime")
|
||||
mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
|
||||
btime, btimeOK := o.parseMetadataTime(m, "btime")
|
||||
if atimeOK || mtimeOK {
|
||||
if atimeOK && !mtimeOK {
|
||||
mtime = atime
|
||||
}
|
||||
if !atimeOK && mtimeOK {
|
||||
atime = mtime
|
||||
}
|
||||
err = o.setTimes(atime, mtime)
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to set times: %w", err)
|
||||
}
|
||||
}
|
||||
if haveSetBTime {
|
||||
if btimeOK {
|
||||
err = setBTime(o.path, btime)
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
uid, hasUID := o.parseMetadataInt(m, "uid", 10)
|
||||
gid, hasGID := o.parseMetadataInt(m, "gid", 10)
|
||||
if hasUID {
|
||||
// FIXME should read UID and GID of current user and only attempt to set it if different
|
||||
if !hasGID {
|
||||
gid = uid
|
||||
}
|
||||
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
|
||||
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
|
||||
} else {
|
||||
err = os.Chown(o.path, uid, gid)
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change ownership: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
|
||||
if hasMode {
|
||||
err = os.Chmod(o.path, os.FileMode(mode))
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change permissions: %w", err)
|
||||
}
|
||||
}
|
||||
// FIXME not parsing rdev yet
|
||||
return outErr
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
//go:build darwin || freebsd || netbsd
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat, ok := info.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
fs.Debugf(o, "didn't return Stat_t as expected")
|
||||
return nil
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
|
||||
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
|
||||
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
|
||||
if stat.Rdev != 0 {
|
||||
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
|
||||
}
|
||||
setTime := func(key string, t syscall.Timespec) {
|
||||
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.Atimespec)
|
||||
setTime("mtime", stat.Mtimespec)
|
||||
setTime("btime", stat.Birthtimespec)
|
||||
return nil
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
flags := unix.AT_SYMLINK_NOFOLLOW
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
flags = 0
|
||||
}
|
||||
var stat unix.Statx_t
|
||||
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
|
||||
unix.STATX_TYPE | // Want stx_mode & S_IFMT
|
||||
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
|
||||
unix.STATX_UID | // Want stx_uid
|
||||
unix.STATX_GID | // Want stx_gid
|
||||
unix.STATX_ATIME | // Want stx_atime
|
||||
unix.STATX_MTIME | // Want stx_mtime
|
||||
unix.STATX_CTIME | // Want stx_ctime
|
||||
unix.STATX_BTIME), // Want stx_btime
|
||||
&stat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
|
||||
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
|
||||
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
|
||||
if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
|
||||
m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
|
||||
}
|
||||
setTime := func(key string, t unix.StatxTimestamp) {
|
||||
m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.Atime)
|
||||
setTime("mtime", stat.Mtime)
|
||||
setTime("btime", stat.Btime)
|
||||
return nil
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
//go:build plan9 || js
|
||||
// +build plan9 js
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
|
||||
m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
|
||||
return nil
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
//go:build openbsd || solaris
|
||||
// +build openbsd solaris
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat, ok := info.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
fs.Debugf(o, "didn't return Stat_t as expected")
|
||||
return nil
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
|
||||
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
|
||||
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
|
||||
if stat.Rdev != 0 {
|
||||
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
|
||||
}
|
||||
setTime := func(key string, t syscall.Timespec) {
|
||||
m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.Atim)
|
||||
setTime("mtime", stat.Mtim)
|
||||
return nil
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
|
||||
if !ok {
|
||||
fs.Debugf(o, "didn't return Win32FileAttributeData as expected")
|
||||
return nil
|
||||
}
|
||||
// FIXME do something with stat.FileAttributes ?
|
||||
m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
|
||||
setTime := func(key string, t syscall.Filetime) {
|
||||
m.Set(key, time.Unix(0, t.Nanoseconds()).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.LastAccessTime)
|
||||
setTime("mtime", stat.LastWriteTime)
|
||||
setTime("btime", stat.CreationTime)
|
||||
return nil
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const haveSetBTime = false
|
||||
|
||||
// setBTime changes the the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const haveSetBTime = true
|
||||
|
||||
// setBTime sets the the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
h, err := syscall.Open(name, os.O_RDWR, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
closeErr := syscall.Close(h)
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
|
||||
return syscall.SetFileTime(h, &bFileTime, nil, nil)
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
//go:build !openbsd && !plan9
|
||||
// +build !openbsd,!plan9
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/xattr"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
const (
|
||||
xattrPrefix = "user." // FIXME is this correct for all unixes?
|
||||
xattrSupported = xattr.XATTR_SUPPORTED
|
||||
)
|
||||
|
||||
// Check to see if the error supplied is a not supported error, and if
|
||||
// so, disable xattrs
|
||||
func (f *Fs) xattrIsNotSupported(err error) bool {
|
||||
xattrErr, ok := err.(*xattr.Error)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
|
||||
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
|
||||
// Show xattrs not supported
|
||||
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
|
||||
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getXattr returns the extended attributes for an object
|
||||
//
|
||||
// It doesn't return any attributes owned by this backend in
|
||||
// metadataKeys
|
||||
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var list []string
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
list, err = xattr.List(o.path)
|
||||
} else {
|
||||
list, err = xattr.LList(o.path)
|
||||
}
|
||||
if err != nil {
|
||||
if o.fs.xattrIsNotSupported(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read xattr: %w", err)
|
||||
}
|
||||
if len(list) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
metadata = make(fs.Metadata, len(list))
|
||||
for _, k := range list {
|
||||
var v []byte
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
v, err = xattr.Get(o.path, k)
|
||||
} else {
|
||||
v, err = xattr.LGet(o.path, k)
|
||||
}
|
||||
if err != nil {
|
||||
if o.fs.xattrIsNotSupported(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
|
||||
}
|
||||
k = strings.ToLower(k)
|
||||
if !strings.HasPrefix(k, xattrPrefix) {
|
||||
continue
|
||||
}
|
||||
k = k[len(xattrPrefix):]
|
||||
if _, found := systemMetadataInfo[k]; found {
|
||||
continue
|
||||
}
|
||||
metadata[k] = string(v)
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// setXattr sets the metadata on the file Xattrs
|
||||
//
|
||||
// It doesn't set any attributes owned by this backend in metadataKeys
|
||||
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
return nil
|
||||
}
|
||||
for k, value := range metadata {
|
||||
k = strings.ToLower(k)
|
||||
if _, found := systemMetadataInfo[k]; found {
|
||||
continue
|
||||
}
|
||||
k = xattrPrefix + k
|
||||
v := []byte(value)
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
err = xattr.Set(o.path, k, v)
|
||||
} else {
|
||||
err = xattr.LSet(o.path, k, v)
|
||||
}
|
||||
if err != nil {
|
||||
if o.fs.xattrIsNotSupported(err) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
//go:build openbsd || plan9
|
||||
// +build openbsd plan9
|
||||
|
||||
// The pkg/xattr module doesn't compile for openbsd or plan9
|
||||
package local
|
||||
|
||||
import "github.com/rclone/rclone/fs"
|
||||
|
||||
const (
|
||||
xattrSupported = false
|
||||
)
|
||||
|
||||
// getXattr returns the extended attributes for an object
|
||||
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// setXattr sets the metadata on the file Xattrs
|
||||
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
|
||||
return nil
|
||||
}
|
||||
@@ -16,9 +16,9 @@ import (

// protocol errors
var (
ErrorPrematureEOF = errors.New("premature EOF")
ErrorInvalidLength = errors.New("invalid length")
ErrorZeroTerminate = errors.New("string must end with zero")
ErrorPrematureEOF = errors.New("Premature EOF")
ErrorInvalidLength = errors.New("Invalid length")
ErrorZeroTerminate = errors.New("String must end with zero")
)

// BinWriter is a binary protocol writer

@@ -435,10 +435,10 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
|
||||
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
|
||||
}
|
||||
if err == nil && !tokenIsValid(t) {
|
||||
err = errors.New("invalid token")
|
||||
err = errors.New("Invalid token")
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to authorize: %w", err)
|
||||
return fmt.Errorf("Failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
if err = oauthutil.PutToken(f.name, f.m, t, false); err != nil {
|
||||
@@ -580,7 +580,7 @@ func readBodyWord(res *http.Response) (word string, err error) {
|
||||
word = strings.Split(line, " ")[0]
|
||||
}
|
||||
if word == "" {
|
||||
return "", errors.New("empty reply from dispatcher")
|
||||
return "", errors.New("Empty reply from dispatcher")
|
||||
}
|
||||
return word, nil
|
||||
}
|
||||
@@ -1684,7 +1684,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
spoolFile, mrHash, err := makeTempFile(ctx, tmpFs, wrapIn, src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create spool file: %w", err)
|
||||
return fmt.Errorf("Failed to create spool file: %w", err)
|
||||
}
|
||||
if o.putByHash(ctx, mrHash, src, "spool") {
|
||||
// If put by hash is successful, ignore transitive error
|
||||
@@ -1723,7 +1723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(fileHash, newHash) {
|
||||
if bytes.Compare(fileHash, newHash) != 0 {
|
||||
if o.fs.opt.CheckHash {
|
||||
return mrhash.ErrorInvalidHash
|
||||
}
|
||||
@@ -1966,7 +1966,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) error {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
if newObj.remote != o.remote {
|
||||
return fmt.Errorf("file %q path has changed to %q", o.remote, newObj.remote)
|
||||
return fmt.Errorf("File %q path has changed to %q", o.remote, newObj.remote)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = newObj.size
|
||||
@@ -2262,7 +2262,7 @@ func (e *endHandler) handle(err error) error {
|
||||
}
|
||||
|
||||
newHash := e.hasher.Sum(nil)
|
||||
if bytes.Equal(o.mrHash, newHash) {
|
||||
if bytes.Compare(o.mrHash, newHash) == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
if o.fs.opt.CheckHash {
|
||||
@@ -2277,7 +2277,7 @@ type serverPool struct {
|
||||
pool pendingServerMap
|
||||
mu sync.Mutex
|
||||
path string
|
||||
expirySec int
|
||||
expirySec time.Duration
|
||||
fs *Fs
|
||||
}
|
||||
|
||||
@@ -2318,7 +2318,7 @@ func (p *serverPool) Dispatch(ctx context.Context, current string) (string, erro
|
||||
})
|
||||
if err != nil || url == "" {
|
||||
closeBody(res)
|
||||
return "", fmt.Errorf("failed to request file server: %w", err)
|
||||
return "", fmt.Errorf("Failed to request file server: %w", err)
|
||||
}
|
||||
|
||||
p.addServer(url, now)
|
||||
@@ -2384,7 +2384,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
expiry := now.Add(time.Duration(p.expirySec) * time.Second)
|
||||
expiry := now.Add(p.expirySec * time.Second)
|
||||
|
||||
expiryStr := []byte("-")
|
||||
if p.fs.ci.LogLevel >= fs.LogLevelInfo {
|
||||
|
||||
@@ -10,8 +10,7 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: ":memory:",
|
||||
NilObject: (*Object)(nil),
|
||||
QuickTestOK: true,
|
||||
RemoteName: ":memory:",
|
||||
NilObject: (*Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -903,20 +903,22 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
|
||||
files = statResp.Files
|
||||
f.setStatCache(URL, files)
|
||||
}
|
||||
// Multiple objects can be returned with the "slash=both" option,
|
||||
// when file/symlink/directory has the same name
|
||||
for i := range files {
|
||||
if files[i].Type == "symlink" {
|
||||
// Add .rclonelink suffix to allow local backend code to convert to a symlink.
|
||||
files[i].Name += ".rclonelink"
|
||||
fs.Infof(nil, "Converting a symlink to the rclonelink on the stat request %s", files[i].Name)
|
||||
}
|
||||
entrywanted := (directory && files[i].Type == "dir") ||
|
||||
(!directory && files[i].Type != "dir")
|
||||
if entrywanted {
|
||||
filestamp := files[0]
|
||||
files[0] = files[i]
|
||||
files[i] = filestamp
|
||||
if files != nil {
|
||||
// Multiple objects can be returned with the "slash=both" option,
|
||||
// when file/symlink/directory has the same name
|
||||
for i := range files {
|
||||
if files[i].Type == "symlink" {
|
||||
// Add .rclonelink suffix to allow local backend code to convert to a symlink.
|
||||
files[i].Name += ".rclonelink"
|
||||
fs.Infof(nil, "Converting a symlink to the rclonelink on the stat request %s", files[i].Name)
|
||||
}
|
||||
entrywanted := (directory && files[i].Type == "dir") ||
|
||||
(!directory && files[i].Type != "dir")
|
||||
if entrywanted {
|
||||
filestamp := files[0]
|
||||
files[0] = files[i]
|
||||
files[i] = filestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
|
||||
@@ -292,7 +292,7 @@ type AsyncOperationStatus struct {
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
} else if i.ParentReference != nil && !strings.Contains(i.ID, "#") {
} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
return i.ParentReference.DriveID + "#" + i.ID
}
return i.ID

@@ -65,12 +65,12 @@ var (
|
||||
authPath = "/common/oauth2/v2.0/authorize"
|
||||
tokenPath = "/common/oauth2/v2.0/token"
|
||||
|
||||
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
|
||||
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||
scopesWithSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
|
||||
scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: scopeAccess,
|
||||
Scopes: scopesWithSitePermission,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
@@ -150,27 +150,6 @@ there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "access_scopes",
|
||||
Help: `Set scopes to be requested by rclone.
|
||||
|
||||
Choose or manually enter a custom space separated list with all scopes, that rclone should request.
|
||||
`,
|
||||
Default: scopeAccess,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access",
|
||||
Help: "Read and write access to all resources",
|
||||
},
|
||||
{
|
||||
Value: "Files.Read Files.Read.All Sites.Read.All offline_access",
|
||||
Help: "Read only access to all resources",
|
||||
},
|
||||
{
|
||||
Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access",
|
||||
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
|
||||
},
|
||||
}}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
|
||||
@@ -181,7 +160,6 @@ application, and your organization disallows users to consent app permission
|
||||
request on their own.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "expose_onenote_files",
|
||||
Help: `Set to make OneNote files show up in directory listings.
|
||||
@@ -423,16 +401,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
region, graphURL := getRegionURL(m)
|
||||
|
||||
if config.State == "" {
|
||||
var accessScopes fs.SpaceSepList
|
||||
accessScopesString, _ := m.Get("access_scopes")
|
||||
err := accessScopes.Set(accessScopesString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
|
||||
}
|
||||
oauthConfig.Scopes = []string(accessScopes)
|
||||
disableSitePermission, _ := m.Get("disable_site_permission")
|
||||
if disableSitePermission == "true" {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
oauthConfig.Scopes = scopesWithoutSitePermission
|
||||
} else {
|
||||
oauthConfig.Scopes = scopesWithSitePermission
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[region] + authPath,
|
||||
@@ -451,7 +424,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
|
||||
switch config.State {
|
||||
case "choose_type":
|
||||
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
|
||||
return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
|
||||
Value: "onedrive",
|
||||
Help: "OneDrive Personal or Business",
|
||||
}, {
|
||||
@@ -589,7 +562,6 @@ type Options struct {
|
||||
DriveType string `config:"drive_type"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
DisableSitePermission bool `config:"disable_site_permission"`
|
||||
AccessScopes fs.SpaceSepList `config:"access_scopes"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
@@ -688,7 +660,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
}
|
||||
}
|
||||
case 401:
|
||||
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
|
||||
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
retry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
} else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") {
|
||||
@@ -744,7 +716,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
||||
firstSlashIndex := strings.IndexRune(path, '/')
|
||||
|
||||
if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
|
||||
opts := f.newOptsCallWithPath(ctx, path, "GET", "")
|
||||
var opts rest.Opts
|
||||
opts = f.newOptsCallWithPath(ctx, path, "GET", "")
|
||||
opts.Path = strings.TrimSuffix(opts.Path, ":")
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
|
||||
@@ -857,9 +830,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||
oauthConfig.Scopes = opt.AccessScopes
|
||||
if opt.DisableSitePermission {
|
||||
oauthConfig.Scopes = scopeAccessWithoutSites
|
||||
oauthConfig.Scopes = scopesWithoutSitePermission
|
||||
} else {
|
||||
oauthConfig.Scopes = scopesWithSitePermission
|
||||
}
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||
@@ -1313,7 +1287,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
if srcObj.fs == f {
|
||||
srcPath := srcObj.rootPath()
|
||||
dstPath := f.rootPath(remote)
|
||||
if strings.EqualFold(srcPath, dstPath) {
|
||||
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
|
||||
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||
}
|
||||
}
|
||||
@@ -1760,6 +1734,16 @@ func (o *Object) rootPath() string {
|
||||
return o.fs.rootPath(o.remote)
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server given a remote
|
||||
func (f *Fs) srvPath(remote string) string {
|
||||
return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server
|
||||
func (o *Object) srvPath() string {
|
||||
return o.fs.srvPath(o.remote)
|
||||
}
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if o.fs.driveType == driveTypePersonal {
|
||||
@@ -2201,7 +2185,7 @@ func (o *Object) ID() string {
|
||||
// Such a normalized ID can come from (*Item).GetID()
|
||||
func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
|
||||
rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
|
||||
if strings.Contains(ID, "#") {
|
||||
if strings.Index(ID, "#") >= 0 {
|
||||
s := strings.Split(ID, "#")
|
||||
return s[1], s[0], rootURL
|
||||
}
|
||||
@@ -2375,9 +2359,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
|
||||
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken string, err error) {
|
||||
delta, err := f.changeNotifyNextChange(ctx, "latest")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
parsedURL, err := url.Parse(delta.DeltaLink)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -2407,9 +2388,6 @@ func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
|
||||
|
||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
|
||||
delta, err := f.changeNotifyNextChange(ctx, deltaToken)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
parsedURL, err := url.Parse(delta.DeltaLink)
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -361,8 +361,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
|
||||
srcPath := srcObj.fs.rootSlash() + srcObj.remote
|
||||
dstPath := f.rootSlash() + remote
|
||||
if strings.EqualFold(srcPath, dstPath) {
|
||||
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
|
||||
return nil, fmt.Errorf("Can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
@@ -429,12 +429,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// move_copy will silently truncate new filenames
|
||||
if len(leaf) > 255 {
|
||||
fs.Debugf(src, "Can't move file: name (%q) exceeds 255 char", leaf)
|
||||
return nil, fs.ErrorFileNameTooLong
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
var resp *http.Response
|
||||
response := moveCopyFileResponse{}
|
||||
@@ -594,6 +588,9 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp != nil {
|
||||
}
|
||||
|
||||
return info, err
|
||||
}
|
||||
|
||||
@@ -614,13 +611,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if o.id == "" {
|
||||
if "" == o.id {
|
||||
// Attempt to read ID, ignore error
|
||||
// FIXME is this correct?
|
||||
_ = o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
if o.id == "" {
|
||||
if "" == o.id {
|
||||
// We need to create an ID for this file
|
||||
var resp *http.Response
|
||||
response := createFileResponse{}
|
||||
@@ -1030,52 +1027,30 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
var resp *http.Response
|
||||
fileInfo := File{}
|
||||
|
||||
// If we know the object id perform a direct lookup
|
||||
// because the /folder/itembyname.json endpoint is unreliable:
|
||||
// newly created objects take an arbitrary amount of time to show up
|
||||
if o.id != "" {
|
||||
folderList := FolderList{}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: fmt.Sprintf("/file/info.json/%s?session_id=%s",
|
||||
o.id, o.fs.session.SessionID),
|
||||
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
|
||||
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &fileInfo)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get fileinfo: %w", err)
|
||||
}
|
||||
|
||||
o.id = fileInfo.FileID
|
||||
o.modTime = time.Unix(fileInfo.DateModified, 0)
|
||||
o.md5 = fileInfo.FileHash
|
||||
o.size = fileInfo.Size
|
||||
return nil
|
||||
}
|
||||
folderList := FolderList{}
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
|
||||
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get folder list: %w", err)
|
||||
}
|
||||
|
||||
if len(folderList.Files) == 0 {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
fileInfo = folderList.Files[0]
|
||||
o.id = fileInfo.FileID
|
||||
o.modTime = time.Unix(fileInfo.DateModified, 0)
|
||||
o.md5 = fileInfo.FileHash
|
||||
o.size = fileInfo.Size
|
||||
|
||||
leafFile := folderList.Files[0]
|
||||
o.id = leafFile.FileID
|
||||
o.modTime = time.Unix(leafFile.DateModified, 0)
|
||||
o.md5 = leafFile.FileHash
|
||||
o.size = leafFile.Size
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -136,7 +136,7 @@ func (g *GetFileLinkResult) IsValid() bool {
if len(g.Hosts) == 0 {
return false
}
return time.Until(time.Time(g.Expires)) > 30*time.Second
return time.Time(g.Expires).Sub(time.Now()) > 30*time.Second
}

// URL returns a URL from the Path and Hosts. Check with IsValid

@@ -24,7 +24,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
@@ -131,19 +130,6 @@ with rclone authorize.
|
||||
Value: "eapi.pcloud.com",
|
||||
Help: "EU region",
|
||||
}},
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: `Your pcloud username.
|
||||
|
||||
This is only required when you want to use the cleanup command. Due to a bug
|
||||
in the pcloud API the required API does not support OAuth authentication so
|
||||
we have to rely on user password authentication for it.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your pcloud password.",
|
||||
IsPassword: true,
|
||||
Advanced: true,
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
@@ -153,8 +139,6 @@ type Options struct {
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
Hostname string `config:"hostname"`
|
||||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
}
|
||||
|
||||
// Fs represents a remote pcloud
|
||||
@@ -164,7 +148,6 @@ type Fs struct {
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
cleanupSrv *rest.Client // the connection used for the cleanup method
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
@@ -244,7 +227,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
}
|
||||
}
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
doRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
@@ -310,7 +293,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
updateTokenURL(oauthConfig, opt.Hostname)
|
||||
|
||||
canCleanup := opt.Username != "" && opt.Password != ""
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
@@ -318,16 +300,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
if canCleanup {
|
||||
f.cleanupSrv = rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://" + opt.Hostname)
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
if !canCleanup {
|
||||
f.features.CleanUp = nil
|
||||
}
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Renew the token in the background
|
||||
@@ -753,12 +729,10 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
|
||||
opts.Parameters.Set("username", f.opt.Username)
|
||||
opts.Parameters.Set("password", obscure.MustReveal(f.opt.Password))
|
||||
var resp *http.Response
|
||||
var result api.Error
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.cleanupSrv.CallJSON(ctx, &opts, nil, &result)
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
err = result.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
@@ -934,14 +908,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, err
}
free := q.Quota - q.UsedQuota
if free < 0 {
free = 0
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
Free: fs.NewUsageValue(free), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
Free: fs.NewUsageValue(q.Quota - q.UsedQuota), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

@@ -253,7 +253,7 @@ func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error)
|
||||
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("the endpoint \"%s\" format error", endpoint)
|
||||
return nil, fmt.Errorf("The endpoint \"%s\" format error", endpoint)
|
||||
}
|
||||
|
||||
if _protocol != "" {
|
||||
@@ -573,7 +573,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, &qs.KeyType{Key: &remote}, true)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -773,6 +775,8 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||
retries++
|
||||
wasDeleted = true
|
||||
continue
|
||||
default:
|
||||
break
|
||||
}
|
||||
break
|
||||
}
|
||||
@@ -850,6 +854,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
continue
|
||||
default:
|
||||
err = e
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -184,7 +184,7 @@ func (u *uploader) upload() error {
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
} else if err != nil {
return fmt.Errorf("read upload data failed: %w", err)
return fmt.Errorf("read upload data failed: %s", err)
}

fs.Debugf(u, "Uploading as multi-part object to QingStor")

598 backend/s3/s3.go
@@ -8,12 +8,10 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -34,6 +32,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -61,20 +60,9 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch config.State {
|
||||
case "":
|
||||
return nil, setEndpointValueForIDriveE2(m)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
},
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: `User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your S3 provider.",
|
||||
@@ -104,15 +92,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
}, {
|
||||
Value: "HuaweiOBS",
|
||||
Help: "Huawei Object Storage Service",
|
||||
}, {
|
||||
Value: "IBMCOS",
|
||||
Help: "IBM COS S3",
|
||||
}, {
|
||||
Value: "IDrive",
|
||||
Help: "IDrive e2",
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
@@ -323,56 +305,6 @@ func init() {
|
||||
Value: "pl-waw",
|
||||
Help: "Warsaw, Poland",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.\n",
|
||||
Provider: "HuaweiOBS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "af-south-1",
|
||||
Help: "AF-Johannesburg",
|
||||
}, {
|
||||
Value: "ap-southeast-2",
|
||||
Help: "AP-Bangkok",
|
||||
}, {
|
||||
Value: "ap-southeast-3",
|
||||
Help: "AP-Singapore",
|
||||
}, {
|
||||
Value: "cn-east-3",
|
||||
Help: "CN East-Shanghai1",
|
||||
}, {
|
||||
Value: "cn-east-2",
|
||||
Help: "CN East-Shanghai2",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "CN North-Beijing1",
|
||||
}, {
|
||||
Value: "cn-north-4",
|
||||
Help: "CN North-Beijing4",
|
||||
}, {
|
||||
Value: "cn-south-1",
|
||||
Help: "CN South-Guangzhou",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "CN-Hong Kong",
|
||||
}, {
|
||||
Value: "sa-argentina-1",
|
||||
Help: "LA-Buenos Aires1",
|
||||
}, {
|
||||
Value: "sa-peru-1",
|
||||
Help: "LA-Lima1",
|
||||
}, {
|
||||
Value: "na-mexico-1",
|
||||
Help: "LA-Mexico City1",
|
||||
}, {
|
||||
Value: "sa-chile-1",
|
||||
Help: "LA-Santiago2",
|
||||
}, {
|
||||
Value: "sa-brazil-1",
|
||||
Help: "LA-Sao Paulo1",
|
||||
}, {
|
||||
Value: "ru-northwest-2",
|
||||
Help: "RU-Moscow2",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
@@ -384,7 +316,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -776,57 +708,6 @@ func init() {
|
||||
Value: "oss-me-east-1.aliyuncs.com",
|
||||
Help: "Middle East 1 (Dubai)",
|
||||
}},
|
||||
}, {
|
||||
// obs endpoints: https://developer.huaweicloud.com/intl/en-us/endpoint?OBS
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for OBS API.",
|
||||
Provider: "HuaweiOBS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "obs.af-south-1.myhuaweicloud.com",
|
||||
Help: "AF-Johannesburg",
|
||||
}, {
|
||||
Value: "obs.ap-southeast-2.myhuaweicloud.com",
|
||||
Help: "AP-Bangkok",
|
||||
}, {
|
||||
Value: "obs.ap-southeast-3.myhuaweicloud.com",
|
||||
Help: "AP-Singapore",
|
||||
}, {
|
||||
Value: "obs.cn-east-3.myhuaweicloud.com",
|
||||
Help: "CN East-Shanghai1",
|
||||
}, {
|
||||
Value: "obs.cn-east-2.myhuaweicloud.com",
|
||||
Help: "CN East-Shanghai2",
|
||||
}, {
|
||||
Value: "obs.cn-north-1.myhuaweicloud.com",
|
||||
Help: "CN North-Beijing1",
|
||||
}, {
|
||||
Value: "obs.cn-north-4.myhuaweicloud.com",
|
||||
Help: "CN North-Beijing4",
|
||||
}, {
|
||||
Value: "obs.cn-south-1.myhuaweicloud.com",
|
||||
Help: "CN South-Guangzhou",
|
||||
}, {
|
||||
Value: "obs.ap-southeast-1.myhuaweicloud.com",
|
||||
Help: "CN-Hong Kong",
|
||||
}, {
|
||||
Value: "obs.sa-argentina-1.myhuaweicloud.com",
|
||||
Help: "LA-Buenos Aires1",
|
||||
}, {
|
||||
Value: "obs.sa-peru-1.myhuaweicloud.com",
|
||||
Help: "LA-Lima1",
|
||||
}, {
|
||||
Value: "obs.na-mexico-1.myhuaweicloud.com",
|
||||
Help: "LA-Mexico City1",
|
||||
}, {
|
||||
Value: "obs.sa-chile-1.myhuaweicloud.com",
|
||||
Help: "LA-Santiago2",
|
||||
}, {
|
||||
Value: "obs.sa-brazil-1.myhuaweicloud.com",
|
||||
Help: "LA-Sao Paulo1",
|
||||
}, {
|
||||
Value: "obs.ru-northwest-2.myhuaweicloud.com",
|
||||
Help: "RU-Moscow2",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Scaleway Object Storage.",
|
||||
@@ -998,7 +879,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
|
||||
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1408,7 +1289,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
Provider: "!AWS,IBMCOS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1692,14 +1573,7 @@ Files of unknown size are uploaded with the configured
|
||||
chunk_size. Since the default chunk size is 5 MiB and there can be at
|
||||
most 10,000 chunks, this means that by default the maximum size of
|
||||
a file you can stream upload is 48 GiB. If you wish to stream upload
|
||||
larger files then you will need to increase chunk_size.
|
||||
|
||||
Increasing the chunk size decreases the accuracy of the progress
|
||||
statistics displayed with "-P" flag. Rclone treats chunk as sent when
|
||||
it's buffered by the AWS SDK, when in fact it may still be uploading.
|
||||
A bigger chunk size means a bigger AWS SDK buffer and progress
|
||||
reporting more deviating from the truth.
|
||||
`,
|
||||
larger files then you will need to increase chunk_size.`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
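A minimal arithmetic sketch of the 48 GiB figure quoted in the chunk_size help above (standalone Go, not rclone code; the constants are simply the defaults named in the help text):

```go
package main

import "fmt"

func main() {
	const (
		chunkSize = 5 * 1024 * 1024 // default chunk_size: 5 MiB
		maxParts  = 10000           // multipart upload part limit mentioned above
	)
	maxStream := int64(chunkSize) * maxParts
	fmt.Printf("max streamed upload: %d bytes (~%.1f GiB)\n",
		maxStream, float64(maxStream)/(1<<30))
	// Prints roughly 48.8 GiB, matching the "48 GiB" quoted in the help text.
}
```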
@@ -1988,8 +1862,8 @@ circumstances or for testing.
|
||||
|
||||
// Constants
|
||||
const (
|
||||
metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
|
||||
// The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
|
||||
// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
|
||||
maxSizeForCopy = 4768 * 1024 * 1024
|
||||
@@ -2004,57 +1878,6 @@ const (
|
||||
maxExpireDuration = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week
|
||||
)
|
||||
|
||||
// system metadata keys which this backend owns
|
||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
"cache-control": {
|
||||
Help: "Cache-Control header",
|
||||
Type: "string",
|
||||
Example: "no-cache",
|
||||
},
|
||||
"content-disposition": {
|
||||
Help: "Content-Disposition header",
|
||||
Type: "string",
|
||||
Example: "inline",
|
||||
},
|
||||
"content-encoding": {
|
||||
Help: "Content-Encoding header",
|
||||
Type: "string",
|
||||
Example: "gzip",
|
||||
},
|
||||
"content-language": {
|
||||
Help: "Content-Language header",
|
||||
Type: "string",
|
||||
Example: "en-US",
|
||||
},
|
||||
"content-type": {
|
||||
Help: "Content-Type header",
|
||||
Type: "string",
|
||||
Example: "text/plain",
|
||||
},
|
||||
// "tagging": {
|
||||
// Help: "x-amz-tagging header",
|
||||
// Type: "string",
|
||||
// Example: "tag1=value1&tag2=value2",
|
||||
// },
|
||||
"tier": {
|
||||
Help: "Tier of the object",
|
||||
Type: "string",
|
||||
Example: "GLACIER",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"mtime": {
|
||||
Help: "Time of last modification, read from rclone metadata",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
},
|
||||
"btime": {
|
||||
Help: "Time of file birth (creation) read from Last-Modified header",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
@@ -2076,7 +1899,7 @@ type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
MaxUploadParts int `config:"max_upload_parts"`
|
||||
MaxUploadParts int64 `config:"max_upload_parts"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SharedCredentialsFile string `config:"shared_credentials_file"`
|
||||
Profile string `config:"profile"`
|
||||
@@ -2110,6 +1933,7 @@ type Fs struct {
|
||||
ctx context.Context // global context for reading config
|
||||
features *fs.Features // optional features
|
||||
c *s3.S3 // the connection to the s3 server
|
||||
cu *s3.S3 // unsigned connection to the s3 server for PutObject
|
||||
ses *session.Session // the s3 session
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
@@ -2127,20 +1951,14 @@ type Object struct {
|
||||
//
|
||||
// List will read everything but meta & mimeType - to fill
|
||||
// that in you need to call readMetaData
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
md5 string // md5sum of the object
|
||||
bytes int64 // size of the object
|
||||
lastModified time.Time // Last modified
|
||||
meta map[string]string // The object metadata if known - may be nil - with lower case keys
|
||||
mimeType string // MimeType of object - may be ""
|
||||
|
||||
// Metadata as pointers to strings as they often won't be present
|
||||
storageClass *string // e.g. GLACIER
|
||||
cacheControl *string // Cache-Control: header
|
||||
contentDisposition *string // Content-Disposition: header
|
||||
contentEncoding *string // Content-Encoding: header
|
||||
contentLanguage *string // Content-Language: header
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
md5 string // md5sum of the object
|
||||
bytes int64 // size of the object
|
||||
lastModified time.Time // Last modified
|
||||
meta map[string]*string // The object metadata if known - may be nil
|
||||
mimeType string // MimeType of object - may be ""
|
||||
storageClass string // e.g. GLACIER
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -2158,7 +1976,7 @@ func (f *Fs) Root() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.rootBucket == "" {
|
||||
return "S3 root"
|
||||
return fmt.Sprintf("S3 root")
|
||||
}
|
||||
if f.rootDirectory == "" {
|
||||
return fmt.Sprintf("S3 bucket %s", f.rootBucket)
|
||||
@@ -2192,10 +2010,6 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ShouldRetry(awsError.OrigErr()) {
|
||||
return true, err
|
||||
}
|
||||
// If it is a timeout then we want to retry that
|
||||
if awsError.Code() == "RequestTimeout" {
|
||||
return true, err
|
||||
}
|
||||
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
|
||||
if reqErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// 301 if wrong region for bucket - can only update if running from a bucket
|
||||
@@ -2252,7 +2066,11 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
}
|
||||
|
||||
// s3Connection makes a connection to s3
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
|
||||
//
|
||||
// If unsignedBody is set then the connection is configured for
|
||||
// unsigned bodies which is necessary for PutObject if we don't want
|
||||
// it to seek
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *s3.S3, *session.Session, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// Make the auth
|
||||
v := credentials.Value{
|
||||
@@ -2269,7 +2087,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
// start a new AWS session
|
||||
awsSession, err := session.NewSession()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("NewSession: %w", err)
|
||||
return nil, nil, nil, fmt.Errorf("NewSession: %w", err)
|
||||
}
|
||||
|
||||
// first provider to supply a credential set "wins"
|
||||
@@ -2309,9 +2127,9 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
cred = credentials.AnonymousCredentials
|
||||
case v.AccessKeyID == "":
|
||||
return nil, nil, errors.New("access_key_id not found")
|
||||
return nil, nil, nil, errors.New("access_key_id not found")
|
||||
case v.SecretAccessKey == "":
|
||||
return nil, nil, errors.New("secret_access_key not found")
|
||||
return nil, nil, nil, errors.New("secret_access_key not found")
|
||||
}
|
||||
|
||||
if opt.Region == "" {
|
||||
@@ -2350,25 +2168,36 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
|
||||
awsSessionOpts.Config.Credentials = nil
|
||||
}
|
||||
// Setting this stops PutObject reading the body twice and seeking
|
||||
// We add our own Content-MD5 for data protection
|
||||
awsSessionOpts.Config.S3DisableContentMD5Validation = aws.Bool(true)
|
||||
ses, err := session.NewSessionWithOptions(awsSessionOpts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
c := s3.New(ses)
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
newC := func(unsignedBody bool) *s3.S3 {
|
||||
c := s3.New(ses)
|
||||
if opt.V2Auth || opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
|
||||
}
|
||||
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
|
||||
c.Handlers.Sign.Clear()
|
||||
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
c.Handlers.Sign.PushBack(signer)
|
||||
} else if unsignedBody {
|
||||
// If the body is unsigned then tell the signer that we aren't signing the payload
|
||||
c.Handlers.Sign.Clear()
|
||||
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
c.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler("v4.SignRequestHandler.WithUnsignedPayload", v4.WithUnsignedPayload))
|
||||
}
|
||||
c.Handlers.Sign.Clear()
|
||||
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
c.Handlers.Sign.PushBack(signer)
|
||||
return c
|
||||
}
|
||||
return c, ses, nil
|
||||
return newC(false), newC(true), ses, nil
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
@@ -2401,37 +2230,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// setEndpointValueForIDriveE2 calls the IDrive e2 API to look up the region endpoint for the given access key
|
||||
func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
|
||||
value, ok := m.Get(fs.ConfigProvider)
|
||||
if !ok || value != "IDrive" {
|
||||
return
|
||||
}
|
||||
value, ok = m.Get("access_key_id")
|
||||
if !ok || value == "" {
|
||||
return
|
||||
}
|
||||
client := &http.Client{Timeout: time.Second * 3}
|
||||
// API to get user region endpoint against the Access Key details: https://www.idrive.com/e2/guides/get_region_endpoint
|
||||
resp, err := client.Post("https://api.idrivee2.com/api/service/get_region_end_point",
|
||||
"application/json",
|
||||
strings.NewReader(`{"access_key": "`+value+`"}`))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var data = &struct {
|
||||
RespCode int `json:"resp_code"`
|
||||
RespMsg string `json:"resp_msg"`
|
||||
DomainName string `json:"domain_name"`
|
||||
}{}
|
||||
if err = decoder.Decode(data); err == nil && data.RespCode == 0 {
|
||||
m.Set("endpoint", data.DomainName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
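The IDrive e2 endpoint lookup above is a plain JSON POST. A rough standalone sketch of the same call, using only the URL and response fields visible in the hunk; the accessKey value is a placeholder and error handling is deliberately minimal, so this is illustrative rather than the rclone implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	accessKey := "YOUR_ACCESS_KEY" // placeholder
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Post(
		"https://api.idrivee2.com/api/service/get_region_end_point",
		"application/json",
		strings.NewReader(`{"access_key": "`+accessKey+`"}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Response shape taken from the struct in the hunk above.
	var data struct {
		RespCode   int    `json:"resp_code"`
		RespMsg    string `json:"resp_msg"`
		DomainName string `json:"domain_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		panic(err)
	}
	if data.RespCode == 0 {
		fmt.Println("endpoint:", data.DomainName) // value stored as the S3 endpoint
	} else {
		fmt.Println("lookup failed:", data.RespMsg)
	}
}
```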
// Set the provider quirks
|
||||
//
|
||||
// There should be no testing against opt.Provider anywhere in the
|
||||
@@ -2450,10 +2248,6 @@ func setQuirks(opt *Options) {
|
||||
// No quirks
|
||||
case "Alibaba":
|
||||
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
|
||||
case "HuaweiOBS":
|
||||
// Huawei OBS PFS does not support ListObjectsV2, and if urlEncodeListings is turned on the marker will not work and the listing keeps returning the same page forever.
|
||||
urlEncodeListings = false
|
||||
listObjectsV2 = false
|
||||
case "Ceph":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
@@ -2478,8 +2272,6 @@ func setQuirks(opt *Options) {
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
case "IDrive":
|
||||
virtualHostStyle = false
|
||||
case "LyveCloud":
|
||||
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
|
||||
case "Minio":
|
||||
@@ -2590,7 +2382,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
}
|
||||
srv := getClient(ctx, opt)
|
||||
c, ses, err := s3Connection(ctx, opt, srv)
|
||||
c, cu, ses, err := s3Connection(ctx, opt, srv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -2608,6 +2400,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ci: ci,
|
||||
ctx: ctx,
|
||||
c: c,
|
||||
cu: cu,
|
||||
ses: ses,
|
||||
pacer: pc,
|
||||
cache: bucket.NewCache(),
|
||||
@@ -2634,9 +2427,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
SetTier: true,
|
||||
@@ -2662,9 +2452,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features.SetTier = false
|
||||
f.features.GetTier = false
|
||||
}
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
return f, nil
|
||||
}
|
||||
@@ -2687,7 +2474,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
|
||||
}
|
||||
o.setMD5FromEtag(aws.StringValue(info.ETag))
|
||||
o.bytes = aws.Int64Value(info.Size)
|
||||
o.storageClass = info.StorageClass
|
||||
o.storageClass = aws.StringValue(info.StorageClass)
|
||||
} else if !o.fs.opt.NoHeadObject {
|
||||
err := o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
@@ -2737,11 +2524,12 @@ func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
|
||||
// Make a new session with the new region
|
||||
oldRegion := f.opt.Region
|
||||
f.opt.Region = region
|
||||
c, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
|
||||
c, cu, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating new session failed: %w", err)
|
||||
}
|
||||
f.c = c
|
||||
f.cu = cu
|
||||
f.ses = ses
|
||||
|
||||
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
|
||||
@@ -2881,7 +2669,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, &s3.Object{Key: &remote}, true)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -3555,7 +3345,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
st.Status = "Not an S3 object"
|
||||
return
|
||||
}
|
||||
if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE") {
|
||||
if o.storageClass != "GLACIER" && o.storageClass != "DEEP_ARCHIVE" {
|
||||
st.Status = "Not GLACIER or DEEP_ARCHIVE storage class"
|
||||
return
|
||||
}
|
||||
@@ -3813,65 +3603,40 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(resp)
|
||||
// resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
|
||||
o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert S3 metadata with pointers into a map[string]string
|
||||
// while lowercasing the keys
|
||||
func s3MetadataToMap(s3Meta map[string]*string) map[string]string {
|
||||
meta := make(map[string]string, len(s3Meta))
|
||||
for k, v := range s3Meta {
|
||||
if v != nil {
|
||||
meta[strings.ToLower(k)] = *v
|
||||
}
|
||||
}
|
||||
return meta
|
||||
}
|
||||
|
||||
// Convert our metadata back into S3 metadata
|
||||
func mapToS3Metadata(meta map[string]string) map[string]*string {
|
||||
s3Meta := make(map[string]*string, len(meta))
|
||||
for k, v := range meta {
|
||||
s3Meta[k] = aws.String(v)
|
||||
}
|
||||
return s3Meta
|
||||
}
|
||||
|
||||
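The two helpers above convert between the AWS SDK's map[string]*string metadata and a plain map with lower-cased keys. A self-contained sketch of that round trip; strPtr is a local stand-in for aws.String and the sample values are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func strPtr(s string) *string { return &s } // stand-in for aws.String

// toMap flattens pointer-valued metadata, lower-casing the keys.
func toMap(s3Meta map[string]*string) map[string]string {
	meta := make(map[string]string, len(s3Meta))
	for k, v := range s3Meta {
		if v != nil {
			meta[strings.ToLower(k)] = *v
		}
	}
	return meta
}

// toS3 converts the flat map back to pointer values for the SDK.
func toS3(meta map[string]string) map[string]*string {
	s3Meta := make(map[string]*string, len(meta))
	for k, v := range meta {
		s3Meta[k] = strPtr(v)
	}
	return s3Meta
}

func main() {
	in := map[string]*string{"Mtime": strPtr("1651234567.123"), "Empty": nil}
	flat := toMap(in)
	fmt.Println(flat)                 // map[mtime:1651234567.123]
	fmt.Println(*toS3(flat)["mtime"]) // 1651234567.123
}
```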
func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
|
||||
func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *time.Time, meta map[string]*string, mimeType *string, storageClass *string) {
|
||||
// Ignore missing Content-Length assuming it is 0
|
||||
// Some versions of ceph do this due to their apache proxies
|
||||
if resp.ContentLength != nil {
|
||||
o.bytes = *resp.ContentLength
|
||||
if contentLength != nil {
|
||||
o.bytes = *contentLength
|
||||
}
|
||||
o.setMD5FromEtag(aws.StringValue(etag))
|
||||
o.meta = meta
|
||||
if o.meta == nil {
|
||||
o.meta = map[string]*string{}
|
||||
}
|
||||
o.setMD5FromEtag(aws.StringValue(resp.ETag))
|
||||
o.meta = s3MetadataToMap(resp.Metadata)
|
||||
// Read MD5 from metadata if present
|
||||
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
|
||||
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
|
||||
md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sumBase64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", *md5sumBase64, err)
|
||||
} else if len(md5sumBytes) != 16 {
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", md5sumBase64)
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", *md5sumBase64)
|
||||
} else {
|
||||
o.md5 = hex.EncodeToString(md5sumBytes)
|
||||
}
|
||||
}
|
||||
if resp.LastModified == nil {
|
||||
o.storageClass = aws.StringValue(storageClass)
|
||||
if lastModified == nil {
|
||||
o.lastModified = time.Now()
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
} else {
|
||||
o.lastModified = *resp.LastModified
|
||||
o.lastModified = *lastModified
|
||||
}
|
||||
o.mimeType = aws.StringValue(resp.ContentType)
|
||||
|
||||
// Set system metadata
|
||||
o.storageClass = resp.StorageClass
|
||||
o.cacheControl = resp.CacheControl
|
||||
o.contentDisposition = resp.ContentDisposition
|
||||
o.contentEncoding = resp.ContentEncoding
|
||||
o.contentLanguage = resp.ContentLanguage
|
||||
o.mimeType = aws.StringValue(mimeType)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
@@ -3889,11 +3654,11 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
}
|
||||
// read mtime out of metadata if available
|
||||
d, ok := o.meta[metaMtime]
|
||||
if !ok {
|
||||
if !ok || d == nil {
|
||||
// fs.Debugf(o, "No metadata")
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := swift.FloatStringToTime(d)
|
||||
modTime, err := swift.FloatStringToTime(*d)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read mtime from object: %v", err)
|
||||
return o.lastModified
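The swift helpers used here (TimeToFloatString / FloatStringToTime) suggest the mtime metadata value is a Unix timestamp rendered as a decimal string. A standalone sketch of an equivalent round trip using only the standard library; it is illustrative and not guaranteed byte-for-byte identical to the helper output:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	mod := time.Now().UTC()

	// Encode: seconds since the Unix epoch as a decimal string.
	encoded := strconv.FormatFloat(float64(mod.UnixNano())/1e9, 'f', -1, 64)
	fmt.Println("stored as:", encoded)

	// Decode: back to a time.Time (precision limited by float64).
	f, err := strconv.ParseFloat(encoded, 64)
	if err != nil {
		panic(err)
	}
	sec := int64(f)
	nsec := int64((f - float64(sec)) * 1e9)
	fmt.Println("decoded:  ", time.Unix(sec, nsec).UTC())
}
```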
|
||||
@@ -3907,10 +3672,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
|
||||
// Can't update metadata here, so return this error to force a recopy
|
||||
if o.storageClass != nil && (*o.storageClass == "GLACIER" || *o.storageClass == "DEEP_ARCHIVE") {
|
||||
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
@@ -3918,7 +3683,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
bucket, bucketPath := o.split()
|
||||
req := s3.CopyObjectInput{
|
||||
ContentType: aws.String(fs.MimeType(ctx, o)), // Guess the content type
|
||||
Metadata: mapToS3Metadata(o.meta),
|
||||
Metadata: o.meta,
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
@@ -3971,34 +3736,17 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
|
||||
|
||||
metaData := make(map[string]*string)
|
||||
for key, value := range resp.Header {
|
||||
key = strings.ToLower(key)
|
||||
if strings.HasPrefix(key, "x-amz-meta-") {
|
||||
if strings.HasPrefix(key, "x-amz-meta") {
|
||||
metaKey := strings.TrimPrefix(key, "x-amz-meta-")
|
||||
metaData[metaKey] = &value[0]
|
||||
metaData[strings.Title(metaKey)] = &value[0]
|
||||
}
|
||||
}
|
||||
|
||||
header := func(k string) *string {
|
||||
v := resp.Header.Get(k)
|
||||
if v == "" {
|
||||
return nil
|
||||
}
|
||||
return &v
|
||||
}
|
||||
storageClass := resp.Header.Get("X-Amz-Storage-Class")
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
etag := resp.Header.Get("Etag")
|
||||
|
||||
var head = s3.HeadObjectOutput{
|
||||
ETag: header("Etag"),
|
||||
ContentLength: contentLength,
|
||||
LastModified: &lastModified,
|
||||
Metadata: metaData,
|
||||
CacheControl: header("Cache-Control"),
|
||||
ContentDisposition: header("Content-Disposition"),
|
||||
ContentEncoding: header("Content-Encoding"),
|
||||
ContentLanguage: header("Content-Language"),
|
||||
ContentType: header("Content-Type"),
|
||||
StorageClass: header("X-Amz-Storage-Class"),
|
||||
}
|
||||
o.setMetaData(&head)
|
||||
o.setMetaData(&etag, contentLength, &lastModified, metaData, &contentType, &storageClass)
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
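downloadFromURL above rebuilds object metadata from the response headers. A stripped-down sketch of just the x-amz-meta-* extraction using net/http; the key normalisation here follows only one of the two variants shown in the diff, and the sample header values are invented:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// extractUserMetadata pulls the x-amz-meta-* headers out of a response
// header block, lower-casing the metadata keys. Illustrative only; the
// backend additionally builds *string values and handles other headers.
func extractUserMetadata(h http.Header) map[string]string {
	meta := make(map[string]string)
	for key, values := range h {
		lower := strings.ToLower(key)
		if strings.HasPrefix(lower, "x-amz-meta-") && len(values) > 0 {
			meta[strings.TrimPrefix(lower, "x-amz-meta-")] = values[0]
		}
	}
	return meta
}

func main() {
	h := http.Header{}
	h.Set("X-Amz-Meta-Mtime", "1651234567.123")
	h.Set("Content-Type", "text/plain")
	fmt.Println(extractUserMetadata(h)) // map[mtime:1651234567.123]
}
```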
@@ -4073,10 +3821,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
fs.Debugf(o, "Failed to find length in %q", contentRange)
|
||||
}
|
||||
}
|
||||
var head s3.HeadObjectOutput
|
||||
structs.SetFrom(&head, resp)
|
||||
head.ContentLength = size
|
||||
o.setMetaData(&head)
|
||||
o.setMetaData(resp.ETag, size, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
@@ -4108,10 +3853,10 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*int64(uploadParts)))
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*uploadParts))
|
||||
})
|
||||
} else {
|
||||
partSize = chunksize.Calculator(o, size, uploadParts, f.opt.ChunkSize)
|
||||
partSize = chunksize.Calculator(o, int(uploadParts), f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
memPool := f.getMemoryPool(int64(partSize))
|
||||
@@ -4288,7 +4033,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
func unWrapAwsError(err error) (found bool, outErr error) {
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
var origErrs []error
|
||||
if batchErr, ok := awsErr.(awserr.BatchedErrors); ok {
|
||||
if batchErr, ok := awsErr.(awserr.BatchError); ok {
|
||||
origErrs = batchErr.OrigErrs()
|
||||
} else {
|
||||
origErrs = []error{awsErr.OrigErr()}
|
||||
@@ -4306,28 +4051,19 @@ func unWrapAwsError(err error) (found bool, outErr error) {
|
||||
|
||||
// Upload a single part using PutObject
|
||||
func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, lastModified time.Time, err error) {
|
||||
r, resp := o.fs.c.PutObjectRequest(req)
|
||||
if req.ContentLength != nil && *req.ContentLength == 0 {
|
||||
// Can't upload zero length files like this for some reason
|
||||
r.Body = bytes.NewReader([]byte{})
|
||||
} else {
|
||||
r.SetStreamingBody(ioutil.NopCloser(in))
|
||||
}
|
||||
r.SetContext(ctx)
|
||||
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
|
||||
|
||||
req.Body = readers.NewFakeSeeker(in, size)
|
||||
var resp *s3.PutObjectOutput
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
err := r.Send()
|
||||
resp, err = o.fs.cu.PutObject(req)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
// Return the underlying error if we have a
|
||||
// Serialization or RequestError error if possible
|
||||
// Return the underlying error if we have a Serialization error if possible
|
||||
//
|
||||
// These errors are synthesized locally in the SDK
|
||||
// (not returned from the server) and we'd rather have
|
||||
// the underlying error if there is one.
|
||||
if do, ok := err.(awserr.Error); ok && (do.Code() == request.ErrCodeSerialization || do.Code() == request.ErrCodeRequestError) {
|
||||
// Serialization errors are synthesized locally in the SDK (not returned from the
|
||||
// server). We'll get one if the SDK attempts a retry, however the FakeSeeker will
|
||||
// remember the previous error from Read and return that.
|
||||
if do, ok := err.(awserr.Error); ok && do.Code() == request.ErrCodeSerialization {
|
||||
if found, newErr := unWrapAwsError(err); found {
|
||||
err = newErr
|
||||
}
|
||||
@@ -4413,55 +4149,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &bucketPath,
|
||||
}
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
req.Metadata = make(map[string]*string, len(meta)+2)
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
pv := aws.String(v)
|
||||
k = strings.ToLower(k)
|
||||
switch k {
|
||||
case "cache-control":
|
||||
req.CacheControl = pv
|
||||
case "content-disposition":
|
||||
req.ContentDisposition = pv
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = pv
|
||||
case "content-language":
|
||||
req.ContentLanguage = pv
|
||||
case "content-type":
|
||||
req.ContentType = pv
|
||||
case "x-amz-tagging":
|
||||
req.Tagging = pv
|
||||
case "tier":
|
||||
// ignore
|
||||
case "mtime":
|
||||
// mtime in meta overrides source ModTime
|
||||
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
||||
} else {
|
||||
modTime = metaModTime
|
||||
}
|
||||
case "btime":
|
||||
// write as metadata since we can't set it
|
||||
req.Metadata[k] = pv
|
||||
default:
|
||||
req.Metadata[k] = pv
|
||||
}
|
||||
}
|
||||
|
||||
// Set the mtime in the meta data
|
||||
req.Metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
metadata := map[string]*string{
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non multipart
|
||||
@@ -4482,15 +4173,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// - a multipart upload
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
req.Metadata[metaMD5Hash] = &md5sumBase64
|
||||
metadata[metaMD5Hash] = &md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set the content type if it isn't set already
|
||||
if req.ContentType == nil {
|
||||
req.ContentType = aws.String(fs.MimeType(ctx, src))
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &bucketPath,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = &size
|
||||
@@ -4569,19 +4265,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// so make up the object as best we can assuming it got
|
||||
// uploaded properly. If size < 0 then we need to do the HEAD.
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
var head s3.HeadObjectOutput
|
||||
structs.SetFrom(&head, &req)
|
||||
head.ETag = &md5sumHex // doesn't matter quotes are missing
|
||||
head.ContentLength = &size
|
||||
o.md5 = md5sumHex
|
||||
o.bytes = size
|
||||
o.lastModified = time.Now()
|
||||
o.meta = req.Metadata
|
||||
o.mimeType = aws.StringValue(req.ContentType)
|
||||
o.storageClass = aws.StringValue(req.StorageClass)
|
||||
// If we have done a single part PUT request then we can read these
|
||||
if gotEtag != "" {
|
||||
head.ETag = &gotEtag
|
||||
o.setMD5FromEtag(gotEtag)
|
||||
}
|
||||
if lastModified.IsZero() {
|
||||
lastModified = time.Now()
|
||||
if !o.lastModified.IsZero() {
|
||||
o.lastModified = lastModified
|
||||
}
|
||||
head.LastModified = &lastModified
|
||||
o.setMetaData(&head)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4591,7 +4287,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(head)
|
||||
o.setMetaData(head.ETag, head.ContentLength, head.LastModified, head.Metadata, head.ContentType, head.StorageClass)
|
||||
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
|
||||
gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
|
||||
if wantETag != gotETag {
|
||||
@@ -4642,61 +4338,16 @@ func (o *Object) SetTier(tier string) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.storageClass = &tier
|
||||
o.storageClass = tier
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTier returns storage class as string
|
||||
func (o *Object) GetTier() string {
|
||||
if o.storageClass == nil || *o.storageClass == "" {
|
||||
if o.storageClass == "" {
|
||||
return "STANDARD"
|
||||
}
|
||||
return *o.storageClass
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metadata = make(fs.Metadata, len(o.meta)+7)
|
||||
for k, v := range o.meta {
|
||||
switch k {
|
||||
case metaMtime:
|
||||
if modTime, err := swift.FloatStringToTime(v); err == nil {
|
||||
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
|
||||
}
|
||||
case metaMD5Hash:
|
||||
// don't write hash metadata
|
||||
default:
|
||||
metadata[k] = v
|
||||
}
|
||||
}
|
||||
if o.mimeType != "" {
|
||||
metadata["content-type"] = o.mimeType
|
||||
}
|
||||
// metadata["x-amz-tagging"] = ""
|
||||
if !o.lastModified.IsZero() {
|
||||
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// Set system metadata
|
||||
setMetadata := func(k string, v *string) {
|
||||
if v == nil || *v == "" {
|
||||
return
|
||||
}
|
||||
metadata[k] = *v
|
||||
}
|
||||
setMetadata("cache-control", o.cacheControl)
|
||||
setMetadata("content-disposition", o.contentDisposition)
|
||||
setMetadata("content-encoding", o.contentEncoding)
|
||||
setMetadata("content-language", o.contentLanguage)
|
||||
setMetadata("tier", o.storageClass)
|
||||
|
||||
return metadata, nil
|
||||
return o.storageClass
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
@@ -4711,5 +4362,4 @@ var (
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
)
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func gz(t *testing.T, s string) string {
|
||||
var buf bytes.Buffer
|
||||
zw := gzip.NewWriter(&buf)
|
||||
_, err := zw.Write([]byte(s))
|
||||
require.NoError(t, err)
|
||||
err = zw.Close()
|
||||
require.NoError(t, err)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestMetadata(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
contents := gz(t, random.String(1000))
|
||||
|
||||
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
btime := time.Now()
|
||||
metadata := fs.Metadata{
|
||||
"cache-control": "no-cache",
|
||||
"content-disposition": "inline",
|
||||
"content-encoding": "gzip",
|
||||
"content-language": "en-US",
|
||||
"content-type": "text/plain",
|
||||
"mtime": "2009-05-06T04:05:06.499999999Z",
|
||||
// "tier" - read only
|
||||
// "btime" - read only
|
||||
}
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
|
||||
defer func() {
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
}()
|
||||
o := obj.(*Object)
|
||||
gotMetadata, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
for k, v := range metadata {
|
||||
got := gotMetadata[k]
|
||||
switch k {
|
||||
case "mtime":
|
||||
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
|
||||
case "btime":
|
||||
gotBtime := fstest.Time(got)
|
||||
dt := gotBtime.Sub(btime)
|
||||
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
|
||||
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
|
||||
case "tier":
|
||||
assert.NotEqual(t, "", got)
|
||||
default:
|
||||
assert.Equal(t, v, got, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestNoHead(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Set NoHead for this test
|
||||
f.opt.NoHead = true
|
||||
defer func() {
|
||||
f.opt.NoHead = false
|
||||
}()
|
||||
contents := random.String(1000)
|
||||
item := fstest.NewItem("test-no-head", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
defer func() {
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
}()
|
||||
// PutTestContents checks the received object
|
||||
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Metadata", f.InternalTestMetadata)
|
||||
t.Run("NoHead", f.InternalTestNoHead)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
@@ -453,7 +453,7 @@ func (f *Fs) Root() string {
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.libraryName == "" {
|
||||
return "seafile root"
|
||||
return fmt.Sprintf("seafile root")
|
||||
}
|
||||
library := "library"
|
||||
if f.encrypted {
|
||||
@@ -886,7 +886,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
// 1- rename source
|
||||
err = srcFs.renameDir(ctx, srcLibraryID, srcPath, tempName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot rename source directory to a temporary name: %w", err)
|
||||
return fmt.Errorf("Cannot rename source directory to a temporary name: %w", err)
|
||||
}
|
||||
|
||||
// 2- move source to destination
|
||||
@@ -900,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
// 3- rename destination back to source name
|
||||
err = f.renameDir(ctx, dstLibraryID, path.Join(dstDir, tempName), dstName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot rename temporary directory to destination name: %w", err)
|
||||
return fmt.Errorf("Cannot rename temporary directory to destination name: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -923,7 +923,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
// CleanUp the trash in the Fs
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if f.libraryName == "" {
|
||||
return errors.New("cannot clean up at the root of the seafile server, please select a library to clean up")
|
||||
return errors.New("Cannot clean up at the root of the seafile server: please select a library to clean up")
|
||||
}
|
||||
libraryID, err := f.getLibraryID(ctx, f.libraryName)
|
||||
if err != nil {
|
||||
@@ -972,7 +972,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
libraryName, filePath := f.splitPath(remote)
|
||||
if libraryName == "" {
|
||||
// We cannot share the whole seafile server, we need at least a library
|
||||
return "", errors.New("cannot share the root of the seafile server, please select a library to share")
|
||||
return "", errors.New("Cannot share the root of the seafile server. Please select a library to share")
|
||||
}
|
||||
libraryID, err := f.getLibraryID(ctx, libraryName)
|
||||
if err != nil {
|
||||
@@ -984,9 +984,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(shareLinks) > 0 {
|
||||
if shareLinks != nil && len(shareLinks) > 0 {
|
||||
for _, shareLink := range shareLinks {
|
||||
if !shareLink.IsExpired {
|
||||
if shareLink.IsExpired == false {
|
||||
return shareLink.Link, nil
|
||||
}
|
||||
}
|
||||
@@ -1053,7 +1053,7 @@ func (f *Fs) isLibraryInCache(libraryName string) bool {
|
||||
return false
|
||||
}
|
||||
value, found := f.libraries.GetMaybe(librariesCacheKey)
|
||||
if !found {
|
||||
if found == false {
|
||||
return false
|
||||
}
|
||||
libraries := value.([]api.Library)
|
||||
@@ -1130,7 +1130,7 @@ func (f *Fs) mkLibrary(ctx context.Context, libraryName, password string) error
|
||||
}
|
||||
// Stores the library details into the cache
|
||||
value, found := f.libraries.GetMaybe(librariesCacheKey)
|
||||
if !found {
|
||||
if found == false {
|
||||
// Don't update the cache at that point
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ const (
|
||||
|
||||
// Errors specific to seafile fs
|
||||
var (
|
||||
ErrorInternalDuringUpload = errors.New("internal server error during file upload")
|
||||
ErrorInternalDuringUpload = errors.New("Internal server error during file upload")
|
||||
)
|
||||
|
||||
// ==================== Seafile API ====================
|
||||
|
||||
@@ -39,8 +39,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultShellType = "unix"
|
||||
shellTypeNotSupported = "none"
|
||||
hashCommandNotSupported = "none"
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
@@ -49,15 +47,13 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
currentUser = env.CurrentUser()
|
||||
posixWinAbsPathRegex = regexp.MustCompile(`^/[a-zA-Z]\:($|/)`) // E.g. "/C:" or anything starting with "/C:/"
|
||||
unixShellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")
|
||||
currentUser = env.CurrentUser()
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "sftp",
|
||||
Description: "SSH/SFTP",
|
||||
Description: "SSH/SFTP Connection",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
@@ -152,16 +148,16 @@ If this is set and no password is supplied then rclone will:
|
||||
}, {
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: `Override path used by SSH shell commands.
|
||||
Help: `Override path used by SSH connection.
|
||||
|
||||
This allows checksum calculation when SFTP and SSH paths are
|
||||
different. This issue affects among others Synology NAS boxes.
|
||||
|
||||
E.g. if shared folders can be found in directories representing volumes:
|
||||
Shared folders can be found in directories representing volumes
|
||||
|
||||
rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory
|
||||
|
||||
E.g. if home directory can be found in a shared folder called "home":
|
||||
Home directory can be found in a shared folder called "home"
|
||||
|
||||
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
|
||||
Advanced: true,
|
||||
@@ -170,26 +166,6 @@ E.g. if home directory can be found in a shared folder called "home":
|
||||
Default: true,
|
||||
Help: "Set the modified time on the remote if set.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shell_type",
|
||||
Default: "",
|
||||
Help: "The type of SSH shell on remote server, if any.\n\nLeave blank for autodetect.",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: shellTypeNotSupported,
|
||||
Help: "No shell access",
|
||||
}, {
|
||||
Value: "unix",
|
||||
Help: "Unix shell",
|
||||
}, {
|
||||
Value: "powershell",
|
||||
Help: "PowerShell",
|
||||
}, {
|
||||
Value: "cmd",
|
||||
Help: "Windows Command Prompt",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "md5sum_command",
|
||||
Default: "",
|
||||
@@ -270,57 +246,6 @@ If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
|
||||
Set to 0 to keep connections indefinitely.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Upload and download chunk size.
|
||||
|
||||
This controls the maximum packet size used in the SFTP protocol. The
|
||||
RFC limits this to 32768 bytes (32k), however a lot of servers
|
||||
support larger sizes and setting it larger will increase transfer
|
||||
speed dramatically on high latency links.
|
||||
|
||||
Only use a setting higher than 32k if you always connect to the same
|
||||
server or after sufficiently broad testing.
|
||||
|
||||
For example using the value of 252k with OpenSSH works well with its
|
||||
maximum packet size of 256k.
|
||||
|
||||
If you get the error "failed to send packet header: EOF" when copying
|
||||
a large file, try lowering this number.
|
||||
`,
|
||||
Default: 32 * fs.Kibi,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "concurrency",
|
||||
Help: `The maximum number of outstanding requests for one file
|
||||
|
||||
This controls the maximum number of outstanding requests for one file.
|
||||
Increasing it will increase throughput on high latency links at the
|
||||
cost of using more memory.
|
||||
`,
|
||||
Default: 64,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_env",
|
||||
Default: fs.SpaceSepList{},
|
||||
Help: `Environment variables to pass to sftp and commands
|
||||
|
||||
Set environment variables in the form:
|
||||
|
||||
VAR=value
|
||||
|
||||
to be passed to the sftp client and to any commands run (eg md5sum).
|
||||
|
||||
Pass multiple variables space separated, eg
|
||||
|
||||
VAR1=value VAR2=value
|
||||
|
||||
and pass variables with spaces in in quotes, eg
|
||||
|
||||
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
|
||||
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -330,34 +255,30 @@ and pass variables with spaces in in quotes, eg
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyPem string `config:"key_pem"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
PubKeyFile string `config:"pubkey_file"`
|
||||
KnownHostsFile string `config:"known_hosts_file"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
PathOverride string `config:"path_override"`
|
||||
SetModTime bool `config:"set_modtime"`
|
||||
ShellType string `config:"shell_type"`
|
||||
Md5sumCommand string `config:"md5sum_command"`
|
||||
Sha1sumCommand string `config:"sha1sum_command"`
|
||||
SkipLinks bool `config:"skip_links"`
|
||||
Subsystem string `config:"subsystem"`
|
||||
ServerCommand string `config:"server_command"`
|
||||
UseFstat bool `config:"use_fstat"`
|
||||
DisableConcurrentReads bool `config:"disable_concurrent_reads"`
|
||||
DisableConcurrentWrites bool `config:"disable_concurrent_writes"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
Concurrency int `config:"concurrency"`
|
||||
SetEnv fs.SpaceSepList `config:"set_env"`
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyPem string `config:"key_pem"`
|
||||
KeyFile string `config:"key_file"`
|
||||
KeyFilePass string `config:"key_file_pass"`
|
||||
PubKeyFile string `config:"pubkey_file"`
|
||||
KnownHostsFile string `config:"known_hosts_file"`
|
||||
KeyUseAgent bool `config:"key_use_agent"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
PathOverride string `config:"path_override"`
|
||||
SetModTime bool `config:"set_modtime"`
|
||||
Md5sumCommand string `config:"md5sum_command"`
|
||||
Sha1sumCommand string `config:"sha1sum_command"`
|
||||
SkipLinks bool `config:"skip_links"`
|
||||
Subsystem string `config:"subsystem"`
|
||||
ServerCommand string `config:"server_command"`
|
||||
UseFstat bool `config:"use_fstat"`
|
||||
DisableConcurrentReads bool `config:"disable_concurrent_reads"`
|
||||
DisableConcurrentWrites bool `config:"disable_concurrent_writes"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -365,8 +286,6 @@ type Fs struct {
|
||||
name string
|
||||
root string
|
||||
absRoot string
|
||||
shellRoot string
|
||||
shellType string
|
||||
opt Options // parsed options
|
||||
ci *fs.ConfigInfo // global config
|
||||
m configmap.Mapper // config
|
||||
@@ -505,22 +424,6 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Set any environment variables on the ssh.Session
|
||||
func (f *Fs) setEnv(s *ssh.Session) error {
|
||||
for _, env := range f.opt.SetEnv {
|
||||
equal := strings.IndexRune(env, '=')
|
||||
if equal < 0 {
|
||||
return fmt.Errorf("no = found in env var %q", env)
|
||||
}
|
||||
// fs.Debugf(f, "Setting env %q = %q", env[:equal], env[equal+1:])
|
||||
err := s.Setenv(env[:equal], env[equal+1:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set env var %q: %w", env[:equal], err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates a new SFTP client on conn, using the specified subsystem
|
||||
// or sftp server, and zero or more option functions
|
||||
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
|
||||
@@ -528,10 +431,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = f.setEnv(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pw, err := s.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -555,8 +454,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
|
||||
sftp.UseFstat(f.opt.UseFstat),
|
||||
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
|
||||
sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
|
||||
sftp.MaxPacketUnchecked(int(f.opt.ChunkSize)),
|
||||
sftp.MaxConcurrentRequestsPerFile(f.opt.Concurrency),
|
||||
)
|
||||
return sftp.NewClientPipe(pr, pw, opts...)
|
||||
}
|
||||
@@ -645,7 +542,7 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||
f.drain.Stop()
|
||||
}
|
||||
if len(f.pool) != 0 {
|
||||
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
|
||||
fs.Debugf(f, "closing %d unused connections", len(f.pool))
|
||||
}
|
||||
for i, c := range f.pool {
|
||||
if cErr := c.closed(); cErr == nil {
|
||||
@@ -842,7 +739,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
//
|
||||
// Just send the password back for all questions
|
||||
func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []string, echos []bool, pass string) ([]string, error) {
|
||||
fs.Debugf(f, "Keyboard interactive auth requested")
|
||||
fs.Debugf(f, "keyboard interactive auth requested")
|
||||
answers := make([]string, len(questions))
|
||||
for i := range answers {
|
||||
answers[i] = pass
|
||||
@@ -872,7 +769,6 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
f.name = name
|
||||
f.root = root
|
||||
f.absRoot = root
|
||||
f.shellRoot = root
|
||||
f.opt = *opt
|
||||
f.m = m
|
||||
f.config = sshConfig
|
||||
@@ -882,7 +778,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
f.savedpswd = ""
|
||||
// set the pool drainer timer going
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
@@ -894,70 +790,16 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewFs: %w", err)
|
||||
}
|
||||
// Check remote shell type, try to auto-detect if not configured and save to config for later
|
||||
if f.opt.ShellType != "" {
|
||||
f.shellType = f.opt.ShellType
|
||||
fs.Debugf(f, "Shell type %q from config", f.shellType)
|
||||
} else {
|
||||
session, err := c.sshClient.NewSession()
|
||||
if err != nil {
|
||||
f.shellType = shellTypeNotSupported
|
||||
fs.Debugf(f, "Failed to get shell session for shell type detection command: %v", err)
|
||||
} else {
|
||||
var stdout, stderr bytes.Buffer
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
shellCmd := "echo ${ShellId}%ComSpec%"
|
||||
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
|
||||
err = session.Run(shellCmd)
|
||||
_ = session.Close()
|
||||
if err != nil {
|
||||
f.shellType = defaultShellType
|
||||
fs.Debugf(f, "Remote command failed: %v (stdout=%v) (stderr=%v)", err, bytes.TrimSpace(stdout.Bytes()), bytes.TrimSpace(stderr.Bytes()))
|
||||
} else {
|
||||
outBytes := stdout.Bytes()
|
||||
fs.Debugf(f, "Remote command result: %s", outBytes)
|
||||
outString := string(bytes.TrimSpace(stdout.Bytes()))
|
||||
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // If PowerShell: "Microsoft.PowerShell%ComSpec%"
|
||||
f.shellType = "powershell"
|
||||
} else if !strings.HasSuffix(outString, "%ComSpec%") { // If Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
|
||||
f.shellType = "cmd"
|
||||
} else { // If Unix: "%ComSpec%"
|
||||
f.shellType = "unix"
|
||||
}
|
||||
}
|
||||
}
|
||||
// Save permanently in config to avoid the extra work next time
|
||||
fs.Debugf(f, "Shell type %q detected (set option shell_type to override)", f.shellType)
|
||||
f.m.Set("shell_type", f.shellType)
|
||||
cwd, err := c.sftpClient.Getwd()
|
||||
f.putSftpConnection(&c, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
|
||||
} else if !path.IsAbs(f.root) {
|
||||
f.absRoot = path.Join(cwd, f.root)
|
||||
fs.Debugf(f, "Using absolute root directory %q", f.absRoot)
|
||||
}
|
||||
// Ensure we have absolute path to root
|
||||
// It appears that WS FTP doesn't like relative paths,
|
||||
// and the openssh sftp tool also uses absolute paths.
|
||||
if !path.IsAbs(f.root) {
|
||||
// Trying RealPath first, to perform proper server-side canonicalize.
|
||||
// It may fail (SSH_FX_FAILURE reported on WS FTP) and will then resort
|
||||
// to simple path join with current directory from Getwd (which can work
|
||||
// on WS FTP, even though it is also based on RealPath).
|
||||
absRoot, err := c.sftpClient.RealPath(f.root)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
|
||||
cwd, err := c.sftpClient.Getwd()
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
|
||||
} else {
|
||||
f.absRoot = path.Join(cwd, f.root)
|
||||
fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
|
||||
}
|
||||
} else {
|
||||
f.absRoot = absRoot
|
||||
fs.Debugf(f, "Relative path resolved to %q", f.absRoot)
|
||||
}
|
||||
}
|
||||
f.putSftpConnection(&c, err)
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file,
|
||||
// and if so change the filesystem root to its parent directory.
|
||||
// Check to see if the root is actually an existing file
|
||||
oldAbsRoot := f.absRoot
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
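The shell type auto-detection removed in this hunk reduces to classifying the output of the remote command echo ${ShellId}%ComSpec%. A standalone sketch of only that classification step, with the three cases taken from the comments above and invented sample outputs:

```go
package main

import (
	"fmt"
	"strings"
)

// classifyShell mirrors the decision taken on the detection command output:
// PowerShell leaves "Microsoft.PowerShell%ComSpec%", cmd expands %ComSpec%
// to a path such as C:\WINDOWS\system32\cmd.exe, and a Unix shell expands
// ${ShellId} to nothing, leaving just "%ComSpec%".
func classifyShell(output string) string {
	out := strings.TrimSpace(output)
	switch {
	case strings.HasPrefix(out, "Microsoft.PowerShell"):
		return "powershell"
	case !strings.HasSuffix(out, "%ComSpec%"):
		return "cmd"
	default:
		return "unix"
	}
}

func main() {
	fmt.Println(classifyShell("Microsoft.PowerShell%ComSpec%"))         // powershell
	fmt.Println(classifyShell(`${ShellId}C:\WINDOWS\system32\cmd.exe`)) // cmd
	fmt.Println(classifyShell("%ComSpec%"))                             // unix
}
```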
@@ -965,24 +807,20 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
_, err = f.NewObject(ctx, remote)
|
||||
_, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err != fs.ErrorObjectNotFound && err != fs.ErrorIsDir {
|
||||
return nil, err
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorIsDir {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
f.absRoot = oldAbsRoot
|
||||
return f, nil
|
||||
}
|
||||
// File doesn't exist so keep the old f
|
||||
f.root = root
|
||||
f.absRoot = oldAbsRoot
|
||||
err = nil
|
||||
} else {
|
||||
// File exists so change fs to point to the parent and return it with an error
|
||||
err = fs.ErrorIsFile
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
err = nil
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
fs.Debugf(f, "Using root directory %q", f.absRoot)
|
||||
return f, err
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name returns the configured name of the file system
|
||||
@@ -1296,10 +1134,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("run: get SFTP session: %w", err)
|
||||
}
|
||||
err = f.setEnv(session)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
_ = session.Close()
|
||||
}()
|
||||
@@ -1321,183 +1155,74 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
ctx := context.TODO()
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}

if f.cachedHashes != nil {
return *f.cachedHashes
}

hashSet := hash.NewHashSet()
f.cachedHashes = &hashSet

if f.opt.DisableHashCheck || f.shellType == shellTypeNotSupported {
return hashSet
}

// look for a hash command which works
checkHash := func(hashType hash.Type, commands []struct{ hashFile, hashEmpty string }, expected string, hashCommand *string, changed *bool) bool {
checkHash := func(commands []string, expected string, hashCommand *string, changed *bool) bool {
if *hashCommand == hashCommandNotSupported {
return false
}
if *hashCommand != "" {
return true
}
fs.Debugf(f, "Checking default %v hash commands", hashType)
*changed = true
for _, command := range commands {
output, err := f.run(ctx, command.hashEmpty)
output, err := f.run(ctx, command)
if err != nil {
fs.Debugf(f, "Hash command skipped: %v", err)
continue
}
output = bytes.TrimSpace(output)
fs.Debugf(f, "checking %q command: %q", command, output)
if parseHash(output) == expected {
*hashCommand = command.hashFile
fs.Debugf(f, "Hash command accepted")
*hashCommand = command
return true
}
fs.Debugf(f, "Hash command skipped: Wrong output")
}
*hashCommand = hashCommandNotSupported
return false
}

changed := false
md5Commands := []struct {
hashFile, hashEmpty string
}{
{"md5sum", "md5sum"},
{"md5 -r", "md5 -r"},
{"rclone md5sum", "rclone md5sum"},
}
sha1Commands := []struct {
hashFile, hashEmpty string
}{
{"sha1sum", "sha1sum"},
{"sha1 -r", "sha1 -r"},
{"rclone sha1sum", "rclone sha1sum"},
}
if f.shellType == "powershell" {
md5Commands = append(md5Commands, struct {
hashFile, hashEmpty string
}{
"&{param($Path);Get-FileHash -Algorithm MD5 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
"Get-FileHash -Algorithm MD5 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
})

sha1Commands = append(sha1Commands, struct {
hashFile, hashEmpty string
}{
"&{param($Path);Get-FileHash -Algorithm SHA1 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
"Get-FileHash -Algorithm SHA1 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
})
}

md5Works := checkHash(hash.MD5, md5Commands, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash(hash.SHA1, sha1Commands, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
md5Works := checkHash([]string{"md5sum", "md5 -r", "rclone md5sum"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r", "rclone sha1sum"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)

if changed {
// Save permanently in config to avoid the extra work next time
fs.Debugf(f, "Setting hash command for %v to %q (set sha1sum_command to override)", hash.MD5, f.opt.Md5sumCommand)
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
fs.Debugf(f, "Setting hash command for %v to %q (set md5sum_command to override)", hash.SHA1, f.opt.Sha1sumCommand)
f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
}

set := hash.NewHashSet()
if sha1Works {
hashSet.Add(hash.SHA1)
set.Add(hash.SHA1)
}
if md5Works {
hashSet.Add(hash.MD5)
set.Add(hash.MD5)
}

return hashSet
f.cachedHashes = &set
return set
}

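Both versions of Hashes above probe candidate commands by hashing empty input and comparing the result with hard-coded digests. Those constants are simply the MD5 and SHA-1 of zero bytes, which a few lines of standalone Go confirm:

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
)

func main() {
	// Digests of empty input, matching the constants checkHash compares against.
	fmt.Printf("%x\n", md5.Sum(nil))  // d41d8cd98f00b204e9800998ecf8427e
	fmt.Printf("%x\n", sha1.Sum(nil)) // da39a3ee5e6b4b0d3255bfef95601890afd80709
}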
// About gets usage stats
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// If server implements the vendor-specific VFS statistics extension prefer that
// (OpenSSH implements it on using syscall.Statfs on Linux and API function GetDiskFreeSpace on Windows)
c, err := f.getSftpConnection(ctx)
escapedPath := shellEscape(f.root)
if f.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
}
if len(escapedPath) == 0 {
escapedPath = "/"
}
stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil {
return nil, err
}
var vfsStats *sftp.StatVFS
if _, found := c.sftpClient.HasExtension("statvfs@openssh.com"); found {
fs.Debugf(f, "Server has VFS statistics extension")
aboutPath := f.absRoot
if aboutPath == "" {
aboutPath = "/"
}
fs.Debugf(f, "About path %q", aboutPath)
vfsStats, err = c.sftpClient.StatVFS(aboutPath)
}
f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be re-used
if vfsStats != nil {
total := vfsStats.TotalSpace()
free := vfsStats.FreeSpace()
used := total - free
return &fs.Usage{
Total: fs.NewUsageValue(int64(total)),
Used: fs.NewUsageValue(int64(used)),
Free: fs.NewUsageValue(int64(free)),
}, nil
} else if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, err
}
fs.Debugf(f, "Failed to retrieve VFS statistics, trying shell command instead: %v", err)
} else {
fs.Debugf(f, "Server does not have the VFS statistics extension, trying shell command instead")
}

// Fall back to shell command method if possible
if f.shellType == shellTypeNotSupported || f.shellType == "cmd" {
fs.Debugf(f, "About shell command is not available for shell type %q (set option shell_type to override)", f.shellType)
return nil, fmt.Errorf("not supported with shell type %q", f.shellType)
}
aboutShellPath := f.remoteShellPath("")
if aboutShellPath == "" {
aboutShellPath = "/"
}
fs.Debugf(f, "About path %q", aboutShellPath)
aboutShellPathArg, err := f.quoteOrEscapeShellPath(aboutShellPath)
if err != nil {
return nil, err
}
// PowerShell
if f.shellType == "powershell" {
shellCmd := "Get-Item " + aboutShellPathArg + " -ErrorAction Stop|Select-Object -First 1 -ExpandProperty PSDrive|ForEach-Object{\"$($_.Used) $($_.Free)\"}"
fs.Debugf(f, "About using shell command for shell type %q", f.shellType)
stdout, err := f.run(ctx, shellCmd)
if err != nil {
fs.Debugf(f, "About shell command for shell type %q failed (set option shell_type to override): %v", f.shellType, err)
return nil, fmt.Errorf("powershell command failed: %w", err)
}
split := strings.Fields(string(stdout))
usage := &fs.Usage{}
if len(split) == 2 {
usedValue, usedErr := strconv.ParseInt(split[0], 10, 64)
if usedErr == nil {
usage.Used = fs.NewUsageValue(usedValue)
}
freeValue, freeErr := strconv.ParseInt(split[1], 10, 64)
if freeErr == nil {
usage.Free = fs.NewUsageValue(freeValue)
if usedErr == nil {
usage.Total = fs.NewUsageValue(usedValue + freeValue)
}
}
}
return usage, nil
}
// Unix/default shell
shellCmd := "df -k " + aboutShellPathArg
fs.Debugf(f, "About using shell command for shell type %q", f.shellType)
stdout, err := f.run(ctx, shellCmd)
if err != nil {
fs.Debugf(f, "About shell command for shell type %q failed (set option shell_type to override): %v", f.shellType, err)
return nil, fmt.Errorf("your remote may not have the required df utility: %w", err)
}

usageTotal, usageUsed, usageAvail := parseUsage(stdout)
usage := &fs.Usage{}
if usageTotal >= 0 {
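The Unix fallback above runs `df -k` on the remote and hands the output to parseUsage, which is outside this hunk. As a rough sketch only, assuming the POSIX `df -k` column layout (1K blocks, used, available) and not rclone's actual parser, the parsing amounts to reading the numeric fields of the second output line:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDfOutput is an illustrative stand-in for rclone's parseUsage: it reads
// the second line of `df -k` output and returns total/used/available in bytes,
// or -1 for any field it cannot parse.
func parseDfOutput(out string) (total, used, avail int64) {
	total, used, avail = -1, -1, -1
	lines := strings.Split(strings.TrimSpace(out), "\n")
	if len(lines) < 2 {
		return
	}
	fields := strings.Fields(lines[1])
	if len(fields) < 4 {
		return
	}
	parse := func(s string) int64 {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return -1
		}
		return n * 1024 // df -k reports 1K blocks
	}
	return parse(fields[1]), parse(fields[2]), parse(fields[3])
}

func main() {
	out := "Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/sda1 102400 40960 61440 40% /\n"
	fmt.Println(parseDfOutput(out)) // 104857600 41943040 62914560
}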
@@ -1562,78 +1287,31 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}

shellPathArg, err := o.fs.quoteOrEscapeShellPath(o.shellPath())
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
if err != nil {
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
}
outBytes, err := o.fs.run(ctx, hashCmd+" "+shellPathArg)
if err != nil {
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
}
hashString := parseHash(outBytes)
fs.Debugf(o, "Parsed hash: %s", hashString)

str := parseHash(b)
if r == hash.MD5 {
o.md5sum = &hashString
o.md5sum = &str
} else if r == hash.SHA1 {
o.sha1sum = &hashString
o.sha1sum = &str
}
return hashString, nil
return str, nil
}

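Both sides of the Hash hunk run a command such as `md5sum <path>` and pull the digest out of its output via parseHash, which is defined elsewhere in the file. md5sum/sha1sum style output is `<hex digest><whitespace><file name>`, so an assumed, simplified equivalent of that extraction is just the first whitespace-separated field:

package main

import (
	"fmt"
	"strings"
)

// firstField is an illustrative stand-in for a parseHash-style helper:
// keep the digest, drop the trailing file name.
func firstField(out []byte) string {
	fields := strings.Fields(string(out))
	if len(fields) == 0 {
		return ""
	}
	return fields[0]
}

func main() {
	out := []byte("d41d8cd98f00b204e9800998ecf8427e  /tmp/empty.txt\n")
	fmt.Println(firstField(out)) // d41d8cd98f00b204e9800998ecf8427e
}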
// quoteOrEscapeShellPath makes path a valid string argument in configured shell
// and also ensures it cannot cause unintended behavior.
func quoteOrEscapeShellPath(shellType string, shellPath string) (string, error) {
// PowerShell
if shellType == "powershell" {
return "'" + strings.ReplaceAll(shellPath, "'", "''") + "'", nil
}
// Windows Command Prompt
if shellType == "cmd" {
if strings.Contains(shellPath, "\"") {
return "", fmt.Errorf("path is not valid in shell type %s: %s", shellType, shellPath)
}
return "\"" + shellPath + "\"", nil
}
// Unix shell
safe := unixShellEscapeRegex.ReplaceAllString(shellPath, `\$0`)
return strings.ReplaceAll(safe, "\n", "'\n'"), nil
}
var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")

// quoteOrEscapeShellPath makes path a valid string argument in configured shell
func (f *Fs) quoteOrEscapeShellPath(shellPath string) (string, error) {
return quoteOrEscapeShellPath(f.shellType, shellPath)
}

// remotePath returns the native SFTP path of the file or directory at the remote given
func (f *Fs) remotePath(remote string) string {
return path.Join(f.absRoot, remote)
}

// remoteShellPath returns the SSH shell path of the file or directory at the remote given
func (f *Fs) remoteShellPath(remote string) string {
if f.opt.PathOverride != "" {
shellPath := path.Join(f.opt.PathOverride, remote)
fs.Debugf(f, "Shell path redirected to %q with option path_override", shellPath)
return shellPath
}
shellPath := path.Join(f.absRoot, remote)
if f.shellType == "powershell" || f.shellType == "cmd" {
// If remote shell is powershell or cmd, then server is probably Windows.
// The sftp package converts everything to POSIX paths: Forward slashes, and
// absolute paths starts with a slash. An absolute path on a Windows server will
// then look like this "/C:/Windows/System32". We must remove the "/" prefix
// to make this a valid path for shell commands. In case of PowerShell there is a
// possibility that it is a Unix server, with PowerShell Core shell, but assuming
// root folders with names such as "C:" are rare, we just take this risk,
// and option path_override can always be used to work around corner cases.
if posixWinAbsPathRegex.MatchString(shellPath) {
shellPath = strings.TrimPrefix(shellPath, "/")
fs.Debugf(f, "Shell path adjusted to %q (set option path_override to override)", shellPath)
return shellPath
}
}
fs.Debugf(f, "Shell path %q", shellPath)
return shellPath
// Escape a string s.t. it cannot cause unintended behavior
// when sending it to a shell.
func shellEscape(str string) string {
safe := shellEscapeRegex.ReplaceAllString(str, `\$0`)
return strings.ReplaceAll(safe, "\n", "'\n'")
}

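The Unix branch of quoteOrEscapeShellPath relies on ReplaceAllString with the replacement `\$0`, where $0 expands to the matched character, so every unsafe character gets a backslash prefix. A self-contained illustration of that mechanism, using a simplified character class rather than rclone's actual unixShellEscapeRegex:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Simplified stand-in for the escape regex: anything that is not
	// alphanumeric or one of a few safe punctuation characters.
	unsafe := regexp.MustCompile(`[^A-Za-z0-9_./-]`)
	path := "/backups/report (final) $v2.txt"
	escaped := unsafe.ReplaceAllString(path, `\$0`) // $0 is the matched character
	escaped = strings.ReplaceAll(escaped, "\n", "'\n'")
	fmt.Println(escaped) // /backups/report\ \(final\)\ \$v2.txt
}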
// Converts a byte array from the SSH session returned by
@@ -1684,14 +1362,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}

// path returns the native SFTP path of the object
// path returns the native path of the object
func (o *Object) path() string {
return o.fs.remotePath(o.remote)
}

// shellPath returns the SSH shell path of the object
func (o *Object) shellPath() string {
return o.fs.remoteShellPath(o.remote)
return path.Join(o.fs.absRoot, o.remote)
}

// setMetadata updates the info in the object from the stat result passed in

@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/assert"
)

func TestShellEscapeUnix(t *testing.T) {
func TestShellEscape(t *testing.T) {
for i, test := range []struct {
unescaped, escaped string
}{
@@ -20,44 +20,7 @@ func TestShellEscapeUnix(t *testing.T) {
{"/test/\n", "/test/'\n'"},
{":\"'", ":\\\"\\'"},
} {
got, err := quoteOrEscapeShellPath("unix", test.unescaped)
assert.NoError(t, err)
assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped))
}
}

func TestShellEscapeCmd(t *testing.T) {
for i, test := range []struct {
unescaped, escaped string
ok bool
}{
{"", "\"\"", true},
{"c:/this/is/harmless", "\"c:/this/is/harmless\"", true},
{"c:/test¬epad", "\"c:/test¬epad\"", true},
{"c:/test\"&\"notepad", "", false},
} {
got, err := quoteOrEscapeShellPath("cmd", test.unescaped)
if test.ok {
assert.NoError(t, err)
assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped))
} else {
assert.Error(t, err)
}
}
}

func TestShellEscapePowerShell(t *testing.T) {
for i, test := range []struct {
unescaped, escaped string
}{
{"", "''"},
{"c:/this/is/harmless", "'c:/this/is/harmless'"},
{"c:/test¬epad", "'c:/test¬epad'"},
{"c:/test\"&\"notepad", "'c:/test\"&\"notepad'"},
{"c:/test'&'notepad", "'c:/test''&''notepad'"},
} {
got, err := quoteOrEscapeShellPath("powershell", test.unescaped)
assert.NoError(t, err)
got := shellEscape(test.unescaped)
assert.Equal(t, test.escaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped))
}
}

@@ -1077,7 +1077,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
}
dstLeaf = f.opt.Enc.FromStandardName(dstLeaf)

sameName := strings.EqualFold(srcLeaf, dstLeaf)
sameName := strings.ToLower(srcLeaf) == strings.ToLower(dstLeaf)
if sameName && srcParentID == dstParentID {
return nil, fmt.Errorf("copy: can't copy to a file in the same directory whose name only differs in case: %q vs %q", srcLeaf, dstLeaf)
}
@@ -1096,7 +1096,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
directCopy = true
} else if err != nil {
return nil, fmt.Errorf("copy: failed to examine destination dir: %w", err)
//} else {
} else {
// otherwise need to copy via a temporary directory
}
}

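One side of this Copy hunk compares the leaf names with strings.EqualFold, the other lowercases both strings first. The results agree for ASCII file names, but EqualFold avoids allocating two lowered copies and also handles Unicode case folds that a plain ToLower round-trip misses. A small demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	src, dst := "Report.PDF", "report.pdf"
	fmt.Println(strings.EqualFold(src, dst))                  // true
	fmt.Println(strings.ToLower(src) == strings.ToLower(dst)) // true

	// The Latin long s 'ſ' case-folds to 's', which only EqualFold catches.
	fmt.Println(strings.EqualFold("ſ", "s"))                  // true
	fmt.Println(strings.ToLower("ſ") == strings.ToLower("s")) // false
}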
@@ -423,7 +423,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}

opt.APIURL = strings.TrimSuffix(opt.APIURL, "/")
if strings.HasSuffix(opt.APIURL, "/") {
opt.APIURL = strings.TrimSuffix(opt.APIURL, "/")
}

// Parse the endpoint
u, err := url.Parse(opt.APIURL)

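The HasSuffix guard on one side of this hunk is redundant: strings.TrimSuffix already returns its input unchanged when the suffix is absent, which is why the other side calls it unconditionally. For illustration (the URL is a placeholder):

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.TrimSuffix("https://api.example.com/", "/")) // https://api.example.com
	fmt.Println(strings.TrimSuffix("https://api.example.com", "/"))  // unchanged: https://api.example.com
}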
@@ -872,7 +872,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.EqualFold(srcPath, dstPath) {
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}

@@ -268,7 +268,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return "Swift root"
return fmt.Sprintf("Swift root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("Swift container %s", f.rootContainer)
@@ -1271,7 +1271,7 @@ func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]strin
if _, ok := containerSegments[segmentContainer]; !ok {
containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
}
segments := containerSegments[segmentContainer]
segments, _ := containerSegments[segmentContainer]
segments = append(segments, segment.Name)
containerSegments[segmentContainer] = segments
}
@@ -1303,7 +1303,7 @@ func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string,
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("missing or wrong structure of manifest of Dynamic large object")
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
@@ -1363,7 +1363,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
return
}
fs.Debugf(o, "Delete segments when err raise %v", err)
if len(segmentInfos) == 0 {
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
_ctx := context.Background()
@@ -1418,7 +1418,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
}

func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
if len(segmentInfos) == 0 {
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
for _, v := range segmentInfos {

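Several of the Swift hunks above reduce to the same Go facts: len of a nil slice is 0, so `segmentInfos == nil || len(segmentInfos) == 0` collapses to `len(segmentInfos) == 0`, and indexing a map with a missing key yields the zero value (a nil slice) without the two-value form, which append then handles fine. A quick demonstration:

package main

import "fmt"

func main() {
	var segmentInfos []string                                // nil slice
	fmt.Println(segmentInfos == nil, len(segmentInfos) == 0) // true true

	containerSegments := map[string][]string{}
	segments := containerSegments["missing"] // nil; no comma-ok form needed
	segments = append(segments, "segment-1") // append works on a nil slice
	fmt.Println(len(segments))               // 1
}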
@@ -1,17 +0,0 @@
// Package common defines code common to the union and the policies
//
// These need to be defined in a separate package to avoid import loops
package common

import "github.com/rclone/rclone/fs"

// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
CacheTime int `config:"cache_time"`
MinFreeSpace fs.SizeSuffix `config:"min_free_space"`
}
@@ -2,7 +2,6 @@ package union

import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -35,8 +34,9 @@ type entry interface {
candidates() []upstream.Entry
}

// UnWrapUpstream returns the upstream Object that this Object is wrapping
func (o *Object) UnWrapUpstream() *upstream.Object {
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() *upstream.Object {
return o.Object
}

@@ -140,42 +140,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return errs.Err()
}

// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}

// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}

// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}

// ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
@@ -200,8 +164,3 @@ func (d *Directory) Size() (s int64) {
}
return s
}

// Check the interfaces are satisfied
var (
_ fs.FullObject = (*Object)(nil)
)

@@ -2,7 +2,6 @@ package policy

import (
"context"
"errors"
"math"

"github.com/rclone/rclone/backend/union/upstream"
@@ -19,8 +18,6 @@ type EpLfs struct {
EpAll
}

var errNoUpstreamsFound = errors.New("no upstreams found with more than min_free_space space spare")

func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
var minFreeSpace int64 = math.MaxInt64
var lfsupstream *upstream.Fs
@@ -30,35 +27,31 @@ func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
}
if space < minFreeSpace && space > int64(u.Opt.MinFreeSpace) {
if space < minFreeSpace {
minFreeSpace = space
lfsupstream = u
}
}
if lfsupstream == nil {
return nil, errNoUpstreamsFound
return nil, fs.ErrorObjectNotFound
}
return lfsupstream, nil
}

func (p *EpLfs) lfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minFreeSpace int64 = math.MaxInt64
var minFreeSpace int64
var lfsEntry upstream.Entry
for _, e := range entries {
u := e.UpstreamFs()
space, err := u.GetFreeSpace()
space, err := e.UpstreamFs().GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Free Space is not supported for upstream %s, treating as infinite", u.Name())
"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
}
if space < minFreeSpace && space > int64(u.Opt.MinFreeSpace) {
if space < minFreeSpace {
minFreeSpace = space
lfsEntry = e
}
}
if lfsEntry == nil {
return nil, errNoUpstreamsFound
}
return lfsEntry, nil
}

@@ -42,7 +42,7 @@ func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
var minUsedSpace int64 = math.MaxInt64
var lusEntry upstream.Entry
for _, e := range entries {
space, err := e.UpstreamFs().GetUsedSpace()
space, err := e.UpstreamFs().GetFreeSpace()
if err != nil {
fs.LogPrintf(fs.LogLevelNotice, nil,
"Used Space is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())

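The eplfs hunks differ in one condition: the v1.59.2 side only accepts an upstream whose free space exceeds the configured min_free_space, while the older side takes the smallest free space unconditionally. The sketch below mirrors that "least free space above a floor" rule with plain structs; upstreamInfo and pickLeastFree are illustrative names, not the upstream.Fs API:

package main

import (
	"errors"
	"fmt"
	"math"
)

type upstreamInfo struct {
	name      string
	freeSpace int64 // bytes free, as GetFreeSpace would report
}

// pickLeastFree returns the upstream with the least free space that still has
// more than minFreeSpace spare, mirroring the eplfs selection rule above.
func pickLeastFree(upstreams []upstreamInfo, minFreeSpace int64) (upstreamInfo, error) {
	var best upstreamInfo
	found := false
	least := int64(math.MaxInt64)
	for _, u := range upstreams {
		if u.freeSpace < least && u.freeSpace > minFreeSpace {
			least = u.freeSpace
			best = u
			found = true
		}
	}
	if !found {
		return upstreamInfo{}, errors.New("no upstreams found with more than min_free_space space spare")
	}
	return best, nil
}

func main() {
	upstreams := []upstreamInfo{
		{"a", 512 << 20}, // 512 MiB free: below a 1 GiB floor
		{"b", 5 << 30},   // 5 GiB free
		{"c", 2 << 30},   // 2 GiB free: least free space above the floor
	}
	u, err := pickLeastFree(upstreams, 1<<30) // 1 GiB floor, the default shown below for min_free_space
	fmt.Println(u.name, err)                  // c <nil>
}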
@@ -13,7 +13,6 @@ import (
"sync"
"time"

"github.com/rclone/rclone/backend/union/common"
"github.com/rclone/rclone/backend/union/policy"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
@@ -30,9 +29,6 @@ func init() {
Name: "union",
Description: "Union merges the contents of several upstream fs",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "upstreams",
Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
@@ -53,24 +49,26 @@ func init() {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Default: 120,
}, {
Name: "min_free_space",
Help: `Minimum viable free space for lfs/eplfs policies.

If a remote has less than this much free space then it won't be
considered for use in lfs or eplfs policies.`,
Advanced: true,
Default: fs.Gibi,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
CacheTime int `config:"cache_time"`
}

// Fs represents a union of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt common.Options // options for this Fs
opt Options // options for this Fs
root string // the path we are working on
upstreams []*upstream.Fs // slice of upstreams
hashSet hash.Set // intersection of hash types
@@ -85,16 +83,16 @@ func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
if err != nil {
return nil, err
}
switch e := e.(type) {
switch e.(type) {
case *upstream.Object:
return &Object{
Object: e,
Object: e.(*upstream.Object),
fs: f,
co: entries,
}, nil
case *upstream.Directory:
return &Directory{
Directory: e,
Directory: e.(*upstream.Directory),
cd: entries,
}, nil
default:
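The wrapEntries hunk contrasts two equivalent ways of branching on a dynamic type: `switch e := e.(type)` binds the asserted value in each case, while the plain `switch e.(type)` form needs a second assertion such as `e.(*upstream.Object)` inside the case body. A self-contained illustration with stand-in types (object and directory are hypothetical, not the union backend's types):

package main

import "fmt"

type object struct{ name string }
type directory struct{ name string }

func describe(e interface{}) string {
	// Bound form: o already has the concrete type inside each case,
	// so no repeated type assertion is needed.
	switch o := e.(type) {
	case *object:
		return "object " + o.name
	case *directory:
		return "directory " + o.name
	default:
		return fmt.Sprintf("unknown %T", e)
	}
}

func main() {
	fmt.Println(describe(&object{name: "report.txt"}))
	fmt.Println(describe(&directory{name: "backups"}))
}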
@@ -169,11 +167,7 @@ func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
if err != nil {
return nil, err
}
// If created roots then choose one
if dir == "" {
upstreams, err = f.create(ctx, dir)
}
return upstreams, err
return upstreams, nil
}

// Mkdir makes the root directory of the Fs object
@@ -226,7 +220,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o := srcObj.UnWrapUpstream()
o := srcObj.UnWrap()
su := o.UpstreamFs()
if su.Features().Copy == nil {
return nil, fs.ErrorCantCopy
@@ -814,7 +808,7 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(common.Options)
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
@@ -838,12 +832,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}

root = strings.Trim(root, "/")
upstreams := make([]*upstream.Fs, len(opt.Upstreams))
errs := Errors(make([]error, len(opt.Upstreams)))
multithread(len(opt.Upstreams), func(i int) {
u := opt.Upstreams[i]
upstreams[i], errs[i] = upstream.New(ctx, u, root, opt)
upstreams[i], errs[i] = upstream.New(ctx, u, root, time.Duration(opt.CacheTime)*time.Second)
})
var usedUpstreams []*upstream.Fs
var fserr error
@@ -889,9 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
canMove := true
for _, f := range upstreams {

Some files were not shown because too many files have changed in this diff.