Mirror of https://github.com/rclone/rclone.git, synced 2026-01-04 17:43:50 +00:00
Compare commits
2 Commits
fix-7103-n...mount-wind

| Author | SHA1 | Date |
|---|---|---|
| | 0093e23e42 | |
| | 11443e4491 | |
.github/FUNDING.yml (vendored, new file, 4 lines changed)
@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]
.github/dependabot.yml (vendored, 6 lines changed)
@@ -1,6 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
.github/workflows/build.yml (vendored, 14 lines changed)
@@ -8,9 +8,9 @@ name: build
on:
push:
branches:
- '**'
- '*'
tags:
- '**'
- '*'
pull_request:
workflow_dispatch:
inputs:
@@ -104,7 +104,7 @@ jobs:
fetch-depth: 0

- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -217,7 +217,7 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
@@ -237,7 +237,7 @@ jobs:

# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.20'
check-latest: true
@@ -262,7 +262,7 @@ jobs:

# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.20'

@@ -352,4 +352,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
if: github.head_ref == '' && github.repository == 'rclone/rclone'

@@ -1,61 +0,0 @@
name: Docker beta build

on:
  push:
    branches:
      - master
jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and publish image
        uses: docker/build-push-action@v4
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha
          cache-to: type=gha,mode=max
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
.github/workflows/build_publish_docker_image.yml (vendored, new file, 26 lines changed)
@@ -0,0 +1,26 @@
name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build and publish image
        uses: ilteoood/docker_buildx@1.1.0
        with:
          tag: beta
          imageName: rclone/rclone
          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          publish: true
          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
.github/workflows/winget.yml (vendored, 14 lines changed)
@@ -1,14 +0,0 @@
name: Publish to Winget
on:
  release:
    types: [released]

jobs:
  publish:
    runs-on: windows-latest # Action can only run on Windows
    steps:
      - uses: vedantmgoyal2009/winget-releaser@v2
        with:
          identifier: Rclone.Rclone
          installers-regex: '-windows-\w+\.zip$'
          token: ${{ secrets.WINGET_TOKEN }}
@@ -2,17 +2,15 @@

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - revive
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    - staticcheck
    - gosimple
    - stylecheck
    - unused
    - misspell
    #- prealloc
    #- maligned
  disable-all: true
@@ -27,30 +25,6 @@ issues:
  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

  exclude-rules:

    - linters:
        - staticcheck
      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 10m

linters-settings:
  revive:
    rules:
      - name: unreachable-code
        disabled: true
      - name: unused-parameter
        disabled: true
      - name: empty-block
        disabled: true
      - name: redefines-builtin-id
        disabled: true
      - name: superfluous-else
        disabled: true
  stylecheck:
    # Only enable the checks performed by the staticcheck stand-alone tool,
    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

@@ -16,8 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |

**This is a work in progress Draft**

MANUAL.html (generated, 3933 lines changed): file diff suppressed because it is too large
MANUAL.txt (generated, 4405 lines changed): file diff suppressed because it is too large
Makefile (2 lines changed)
@@ -96,7 +96,7 @@ build_dep:

# Get the release dependencies we only install on linux
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'

# Get the release dependencies we only install on Windows
release_dep_windows:

@@ -25,19 +25,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -61,15 +60,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)

@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release

* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries

@@ -36,7 +36,6 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"

@@ -58,8 +58,6 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
modTimeKey = "mtime"
dirMetaKey = "hdi_isfolder"
dirMetaValue = "true"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
@@ -365,18 +363,6 @@ This option controls how often unused buffers will be removed from the pool.`,
},
},
Advanced: true,
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes, this option
creates an empty object ending with "/", to persist the folder.

This object also has the metadata "` + dirMetaKey + ` = ` + dirMetaValue + `" to conform to
the Microsoft standard.
`,
}, {
Name: "no_check_container",
Help: `If set, don't attempt to check the container exists or create it.
@@ -426,7 +412,6 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
DirectoryMarkers bool `config:"directory_markers"`
NoCheckContainer bool `config:"no_check_container"`
NoHeadObject bool `config:"no_head_object"`
}
@@ -501,7 +486,7 @@ func parsePath(path string) (root string) {
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
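The `split` helper above joins `f.root` with the request path and then separates the leading container name from the rest. A minimal standalone sketch of that first-segment split, assuming `bucket.Split`-style semantics (the helper below is hypothetical, not rclone's `lib/bucket`):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// firstSegmentSplit separates the leading path segment (the container or
// bucket name) from the remainder, a sketch of what bucket.Split is used
// for above. Hypothetical helper for illustration only.
func firstSegmentSplit(absPath string) (container, containerPath string) {
	absPath = strings.Trim(absPath, "/")
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	full := path.Join("mycontainer/root", "dir/file.txt")
	c, p := firstSegmentSplit(full)
	fmt.Println(c, p) // mycontainer root/dir/file.txt
}
```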
@@ -679,10 +664,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
SetTier: true,
GetTier: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
fs.Debugf(f, "Using directory markers")
}

// Client options specifying our own transport
policyClientOptions := policy.ClientOptions{
@@ -709,7 +690,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
}
case opt.UseEmulator:
if opt.Account == "" {
@@ -925,7 +906,7 @@ func (f *Fs) cntSVC(containerName string) (containerClient *container.Client) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *container.BlobItem) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *container.BlobItem) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -936,7 +917,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *contain
return nil, err
}
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData(ctx) // reads info and headers, returning an error
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
}
@@ -947,7 +928,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *contain
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}

// getBlobSVC creates a blob client
@@ -972,7 +953,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
}

// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
// Directory markers are 0 length
if size == 0 {
endsWithSlash := strings.HasSuffix(remote, "/")
@@ -983,7 +964,31 @@ func isDirectoryMarker(size int64, metadata map[string]*string, remote string) b
// defacto standard for marking blobs as directories.
// Note also that the metadata hasn't been normalised to lower case yet
for k, v := range metadata {
if v != nil && strings.EqualFold(k, dirMetaKey) && *v == dirMetaValue {
if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
return true
}
}
}
return false
}

// Returns whether file is a directory marker or not using metadata
// with pointers to strings as the SDK seems to use both forms rather
// annoyingly.
//
// NB This is a duplicate of isDirectoryMarker
func isDirectoryMarkerP(size int64, metadata map[string]*string, remote string) bool {
// Directory markers are 0 length
if size == 0 {
endsWithSlash := strings.HasSuffix(remote, "/")
if endsWithSlash || remote == "" {
return true
}
// Note that metadata with hdi_isfolder = true seems to be a
// defacto standard for marking blobs as directories.
// Note also that the metadata hasn't been normalised to lower case yet
for k, pv := range metadata {
if strings.EqualFold(k, "hdi_isfolder") && pv != nil && *pv == "true" {
return true
}
}
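Both variants above implement the same rule: a zero-length blob whose name ends in "/", or whose metadata carries `hdi_isfolder = true`, is treated as a directory marker. A runnable sketch of that rule (illustration only; the real backend keeps two copies purely because the SDK returns metadata as both `map[string]string` and `map[string]*string`):

```go
package main

import (
	"fmt"
	"strings"
)

// isDirMarker mirrors the check above: a zero-length blob whose name ends
// in "/" (or whose metadata carries hdi_isfolder=true) is a directory marker.
func isDirMarker(size int64, metadata map[string]string, remote string) bool {
	if size != 0 {
		return false
	}
	if strings.HasSuffix(remote, "/") || remote == "" {
		return true
	}
	for k, v := range metadata {
		// metadata keys are not yet normalised to lower case
		if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isDirMarker(0, nil, "music/albums/"))                           // true
	fmt.Println(isDirMarker(0, map[string]string{"hdi_isfolder": "true"}, "x")) // true
	fmt.Println(isDirMarker(12, nil, "file.txt"))                               // false
}
```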
@@ -1028,7 +1033,6 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
Prefix: &directory,
MaxResults: &maxResults,
})
foundItems := 0
for pager.More() {
var response container.ListBlobsHierarchyResponse
err := f.pacer.Call(func() (bool, error) {
@@ -1047,7 +1051,6 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
}
// Advance marker to next
// marker = response.NextMarker
foundItems += len(response.Segment.BlobItems)
for i := range response.Segment.BlobItems {
file := response.Segment.BlobItems[i]
// Finish if file name no longer has prefix
@@ -1063,27 +1066,20 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
fs.Debugf(f, "Odd name received %q", remote)
continue
}
isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if isDirectoryMarkerP(*file.Properties.ContentLength, file.Metadata, remote) {
continue // skip directory marker
}
if addContainer {
remote = path.Join(containerName, remote)
}
// Send object
err = fn(remote, file, isDirectory)
err = fn(remote, file, false)
if err != nil {
return err
}
}
// Send the subdirectories
foundItems += len(response.Segment.BlobPrefixes)
for _, remote := range response.Segment.BlobPrefixes {
if remote.Name == nil {
fs.Debugf(f, "Nil prefix received")
@@ -1106,26 +1102,16 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
}
}
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readMetaData(ctx, containerName, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
@@ -1153,7 +1139,7 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -1234,7 +1220,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
list := walk.NewListRHelper(callback)
listR := func(containerName, directory, prefix string, addContainer bool) error {
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -1328,71 +1314,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, container, dir string) error {
if !f.opt.DirectoryMarkers || container == "" {
return nil
}

// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
meta: map[string]string{
dirMetaKey: dirMetaValue,
},
}

for {
_, containerPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if containerPath == "" {
break
}
o.remote = dir + "/"

// Check to see if object already exists
_, err := f.readMetaData(ctx, container, containerPath+"/")
if err == nil {
return nil
}

// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}

// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}

return nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
e := f.makeContainer(ctx, container)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, container, dir)
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeContainer(ctx, container)
}

// makeContainer creates the container if it doesn't exist
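The `createDirectoryMarker` loop above writes one marker per path level, walking upwards with `path.Dir` until it reaches the root. A standalone sketch of that parent walk:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	dir := "a/b/c"
	for {
		// each level would get a zero-length "<dir>/" marker object
		fmt.Println("marker for:", dir+"/")
		dir = path.Dir(dir)
		if dir == "/" || dir == "." {
			break
		}
	}
	// Output:
	// marker for: a/b/c/
	// marker for: a/b/
	// marker for: a/
}
```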
@@ -1492,18 +1417,6 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && container != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if container == "" || directory != "" {
return nil
}
@@ -1545,7 +1458,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote)
err := f.makeContainer(ctx, dstContainer)
if err != nil {
return nil, err
}
@@ -1558,8 +1471,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcBlobSVC := srcObj.getBlobSVC()
srcURL := srcBlobSVC.URL()

tier := blob.AccessTier(f.opt.AccessTier)
options := blob.StartCopyFromURLOptions{
Tier: parseTier(f.opt.AccessTier),
Tier: &tier,
}
var startCopy blob.StartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
@@ -1638,15 +1552,12 @@ func (o *Object) Size() int64 {
return o.size
}

// Set o.metadata from metadata
func (o *Object) setMetadata(metadata map[string]*string) {
func (o *Object) setMetadata(metadata map[string]string) {
if len(metadata) > 0 {
// Lower case the metadata
o.meta = make(map[string]string, len(metadata))
for k, v := range metadata {
if v != nil {
o.meta[strings.ToLower(k)] = *v
}
o.meta[strings.ToLower(k)] = v
}
// Set o.modTime from metadata if it exists and
// UseServerModTime isn't in use.
@@ -1662,17 +1573,20 @@ func (o *Object) setMetadata(metadata map[string]*string) {
}
}

// Get metadata from o.meta
func (o *Object) getMetadata() (metadata map[string]*string) {
if len(o.meta) == 0 {
return nil
// Duplicte of setMetadata but taking pointers to strings
func (o *Object) setMetadataP(metadata map[string]*string) {
if len(metadata) > 0 {
// Convert the format of the metadata
newMeta := make(map[string]string, len(metadata))
for k, v := range metadata {
if v != nil {
newMeta[k] = *v
}
}
o.setMetadata(newMeta)
} else {
o.meta = nil
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata
}

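The `setMetadataP` wrapper above only normalises the SDK's pointer-valued metadata into plain strings before delegating. A minimal sketch of that conversion (not the rclone implementation):

```go
package main

import "fmt"

// toValues converts the SDK's map[string]*string form into plain
// map[string]string, skipping nil values, which is the same normalisation
// the setMetadataP wrapper performs before calling setMetadata.
func toValues(in map[string]*string) map[string]string {
	out := make(map[string]string, len(in))
	for k, v := range in {
		if v != nil {
			out[k] = *v
		}
	}
	return out
}

func main() {
	val := "true"
	m := map[string]*string{"hdi_isfolder": &val, "empty": nil}
	fmt.Println(toValues(m)) // map[hdi_isfolder:true]
}
```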
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
@@ -1782,7 +1696,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
} else {
size = *info.Properties.ContentLength
}
if isDirectoryMarker(size, metadata, o.remote) {
if isDirectoryMarkerP(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
@@ -1804,7 +1718,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
} else {
o.accessTier = *info.Properties.AccessTier
}
o.setMetadata(metadata)
o.setMetadataP(metadata)

return nil
}
@@ -1815,34 +1729,17 @@ func (o *Object) getBlobSVC() *blob.Client {
return o.fs.getBlobSVC(container, directory)
}

// getBlockBlobSVC creates a block blob client
func (o *Object) getBlockBlobSVC() *blockblob.Client {
container, directory := o.split()
return o.fs.getBlockBlobSVC(container, directory)
}

// clearMetaData clears enough metadata so readMetaData will re-read it
func (o *Object) clearMetaData() {
o.modTime = time.Time{}
}

// readMetaData gets the metadata if it hasn't already been fetched
func (f *Fs) readMetaData(ctx context.Context, container, containerPath string) (blobProperties blob.GetPropertiesResponse, err error) {
if !f.containerOK(container) {
return blobProperties, fs.ErrorObjectNotFound
}
blb := f.getBlobSVC(container, containerPath)

// Read metadata (this includes metadata)
options := blob.GetPropertiesOptions{}
err = f.pacer.Call(func() (bool, error) {
blobProperties, err = blb.GetProperties(ctx, &options)
return f.shouldRetry(ctx, err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
return blobProperties, fs.ErrorObjectNotFound
}
return blobProperties, err
}
return blobProperties, nil
}

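Throughout these hunks, API calls are wrapped in `f.pacer.Call`, which retries the closure while it reports `(true, err)`. A standalone sketch of that contract with a stub in place of rclone's pacer:

```go
package main

import (
	"errors"
	"fmt"
)

// call retries fn while it reports the error as retryable, mimicking the
// shape of the pacer.Call contract used above. Sketch only; the real pacer
// also rate-limits and backs off between attempts.
func call(fn func() (retry bool, err error)) error {
	for {
		retry, err := fn()
		if !retry {
			return err
		}
	}
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("transient")
		}
		return false, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```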
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1851,15 +1748,33 @@ func (f *Fs) readMetaData(ctx context.Context, container, containerPath string)
// o.modTime
// o.size
// o.md5
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
container, containerPath := o.split()
blobProperties, err := o.fs.readMetaData(ctx, container, containerPath)
blb := o.getBlobSVC()
// fs.Debugf(o, "Blob URL = %q", blb.URL())

// Read metadata (this includes metadata)
options := blob.GetPropertiesOptions{}
ctx := context.Background()
var blobProperties blob.GetPropertiesResponse
err = o.fs.pacer.Call(func() (bool, error) {
blobProperties, err = blb.GetProperties(ctx, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
return fs.ErrorObjectNotFound
}
return err
}

return o.decodeMetaDataFromPropertiesResponse(&blobProperties)
}

@@ -1869,7 +1784,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData(ctx)
_ = o.readMetaData()
return o.modTime
}

@@ -1885,7 +1800,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
blb := o.getBlobSVC()
opt := blob.SetMetadataOptions{}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
_, err := blb.SetMetadata(ctx, o.meta, &opt)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
@@ -1956,6 +1871,48 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return downloadResponse.Body, nil
}

// poolWrapper wraps a pool.Pool as an azblob.TransferManager
type poolWrapper struct {
pool *pool.Pool
bufToken chan struct{}
runToken chan struct{}
}

// newPoolWrapper creates an azblob.TransferManager that will use a
// pool.Pool with maximum concurrency as specified.
func (f *Fs) newPoolWrapper(concurrency int) *poolWrapper {
return &poolWrapper{
pool: f.pool,
bufToken: make(chan struct{}, concurrency),
runToken: make(chan struct{}, concurrency),
}
}

// Get implements TransferManager.Get().
func (pw *poolWrapper) Get() []byte {
pw.bufToken <- struct{}{}
return pw.pool.Get()
}

// Put implements TransferManager.Put().
func (pw *poolWrapper) Put(b []byte) {
pw.pool.Put(b)
<-pw.bufToken
}

// Run implements TransferManager.Run().
func (pw *poolWrapper) Run(f func()) {
pw.runToken <- struct{}{}
go func() {
f()
<-pw.runToken
}()
}

// Close implements TransferManager.Close().
func (pw *poolWrapper) Close() {
}

// Converts a string into a pointer to a string
func pString(s string) *string {
return &s
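The `poolWrapper` above bounds concurrency with buffered channels used as token buckets: a send blocks once `concurrency` tokens are outstanding, and receiving releases a slot. The same pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const concurrency = 2
	tokens := make(chan struct{}, concurrency)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		tokens <- struct{}{} // blocks while 2 jobs are in flight
		go func(n int) {
			defer wg.Done()
			defer func() { <-tokens }() // release the token
			fmt.Println("job", n)
		}(i)
	}
	wg.Wait()
}
```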
@@ -2137,9 +2094,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
return err
}

tier := blob.AccessTier(o.fs.opt.AccessTier)
options := blockblob.CommitBlockListOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
Metadata: o.meta,
Tier: &tier,
HTTPHeaders: httpHeaders,
}

@@ -2183,9 +2141,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}

tier := blob.AccessTier(o.fs.opt.AccessTier)
options := blockblob.UploadOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
Metadata: o.meta,
Tier: &tier,
HTTPHeaders: httpHeaders,
}

@@ -2215,17 +2174,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
_, isDirMarker := o.meta[dirMetaKey]
if !isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
}

// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
@@ -2273,7 +2227,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize

fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
} else {
@@ -2284,12 +2237,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Refresh metadata on object
if !isDirMarker {
o.clearMetaData()
err = o.readMetaData(ctx)
if err != nil {
return err
}
o.clearMetaData()
err = o.readMetaData()
if err != nil {
return err
}

// If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
@@ -2363,14 +2314,6 @@ func (o *Object) GetTier() string {
return string(o.accessTier)
}

func parseTier(tier string) *blob.AccessTier {
if tier == "" {
return nil
}
msTier := blob.AccessTier(tier)
return &msTier
}

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}

@@ -9,7 +9,6 @@ import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -26,25 +25,6 @@ func TestIntegration(t *testing.T) {
})
}

// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

@@ -27,7 +27,6 @@ import (
"sync/atomic"
"time"

"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)

const (
@@ -76,11 +76,6 @@ var (
}
)

type boxCustomClaims struct {
jwt.RegisteredClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -183,7 +178,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}

@@ -199,29 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil
}

func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}

claims = &boxCustomClaims{
RegisteredClaims: jwt.RegisteredClaims{
ID: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: jwt.ClaimStrings{tokenURL},
ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
BoxSubType: boxSubType,
}

return claims, nil
}

func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}

return signingHeaders
}

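One side of this diff builds the JWT with `github.com/golang-jwt/jwt/v4` custom claims, the other with `golang.org/x/oauth2/jws`. A minimal sketch of constructing and signing the `boxCustomClaims` form; the client ID, enterprise ID, key ID and generated RSA key are placeholders, and `tokenURL` is assumed to be Box's OAuth2 token endpoint:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// boxCustomClaims mirrors the struct in the hunk above: standard registered
// claims plus Box's box_sub_type private claim.
type boxCustomClaims struct {
	jwt.RegisteredClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048) // placeholder key
	const tokenURL = "https://api.box.com/oauth2/token"
	claims := &boxCustomClaims{
		RegisteredClaims: jwt.RegisteredClaims{
			ID:        "random-jti",      // placeholder for jwtutil.RandomHex(20)
			Issuer:    "client-id",       // placeholder
			Subject:   "enterprise-id",   // placeholder
			Audience:  jwt.ClaimStrings{tokenURL},
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(45 * time.Second)),
		},
		BoxSubType: "enterprise",
	}
	tok := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	tok.Header["kid"] = "public-key-id" // placeholder signing header
	signed, err := tok.SignedString(key)
	fmt.Println(len(signed) > 0, err) // true <nil>
}
```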
backend/cache/cache.go (vendored, 2 lines changed)
@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}

// StopBackgroundRunners will signal all the runners to stop their work
// StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false
backend/cache/cache_internal_test.go (vendored, 21 lines changed)
@@ -1098,6 +1098,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}

func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()

out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()

_, err = io.Copy(out, in)
return err
}

func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error

@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine

/*
@@ -233,7 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -290,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}

// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}

// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
@@ -310,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}

// show that we wrap other backends
features.Overlay = true

f.features = features

// Get common intersection of hashes
@@ -365,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}

// join the elements together but unlike path.Join return empty string
// join the elements together but unline path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
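`path.Join()` with empty elements returns ".", which the `join` wrapper above maps back to the empty string. A standalone sketch:

```go
package main

import (
	"fmt"
	"path"
)

// join wraps path.Join but returns "" instead of "." for empty input,
// matching the behaviour described in the comment above.
func join(elem ...string) string {
	result := path.Join(elem...)
	if result == "." {
		return ""
	}
	return result
}

func main() {
	fmt.Printf("%q %q\n", join("", ""), join("a", "b")) // "" "a/b"
}
```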
@@ -901,100 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
})
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}

// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -1024,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err)
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
@@ -1096,10 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)

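`PublicLink`, `PutUnchecked`, `MergeDirs` and `OpenWriterAt` above all share one delegation shape: look the optional feature up on the upstream and return `fs.ErrorNotImplemented` when the function pointer is nil. A generic sketch of that pattern (stub types, not rclone's):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("optional feature not implemented")

// upstream stands in for a wrapped backend; cleanUp is nil when the
// backend does not support the optional feature.
type upstream struct {
	cleanUp func() error
}

func cleanUp(u *upstream) error {
	do := u.cleanUp
	if do == nil {
		return errNotImplemented
	}
	return do()
}

func main() {
	fmt.Println(cleanUp(&upstream{}))                                      // feature missing
	fmt.Println(cleanUp(&upstream{cleanUp: func() error { return nil }})) // <nil>
}
```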
@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)

var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}

@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}

@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

@@ -186,7 +186,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)

// Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {

// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}

// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
c.buffers.New = func() interface{} {
return new([blockSize]byte)
return make([]byte, blockSize)
}
err := c.Key(password, salt)
if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}

// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}

// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}

// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt.
// slighty harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
}

// getBlock gets a block from the pool of size blockSize
|
||||
func (c *Cipher) getBlock() *[blockSize]byte {
|
||||
return c.buffers.Get().(*[blockSize]byte)
|
||||
func (c *Cipher) getBlock() []byte {
|
||||
return c.buffers.Get().([]byte)
|
||||
}
|
||||
|
||||
// putBlock returns a block to the pool of size blockSize
|
||||
func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
||||
func (c *Cipher) putBlock(buf []byte) {
|
||||
if len(buf) != blockSize {
|
||||
panic("bad blocksize returned to pool")
|
||||
}
|
||||
c.buffers.Put(buf)
|
||||
}
|
||||
|
||||
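The two getBlock/putBlock variants above differ in what the pool stores: a *[blockSize]byte on one side, a plain []byte on the other. Storing a pointer to a fixed-size array keeps the pooled value a single pointer, so Put and Get do not allocate to box a slice header into the pool's interface{}. A minimal, self-contained sketch of the pointer-to-array pattern (blockSize value here is illustrative only):

package main

import (
	"fmt"
	"sync"
)

const blockSize = 64 * 1024 // illustrative; the real constant lives in cipher.go

var buffers = sync.Pool{
	// Returning *[blockSize]byte keeps the pooled value a single
	// pointer, so Put/Get avoid boxing a slice header.
	New: func() interface{} { return new([blockSize]byte) },
}

func main() {
	buf := buffers.Get().(*[blockSize]byte)
	copy((*buf)[:], []byte("hello"))
	fmt.Println(string((*buf)[:5]))
	buffers.Put(buf) // return the same pointer to the pool
}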
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix
return in + encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
}
decrypted := in[:remainingLength]
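In NameEncryptionOff mode the "encryption" of a name is just appending the configured suffix, and decryption strips it again, rejecting names that do not carry it. A standalone sketch of that round trip (errNotEncrypted is a stand-in for the backend's ErrorNotAnEncryptedFile):

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNotEncrypted = errors.New("not an encrypted file") // stand-in error

func encryptName(in, suffix string) string { return in + suffix }

func decryptName(in, suffix string) (string, error) {
	remaining := len(in) - len(suffix)
	// A name that is nothing but the suffix (remaining == 0) is rejected too.
	if remaining == 0 || !strings.HasSuffix(in, suffix) {
		return "", errNotEncrypted
	}
	return in[:remaining], nil
}

func main() {
	enc := encryptName("1/12/123", ".bin")
	dec, err := decryptName(enc, ".bin")
	fmt.Println(enc, dec, err) // 1/12/123.bin 1/12/123 <nil>
}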
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:])
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
}
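The swap between readers.ReadFill and io.ReadFull above matters for the error text the tests assert on: io.ReadFull reports io.ErrUnexpectedEOF on a partial read and io.EOF only when nothing was read at all, while rclone's ReadFill keeps reading until the buffer is full or an error occurs and hands back whatever error it hit. A small sketch of the io.ReadFull behaviour:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	nonce := make([]byte, 24)

	// Source shorter than the nonce: ReadFull returns the bytes it got
	// plus io.ErrUnexpectedEOF (or io.EOF if it got nothing at all).
	n, err := io.ReadFull(strings.NewReader("123456789abcdefghijklmn"), nonce)
	fmt.Println(n, err) // 23 unexpected EOF

	n, err = io.ReadFull(strings.NewReader(""), nonce)
	fmt.Println(n, err) // 0 EOF
}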
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes)
copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil
}

@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err
// data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
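Each plaintext block is sealed with NaCl secretbox under a per-block nonce, which is why the ciphertext block is blockHeaderSize (the secretbox overhead) larger than its payload. A minimal sketch of sealing and opening one block, assuming nothing beyond golang.org/x/crypto/nacl/secretbox:

package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte   // derived from the password with scrypt in the real backend
	var nonce [24]byte // random per file, incremented per block

	plain := []byte("block payload")
	// Seal appends the ciphertext to its first argument; the result is
	// len(plain) + secretbox.Overhead (16) bytes long.
	sealed := secretbox.Seal(nil, plain, &nonce, &key)
	fmt.Println(len(sealed) - len(plain)) // 16

	opened, ok := secretbox.Open(nil, sealed, &nonce, &key)
	fmt.Println(ok, string(opened)) // true block payload
}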
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil {
} else if err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {

// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
return ErrorEncryptedBadBlock
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}

// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can

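The calculateUnderlying comment above describes a pure arithmetic mapping: a plaintext offset falls inside some block, the encrypted stream stores a file header plus one sealed block per blockDataSize of plaintext, and the bytes before the offset within its block must be read and discarded. A hedged sketch of that mapping — the constants and return shape are illustrative assumptions, not copied from the backend:

package main

import "fmt"

const (
	fileHeaderSize  = 32        // magic + nonce, assumed for illustration
	blockHeaderSize = 16        // secretbox overhead per block
	blockDataSize   = 64 * 1024 // plaintext bytes per block, assumed
	blockSize       = blockHeaderSize + blockDataSize
)

// underlying maps a plaintext offset to the encrypted-file offset,
// the bytes to discard after decrypting the first block, and the
// number of blocks skipped (used to advance the nonce).
func underlying(offset int64) (underlyingOffset, discard, blocks int64) {
	blocks = offset / blockDataSize
	discard = offset % blockDataSize
	underlyingOffset = fileHeaderSize + blocks*blockSize
	return underlyingOffset, discard, blocks
}

func main() {
	fmt.Println(underlying(0))          // 32 0 0
	fmt.Println(underlying(65536 + 10)) // 65584 10 1
}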
@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
if test.expectedErr == "" {
assert.NoError(t, actualErr)
} else {
assert.EqualError(t, actualErr, test.expectedErr)
assert.Error(t, actualErr, test.expectedErr)
}
}
}
@@ -405,13 +405,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +483,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string
expected string
expectedErr error
customSuffix string
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -739,7 +726,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}

func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1050,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
_, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1")
assert.Error(t, err, "Error in stream")
}

type zeroes struct{}
@@ -1180,13 +1167,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])

// Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}

// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1237,7 +1224,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed)
}

@@ -1245,7 +1232,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, "potato")
assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed)

// bad magic
@@ -1256,7 +1243,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed)
}
@@ -1508,10 +1495,8 @@ func TestDecrypterRead(t *testing.T) {
case i == fileHeaderSize:
// This would normally produce an error *except* on the first block
expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default:
expectedErr = ErrorEncryptedBadBlock
expectedErr = io.ErrUnexpectedEOF
}
if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1529,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, "potato")
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)

// Test corrupting the input
@@ -1540,26 +1525,15 @@ func TestDecrypterRead(t *testing.T) {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
}

// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}
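The passBadBlocks test above exercises the recovery path: with the option on, an undecryptable block comes back as zeroes instead of failing the whole read (as a backend option named pass_bad_blocks it would presumably surface on the command line as --crypt-pass-bad-blocks, following rclone's usual option-to-flag naming). A self-contained sketch of the zero-fill idea, with illustrative names:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// recoverBlock mirrors the pass-bad-blocks idea: if a block fails
// authentication, return zeroes of the right length instead of an
// error so the rest of the file can still be salvaged. Sketch only.
func recoverBlock(sealed []byte, nonce *[24]byte, key *[32]byte, passBadBlocks bool) ([]byte, error) {
	if plain, ok := secretbox.Open(nil, sealed, nonce, key); ok {
		return plain, nil
	}
	if !passBadBlocks {
		return nil, errors.New("encrypted block failed authentication")
	}
	return make([]byte, len(sealed)-secretbox.Overhead), nil
}

func main() {
	var key [32]byte
	var nonce [24]byte
	sealed := secretbox.Seal(nil, []byte("data"), &nonce, &key)
	sealed[len(sealed)-1] ^= 0xFF // corrupt one byte
	plain, err := recoverBlock(sealed, &nonce, &key, true)
	fmt.Println(plain, err) // [0 0 0 0] <nil>
}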

func TestDecrypterClose(t *testing.T) {
@@ -1580,7 +1554,7 @@ func TestDecrypterClose(t *testing.T) {

// double close
err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error())
assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed)

// try again reading the file this time
@@ -1607,6 +1581,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock()
c.putBlock(block)
c.putBlock(block)

assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
}

func TestKey(t *testing.T) {

@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
},
},
}, {
@@ -79,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.

Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -121,15 +119,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.

This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -149,18 +138,10 @@ length and if it's case sensitive.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".

Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
@@ -193,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}

@@ -268,7 +247,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

return f, err
@@ -284,9 +262,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
}

// Fs represents a wrapped fs.Fs
@@ -478,7 +454,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}

@@ -202,7 +202,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}

if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -499,9 +499,7 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.

Allow server-side operations (e.g. copy) to work across different drive configs.
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.

This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
@@ -600,18 +598,6 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})

@@ -668,7 +654,6 @@ type Options struct {
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}

// Fs represents a remote drive server
@@ -776,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1137,12 +1122,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
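The EnvAuth branch above leans on Google's Application Default Credentials: google.DefaultClient checks the GOOGLE_APPLICATION_CREDENTIALS file, then gcloud's cached user credentials, then the instance metadata server on GCP. A minimal standalone sketch of that call (the scope value is illustrative):

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// DefaultClient resolves credentials from the environment: the
	// GOOGLE_APPLICATION_CREDENTIALS file, gcloud's cached login, or
	// the metadata server when running on GCP.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		log.Fatalf("failed to create client from environment: %v", err)
	}
	_ = client // hand the *http.Client to the Drive service constructor
}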
@@ -1514,9 +1493,6 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -2904,7 +2880,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3886,7 +3861,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil {
return err
}

@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)

sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {

@@ -13,6 +13,7 @@ import (
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -139,12 +140,55 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
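finishBatchJobStatus above is a capped exponential-backoff poller: start at 100ms, double up to a 1s ceiling, and give up once the commit-timeout budget is spent. The shape is reusable independent of Dropbox; a generic sketch with an illustrative check function:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil polls check with exponential backoff capped at maxSleep,
// giving up after budget has elapsed. check is any caller-supplied
// probe that reports done=true on success.
func pollUntil(budget, maxSleep time.Duration, check func() (done bool, err error)) error {
	sleep := 100 * time.Millisecond
	start := time.Now()
	for time.Since(start) < budget {
		done, err := check()
		if err == nil && done {
			return nil
		}
		time.Sleep(sleep)
		if sleep *= 2; sleep > maxSleep {
			sleep = maxSleep // cap the backoff
		}
	}
	return errors.New("poll: budget exhausted before completion")
}

func main() {
	n := 0
	err := pollUntil(2*time.Second, time.Second, func() (bool, error) {
		n++
		return n >= 3, nil // pretend the job completes on the third check
	})
	fmt.Println(n, err)
}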

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}

@@ -58,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond)
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -260,8 +260,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.

- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
@@ -271,11 +271,6 @@ default based on the batch_mode in use.
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -304,7 +299,6 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -448,7 +442,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
@@ -542,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the mount failed we have to abort here
// if the moint failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -725,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -912,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)

@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -476,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err)
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}

// fs.Debugf(f, "Got Upload node")

@@ -54,11 +54,6 @@ func init() {
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -94,7 +89,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -339,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, can't upload")
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}

@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request

@@ -315,33 +315,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}

// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}

// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
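The textprotoError helper above is the heart of this FTP hunk: errors.As unwraps wrapped error chains, so a *textproto.Error is still recognised after being decorated with fmt.Errorf("...: %w", err), where the older type switch on err.(type) would miss it. A small sketch of the difference:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

func main() {
	base := &textproto.Error{Code: 421, Msg: "service not available"}
	wrapped := fmt.Errorf("open: %w", base)

	// The old style: a direct type assertion fails on the wrapped error.
	_, ok := wrapped.(*textproto.Error)
	fmt.Println(ok) // false

	// errors.As walks the chain and finds the *textproto.Error.
	var tpErr *textproto.Error
	if errors.As(wrapped, &tpErr) {
		fmt.Println(tpErr.Code) // 421
	}
}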

// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if isRetriableFtpError(err) {
return true, err
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
@@ -478,7 +463,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -580,7 +566,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -628,7 +613,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {

// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -639,7 +625,8 @@ func translateErrorFile(err error) error {

// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -693,12 +680,6 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
@@ -936,7 +917,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1105,7 +1087,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Debugf(o.fs, "SetModTime is not supported")
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1177,7 +1159,8 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1203,26 +1186,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}

var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}

fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
@@ -1255,10 +1227,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {

@@ -82,8 +82,7 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -93,9 +92,6 @@ func init() {
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -301,15 +297,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -343,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
@@ -361,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -374,8 +349,6 @@ type Options struct {
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}

// Fs represents a remote storage server
@@ -471,7 +444,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -527,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -557,9 +525,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}

// Create a new authorized Drive client.
f.client = oAuthClient
@@ -576,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
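The recurring UserProject edits in this file all follow one pattern: keep the google-api-go-client call builder in a variable, conditionally chain UserProject onto it for requester-pays buckets, then Do(). A hedged sketch of the shape — svc setup is elided and userProject is illustrative:

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// getObject shows the conditional builder chain, assuming svc is an
// initialised *storage.Service from google.golang.org/api/storage/v1.
func getObject(ctx context.Context, svc *storage.Service, bucket, object, userProject string) (*storage.Object, error) {
	get := svc.Objects.Get(bucket, object).Context(ctx)
	if userProject != "" {
		// Requester-pays buckets need a project to bill the call to.
		get = get.UserProject(userProject)
	}
	return get.Do()
}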
@@ -640,13 +601,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -662,7 +619,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -683,29 +639,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}

err = fn(remote, object, isDirectory)
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
@@ -715,17 +664,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}

return nil
}

@@ -769,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -889,69 +824,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}

// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}

for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"

// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}

// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}

// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}

return nil
}

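createDirectoryMarker above walks up the path, writing a zero-byte object named `<dir>/` at each level that lacks one — which is how bucket-based stores fake empty folders. A compact sketch of just the parent walk, with exists and upload as caller-supplied stand-ins for the object probe and the zero-byte upload:

import (
	"fmt"
	"path"
)

// createMarkers sketches the parent walk; exists reports whether a
// marker object is present and upload writes a zero-byte one.
func createMarkers(dir string, exists func(string) bool, upload func(string) error) error {
	for dir != "" && dir != "/" && dir != "." {
		if !exists(dir + "/") {
			if err := upload(dir + "/"); err != nil {
				return fmt.Errorf("creating directory marker failed: %w", err)
			}
		}
		dir = path.Dir(dir) // climb to the parent and repeat
	}
	return nil
}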
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)

}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
@@ -960,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
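The existence probe above deliberately lists at most one object rather than calling Buckets.Get: an objects.list needs only object-level permissions, so a service account holding just the Storage Object Admin role can pass the check. A hedged sketch of the probe, assuming an initialised *storage.Service:

import (
	"context"
	"errors"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// bucketExists probes a bucket with a one-object list; any successful
// response, even an empty listing, proves the bucket is reachable
// with object-level rights only.
func bucketExists(ctx context.Context, svc *storage.Service, bucket string) (bool, error) {
	_, err := svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
	if err == nil {
		return true, nil
	}
	var gErr *googleapi.Error
	if errors.As(err, &gErr) && gErr.Code == 404 {
		return false, nil // bucket genuinely absent
	}
	return false, err // permission or transport problem
}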
@@ -999,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
insertBucket = insertBucket.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
insertBucket = insertBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
_, err = insertBucket.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}, nil)
|
||||
@@ -1023,28 +891,12 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
|
||||
// to delete was not empty.
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
bucket, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
}
|
||||
fs.Debugf(o, "Removing directory marker")
|
||||
err := o.Remove(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
||||
}
|
||||
}
|
||||
if bucket == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Remove(bucket, func() error {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
})
|
||||
@@ -1066,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
dstBucket, dstPath := f.split(remote)
|
||||
err := f.mkdirParent(ctx, remote)
|
||||
err := f.checkBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1090,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var rewriteResponse *storage.RewriteResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
rewriteRequest = rewriteRequest.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
rewriteRequest.UserProject(f.opt.UserProject)
|
||||
}
|
||||
rewriteResponse, err = rewriteRequest.Do()
|
||||
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1204,17 +1052,8 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
|
||||
}
|
||||
|
||||
// readObjectInfo reads the definition for an object
|
||||
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
object, err = get.Do()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1286,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
copyObject = copyObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = copyObject.Do()
|
||||
newObject, err = copyObject.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1307,9 +1142,6 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    if o.fs.opt.UserProject != "" {
        o.url = o.url + "&userProject=" + o.fs.opt.UserProject
    }
    req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
    if err != nil {
        return nil, err
@@ -1353,14 +1185,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    bucket, bucketPath := o.split()
    // Create parent dir/bucket if not saving directory marker
    if !strings.HasSuffix(o.remote, "/") {
        err = o.fs.mkdirParent(ctx, o.remote)
        if err != nil {
            return err
        }
    err := o.fs.checkBucket(ctx, bucket)
    if err != nil {
        return err
    }
    modTime := src.ModTime(ctx)

@@ -1405,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        if !o.fs.opt.BucketPolicyOnly {
            insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
        }
        insertObject = insertObject.Context(ctx)
        if o.fs.opt.UserProject != "" {
            insertObject = insertObject.UserProject(o.fs.opt.UserProject)
        }
        newObject, err = insertObject.Do()
        newObject, err = insertObject.Context(ctx).Do()
        return shouldRetry(ctx, err)
    })
    if err != nil {
@@ -1424,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
    bucket, bucketPath := o.split()
    err = o.fs.pacer.Call(func() (bool, error) {
        deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
        if o.fs.opt.UserProject != "" {
            deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
        }
        err = deleteBucket.Do()
        err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
        return shouldRetry(ctx, err)
    })
    return err

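For context, the Open hunk above appends userProject to the download URL by plain string concatenation. A hedged alternative sketch using net/url (an alternative technique, not what the patch does), which re-encodes safely and stays idempotent across repeated calls:

package main

import (
    "fmt"
    "net/url"
)

// addUserProject appends a userProject query parameter to a download URL.
// Unlike plain string concatenation it handles encoding and never appends
// the same parameter twice.
func addUserProject(rawURL, project string) (string, error) {
    u, err := url.Parse(rawURL)
    if err != nil {
        return "", err
    }
    q := u.Query()
    q.Set("userProject", project)
    u.RawQuery = q.Encode()
    return u.String(), nil
}

func main() {
    // URL and project are illustrative placeholders.
    out, err := addUserProject("https://storage.googleapis.com/bucket/obj?alt=media", "my-project")
    if err != nil {
        panic(err)
    }
    fmt.Println(out)
}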
@@ -6,7 +6,6 @@ import (
    "testing"

    "github.com/rclone/rclone/backend/googlecloudstorage"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
)

@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
        NilObject: (*googlecloudstorage.Object)(nil),
    })
}

func TestIntegration2(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    name := "TestGoogleCloudStorage"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*googlecloudstorage.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "directory_markers", Value: "true"},
        },
    })
}

@@ -166,7 +166,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
    ReadMetadata:   true,
    WriteMetadata:  true,
    UserMetadata:   true,
    PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)


@@ -42,9 +42,9 @@ for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Help: `Kerberos data transfer protection: authentication|integrity|privacy.

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption is required when communicating the the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
    Value: "privacy",
    Help:  "Ensure authentication, integrity and encryption enabled.",

@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
    return &result, nil
}

// copyDirectory copies the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
    return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}

// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//

@@ -2,7 +2,7 @@
package hidrive

// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files or folders with longer names will throw an HTTP error:
// Operations that create files oder folder with longer names will throw a HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.

@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, fmt.Errorf("could not access root-prefix: %w", err)
    }
    if item.Type != api.HiDriveObjectTypeDirectory {
        return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
        return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
    }
}


@@ -495,7 +495,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    add(file)
case fs.ErrorNotAFile:
    // ...found a directory not a file
    add(fs.NewDir(remote, time.Time{}))
    add(fs.NewDir(remote, timeUnset))
default:
    fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -507,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
    add(fs.NewDir(remote, time.Time{}))
    add(fs.NewDir(remote, timeUnset))
} else {
    in <- remote
}

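Several hunks in this compare swap time.Unix(0, 0) (or a timeUnset alias for it) for time.Time{} as the "unknown" directory time. The distinction matters because only the zero value reports IsZero; a tiny runnable illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    // time.Time{} is the zero value (January 1, year 1) and reports IsZero,
    // so callers can detect "modification time unknown". time.Unix(0, 0) is
    // the 1970 epoch and does not.
    fmt.Println(time.Time{}.IsZero())     // true
    fmt.Println(time.Unix(0, 0).IsZero()) // false
}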
@@ -1838,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
    // if the object exists delete it
    err = o.remove(ctx, true)
    if err != nil && err != fs.ErrorObjectNotFound {
        // if delete failed then report that, unless it was because the file did not exist after all
    if err != nil {
        return fmt.Errorf("failed to remove old object: %w", err)
    }
} else if err != fs.ErrorObjectNotFound {
    // if the object does not exist we can just continue but if the error is something different we should report that
}
    // if the object does not exist we can just continue but if the error is something different we should report that
    if err != fs.ErrorObjectNotFound {
        return err
    }
}
@@ -1930,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    o.md5 = result.Md5
    o.modTime = time.Unix(result.Modified/1000, 0)
} else {
    // If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
    // If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
    return o.readMetaData(ctx, true)
}

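The restructured error handling above boils down to "delete the old object, but tolerate it already being gone". A generic sketch of that pattern (the remove callback and the sentinel error are stand-ins, not the backend's real types):

package main

import (
    "errors"
    "fmt"
)

// errObjectNotFound stands in for fs.ErrorObjectNotFound.
var errObjectNotFound = errors.New("object not found")

// removeOld deletes an existing object before a re-upload, tolerating the
// case where the object vanished in the meantime, like the hunk above.
func removeOld(remove func() error) error {
    err := remove()
    if err != nil && !errors.Is(err, errObjectNotFound) {
        return fmt.Errorf("failed to remove old object: %w", err)
    }
    return nil
}

func main() {
    fmt.Println(removeOld(func() error { return errObjectNotFound })) // <nil>
    fmt.Println(removeOld(func() error { return errors.New("boom") }))
}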
@@ -1951,17 +1951,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
    opts.Parameters.Set("dl", "true")
}

err := o.fs.pacer.Call(func() (bool, error) {
return o.fs.pacer.Call(func() (bool, error) {
    resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
    return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
    // attempting to hard delete will fail if path does not exist, but standard delete will succeed
    if apiErr.StatusCode == http.StatusNotFound {
        return fs.ErrorObjectNotFound
    }
}
return err
}

// Remove an object

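The rewritten remove() above captures the pacer error instead of returning it directly, so a 404 from a hard delete can be mapped to fs.ErrorObjectNotFound. A self-contained sketch of that mapping, with a stand-in API error type rather than the backend's real api.Error:

package main

import (
    "errors"
    "fmt"
    "net/http"
)

// apiError is a stand-in for the backend's typed API error.
type apiError struct{ StatusCode int }

func (e *apiError) Error() string { return fmt.Sprintf("api error %d", e.StatusCode) }

var errObjectNotFound = errors.New("object not found")

// mapNotFound converts a 404 from the API into the sentinel error the
// rest of the code expects, mirroring the restructured remove() above.
func mapNotFound(err error) error {
    var apiErr *apiError
    if errors.As(err, &apiErr) && apiErr.StatusCode == http.StatusNotFound {
        return errObjectNotFound
    }
    return err
}

func main() {
    fmt.Println(mapNotFound(&apiError{StatusCode: http.StatusNotFound})) // object not found
    fmt.Println(mapNotFound(nil))                                        // <nil>
}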
@@ -376,7 +376,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
    remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
    if file.Type == "dir" {
        entries[i] = fs.NewDir(remote, time.Time{})
        entries[i] = fs.NewDir(remote, time.Unix(0, 0))
    } else {
        entries[i] = &Object{
            fs: f,

@@ -266,10 +266,7 @@ type Object struct {

// ------------------------------------------------------------

var (
    errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
    errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -303,7 +300,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    WriteMetadata:  true,
    UserMetadata:   xattrSupported, // can only R/W general purpose metadata if xattrs are supported
    FilterAware:    true,
    PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
    f.lstat = os.Stat
@@ -314,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
    f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
    fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
    // Handle the odd case, that a symlink was specified by name without the link suffix
    if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
        return nil, errLinksNeedsSuffix
    }
    // It is a file, so use the parent as the root
    f.root = filepath.Dir(f.root)
    // return an error with an fs which points to the parent
@@ -537,10 +524,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
    localPath := filepath.Join(fsDirPath, name)
    fi, err = os.Stat(localPath)
    // Quietly skip errors on excluded files and directories
    if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
        continue
    }
    if os.IsNotExist(err) || isCircularSymlinkError(err) {
        // Skip bad symlinks and circular symlinks
        err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -553,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    }
    mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
    continue
}
if fi.IsDir() {
    // Ignore directories which are symlinks. These are junction points under windows which
    // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -565,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
    newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
    continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
    return nil, err

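The local backend identifies translated symlinks by an ".rclonelink" suffix, as the NewFs hunk above shows. A small sketch of the suffix handling (the helper name is invented for illustration; the suffix itself appears in the patch):

package main

import (
    "fmt"
    "strings"
)

const linkSuffix = ".rclonelink"

// splitLinkSuffix reports whether a path refers to a translated symlink
// and returns the underlying path, mirroring the suffix checks in NewFs.
func splitLinkSuffix(p string) (string, bool) {
    if strings.HasSuffix(p, linkSuffix) {
        return strings.TrimSuffix(p, linkSuffix), true
    }
    return p, false
}

func main() {
    fmt.Println(splitLinkSuffix("file.txt.rclonelink")) // file.txt true
    fmt.Println(splitLinkSuffix("file.txt"))            // file.txt false
}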
@@ -14,7 +14,6 @@ import (
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/hash"
@@ -146,20 +145,6 @@ func TestSymlink(t *testing.T) {
    _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
    require.Equal(t, fs.ErrorObjectNotFound, err)

    // Check that NewFs works with the suffixed version and --links
    f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
        "links": "true",
    })
    require.Equal(t, fs.ErrorIsFile, err)
    require.Equal(t, dir, f2.(*Fs).root)

    // Check that NewFs doesn't see the non suffixed version with --links
    f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
        "links": "true",
    })
    require.Equal(t, errLinksNeedsSuffix, err)
    require.Nil(t, f2)

    // Check reading the object
    in, err := o.Open(ctx)
    require.NoError(t, err)
@@ -410,107 +395,3 @@ func TestFilter(t *testing.T) {
    sort.Sort(entries)
    require.Equal(t, "[included]", fmt.Sprint(entries))
}

func testFilterSymlink(t *testing.T, copyLinks bool) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    defer r.Finalise()
    when := time.Now()
    f := r.Flocal.(*Fs)

    // Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
    r.WriteFile("included.file", "included file", when)
    r.WriteFile("included.dir/included.sub.file", "included sub file", when)
    require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
    require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
    require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))

    defer func() {
        // Reset -L/-l mode
        f.opt.FollowSymlinks = false
        f.opt.TranslateSymlinks = false
        f.lstat = os.Lstat
    }()
    if copyLinks {
        // Set fs into "-L" mode
        f.opt.FollowSymlinks = true
        f.opt.TranslateSymlinks = false
        f.lstat = os.Stat
    } else {
        // Set fs into "-l" mode
        f.opt.FollowSymlinks = false
        f.opt.TranslateSymlinks = true
        f.lstat = os.Lstat
    }

    // Check set up for filtering
    assert.True(t, f.Features().FilterAware)

    // Reset global error count
    accounting.Stats(ctx).ResetErrors()
    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")

    // Add a filter
    ctx, fi := filter.AddConfig(ctx)
    require.NoError(t, fi.AddRule("+ included.file"))
    require.NoError(t, fi.AddRule("+ included.dir/**"))
    if copyLinks {
        require.NoError(t, fi.AddRule("+ included.file.link"))
        require.NoError(t, fi.AddRule("+ included.dir.link/**"))
    } else {
        require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
        require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
    }
    require.NoError(t, fi.AddRule("- *"))

    // Check listing without use filter flag
    entries, err := f.List(ctx, "")
    require.NoError(t, err)

    if copyLinks {
        // Check 1 global errors one for each dangling symlink
        assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
    } else {
        // Check 0 global errors as dangling symlink copied properly
        assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
    }
    accounting.Stats(ctx).ResetErrors()

    sort.Sort(entries)
    if copyLinks {
        require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
    } else {
        require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
    }

    // Add user filter flag
    ctx = filter.SetUseFilter(ctx, true)

    // Check listing with use filter flag
    entries, err = f.List(ctx, "")
    require.NoError(t, err)
    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")

    sort.Sort(entries)
    if copyLinks {
        require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
    } else {
        require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
    }

    // Check listing through a symlink still works
    entries, err = f.List(ctx, "included.dir")
    require.NoError(t, err)
    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")

    sort.Sort(entries)
    require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}

func TestFilterSymlinkCopyLinks(t *testing.T) {
    testFilterSymlink(t, true)
}

func TestFilterSymlinkLinks(t *testing.T) {
    testFilterSymlink(t, false)
}

@@ -5,7 +5,6 @@ package local

import (
    "fmt"
    "runtime"
    "sync"
    "time"

@@ -24,7 +23,7 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
    readMetadataFromFileFn = readMetadataFromFileStatx
} else {
    readMetadataFromFileFn = readMetadataFromFileFstatat
@@ -92,7 +91,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
// unecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))

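The statx hunk above picks an implementation once, at package initialisation, and falls back when the syscall is unavailable. A minimal sketch of that probe-and-select pattern, with a stand-in probe instead of the real unix.Statx call:

package main

import "fmt"

// readMeta is chosen once at startup, mirroring the statx/fstatat
// selection above: probe the fast path and fall back when unsupported.
var readMeta func() string

// probeFastPath stands in for the unix.Statx availability check.
func probeFastPath() bool { return false }

func init() {
    if probeFastPath() {
        readMeta = func() string { return "statx" }
    } else {
        readMeta = func() string { return "fstatat" }
    }
}

func main() { fmt.Println(readMeta()) }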
@@ -90,7 +90,7 @@ permanently delete objects instead.`,
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
HTTPS is normally not necesary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default:  false,
Advanced: true,

@@ -819,8 +819,6 @@ func (f *Fs) getAuth(req *http.Request) error {
    // Set Authorization header
    dataHeader := generateDataHeader(f)
    path := req.URL.RequestURI()
    //lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
    //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
    actionHeader := req.Header["X-Akamai-ACS-Action"][0]
    fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
    req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)

@@ -196,9 +196,7 @@ listing, set this option.`,
}, {
    Name:    "server_side_across_configs",
    Default: false,
    Help: `Deprecated: use --server-side-across-configs instead.

Allow server-side operations (e.g. copy) to work across different onedrive configs.
    Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.

This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
@@ -267,7 +265,7 @@ At the time of writing this only works with OneDrive personal paid accounts.
    Help: `Specify the hash in use for the backend.

This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
default hash which is is QuickXorHash.

Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -303,24 +301,6 @@ rclone.
    Help: "None - don't use any hashes",
    }},
    Advanced: true,
}, {
    Name:    "av_override",
    Default: false,
    Help: `Allows download of files the server thinks has a virus.

The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.

In this case you will see a message like this

server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:

If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`,
    Advanced: true,
}, {
    Name: config.ConfigEncoding,
    Help: config.ConfigEncodingHelp,
@@ -660,7 +640,6 @@ type Options struct {
    LinkType     string `config:"link_type"`
    LinkPassword string `config:"link_password"`
    HashType     string `config:"hash_type"`
    AVOverride   bool `config:"av_override"`
    Enc          encoder.MultiEncoder `config:"encoding"`
}

@@ -1745,10 +1724,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
    token := make(chan struct{}, f.ci.Checkers)
    var wg sync.WaitGroup
    err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
        if err != nil {
            fs.Errorf(f, "Failed to list %q: %v", path, err)
            return nil
        }
        err = entries.ForObjectError(func(obj fs.Object) error {
            o, ok := obj.(*Object)
            if !ok {
@@ -1987,20 +1962,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    var resp *http.Response
    opts := o.fs.newOptsCall(o.id, "GET", "/content")
    opts.Options = options
    if o.fs.opt.AVOverride {
        opts.Parameters = url.Values{"AVOverride": {"1"}}
    }

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)
        return shouldRetry(ctx, resp, err)
    })
    if err != nil {
        if resp != nil {
            if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
                err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
            }
        }
        return nil, err
    }


@@ -61,7 +61,7 @@ func New() hash.Hash {
func (q *quickXorHash) Write(p []byte) (n int, err error) {
    var i int
    // fill last remain
    lastRemain := q.size % dataSize
    lastRemain := int(q.size) % dataSize
    if lastRemain != 0 {
        i += xorBytes(q.data[lastRemain:], p)
    }

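The quickxorhash change above alters how the fill offset of the internal block is computed from the running byte total. A toy illustration of the remainder arithmetic (dataSize here is a stand-in constant, not the hash's real block size):

package main

import "fmt"

const dataSize = 64 // stand-in for the quickxor block size

// fillRemainder shows the buffer-fill arithmetic from Write above:
// how many bytes of the current internal block are already occupied.
func fillRemainder(total int64) int {
    return int(total % dataSize)
}

func main() {
    fmt.Println(fillRemainder(130)) // 2: two bytes into the third block
}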
@@ -289,7 +289,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
}},
}, {
    Name: "sse_kms_key_id",
    Help: `if using your own master key in vault, this header specifies the
    Help: `if using using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,

@@ -589,7 +589,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
        if operations.SkipDestructive(ctx, what, "remove pending upload") {
            continue
        }
        _ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
        ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
        if ignoreErr != nil {
            // fs.Debugf(f, "ignoring error %s", ignoreErr)
        }
    } else {
        // fs.Debugf(f, "ignoring %s", what)
    }
} else {
    fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")

@@ -1,536 +0,0 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
package api

import (
    "fmt"
    "reflect"
    "strconv"
    "time"
)

const (
    // "2022-09-17T14:31:06.056+08:00"
    timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).Format(timeFormat)
    return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
    if string(data) == "null" || string(data) == `""` {
        return nil
    }
    newT, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

// Types of things in Item
const (
    KindOfFolder        = "drive#folder"
    KindOfFile          = "drive#file"
    KindOfFileList      = "drive#fileList"
    KindOfResumable     = "drive#resumable"
    KindOfForm          = "drive#form"
    ThumbnailSizeS      = "SIZE_SMALL"
    ThumbnailSizeM      = "SIZE_MEDIUM"
    ThumbnailSizeL      = "SIZE_LARGE"
    PhaseTypeComplete   = "PHASE_TYPE_COMPLETE"
    PhaseTypeRunning    = "PHASE_TYPE_RUNNING"
    PhaseTypeError      = "PHASE_TYPE_ERROR"
    PhaseTypePending    = "PHASE_TYPE_PENDING"
    UploadTypeForm      = "UPLOAD_TYPE_FORM"
    UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
    ListLimit           = 100
)

// ------------------------------------------------------------

// Error details api error from pikpak
type Error struct {
    Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
    Code int `json:"error_code"`
    URL string `json:"error_url,omitempty"`
    Message string `json:"error_description,omitempty"`
    // can have either of `error_details` or `details`
    ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
    Details []*ErrorDetails `json:"details,omitempty"`
}

// ErrorDetails contains further details of api error
type ErrorDetails struct {
    Type string `json:"@type,omitempty"`
    Reason string `json:"reason,omitempty"`
    Domain string `json:"domain,omitempty"`
    Metadata struct {
    } `json:"metadata,omitempty"` // TODO: undiscovered yet
    Locale string `json:"locale,omitempty"` // e.g. "en"
    Message string `json:"message,omitempty"`
    StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
    Detail string `json:"detail,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
    out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
    if e.Message != "" {
        out += ": " + e.Message
    }
    return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ------------------------------------------------------------

// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
    Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
    Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
    Kind map[string]string `json:"kind,omitempty"` // "eq"
    Starred map[string]bool `json:"starred,omitempty"` // "eq"
    ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}

// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
    if value == "" {
        // UNSET for empty values
        return
    }
    r := reflect.ValueOf(f)
    fd := reflect.Indirect(r).FieldByName(field)
    if v, err := strconv.ParseBool(value); err == nil {
        fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
    } else {
        fd.Set(reflect.ValueOf(map[string]string{operator: value}))
    }
}

// ------------------------------------------------------------
// Common Elements

// Link contains a download URL for opening files
type Link struct {
    URL string `json:"url"`
    Token string `json:"token"`
    Expire Time `json:"expire"`
    Type string `json:"type,omitempty"`
}

// Valid reports whether l is non-nil, has an URL, and is not expired.
func (l *Link) Valid() bool {
    return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}

// URL is a basic form of URL
type URL struct {
    Kind string `json:"kind,omitempty"` // e.g. "upload#url"
    URL string `json:"url,omitempty"`
}

// ------------------------------------------------------------
// Base Elements

// FileList contains a list of File elements
type FileList struct {
    Kind string `json:"kind,omitempty"` // drive#fileList
    Files []*File `json:"files,omitempty"`
    NextPageToken string `json:"next_page_token"`
    Version string `json:"version,omitempty"`
    VersionOutdated bool `json:"version_outdated,omitempty"`
}

// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range-requests
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
    Apps []*FileApp `json:"apps,omitempty"`
    Audit *FileAudit `json:"audit,omitempty"`
    Collection string `json:"collection,omitempty"` // TODO
    CreatedTime Time `json:"created_time,omitempty"`
    DeleteTime Time `json:"delete_time,omitempty"`
    FileCategory string `json:"file_category,omitempty"`
    FileExtension string `json:"file_extension,omitempty"`
    FolderType string `json:"folder_type,omitempty"`
    Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
    IconLink string `json:"icon_link,omitempty"`
    ID string `json:"id,omitempty"`
    Kind string `json:"kind,omitempty"` // "drive#file"
    Links *FileLinks `json:"links,omitempty"`
    Md5Checksum string `json:"md5_checksum,omitempty"`
    Medias []*Media `json:"medias,omitempty"`
    MimeType string `json:"mime_type,omitempty"`
    ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
    Name string `json:"name,omitempty"`
    OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
    OriginalURL string `json:"original_url,omitempty"`
    Params *FileParams `json:"params,omitempty"`
    ParentID string `json:"parent_id,omitempty"`
    Phase string `json:"phase,omitempty"`
    Revision int `json:"revision,omitempty,string"`
    Size int64 `json:"size,omitempty,string"`
    SortName string `json:"sort_name,omitempty"`
    Space string `json:"space,omitempty"`
    SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
    Starred bool `json:"starred,omitempty"`
    ThumbnailLink string `json:"thumbnail_link,omitempty"`
    Trashed bool `json:"trashed,omitempty"`
    UserID string `json:"user_id,omitempty"`
    UserModifiedTime Time `json:"user_modified_time,omitempty"`
    WebContentLink string `json:"web_content_link,omitempty"`
    Writable bool `json:"writable,omitempty"`
}

// FileLinks includes links to file at backend
type FileLinks struct {
    ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}

// FileAudit contains audit information for the file
type FileAudit struct {
    Status string `json:"status,omitempty"` // "STATUS_OK"
    Message string `json:"message,omitempty"`
    Title string `json:"title,omitempty"`
}

// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
    MediaID string `json:"media_id,omitempty"`
    MediaName string `json:"media_name,omitempty"`
    Video struct {
        Height int `json:"height,omitempty"`
        Width int `json:"width,omitempty"`
        Duration int64 `json:"duration,omitempty"`
        BitRate int `json:"bit_rate,omitempty"`
        FrameRate int `json:"frame_rate,omitempty"`
        VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
        AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
        VideoType string `json:"video_type,omitempty"` // "mpegts"
        HdrType string `json:"hdr_type,omitempty"`
    } `json:"video,omitempty"`
    Link *Link `json:"link,omitempty"`
    NeedMoreQuota bool `json:"need_more_quota,omitempty"`
    VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
    RedirectLink string `json:"redirect_link,omitempty"`
    IconLink string `json:"icon_link,omitempty"`
    IsDefault bool `json:"is_default,omitempty"`
    Priority int `json:"priority,omitempty"`
    IsOrigin bool `json:"is_origin,omitempty"`
    ResolutionName string `json:"resolution_name,omitempty"`
    IsVisible bool `json:"is_visible,omitempty"`
    Category string `json:"category,omitempty"`
}

// FileParams includes parameters for instant open
type FileParams struct {
    Duration int64 `json:"duration,omitempty,string"` // in seconds
    Height int `json:"height,omitempty,string"`
    Platform string `json:"platform,omitempty"` // "Upload"
    PlatformIcon string `json:"platform_icon,omitempty"`
    URL string `json:"url,omitempty"`
    Width int `json:"width,omitempty,string"`
}

// FileApp includes parameters for instant open
type FileApp struct {
    ID string `json:"id,omitempty"` // "decompress" for rar files
    Name string `json:"name,omitempty"` // "decompress" for rar files
    Access []interface{} `json:"access,omitempty"`
    Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
    RedirectLink string `json:"redirect_link,omitempty"`
    VipTypes []interface{} `json:"vip_types,omitempty"`
    NeedMoreQuota bool `json:"need_more_quota,omitempty"`
    IconLink string `json:"icon_link,omitempty"`
    IsDefault bool `json:"is_default,omitempty"`
    Params struct {
    } `json:"params,omitempty"` // TODO
    CategoryIds []interface{} `json:"category_ids,omitempty"`
    AdSceneType int `json:"ad_scene_type,omitempty"`
    Space string `json:"space,omitempty"`
    Links struct {
    } `json:"links,omitempty"` // TODO
}

// ------------------------------------------------------------

// TaskList contains a list of Task elements
type TaskList struct {
    Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
    NextPageToken string `json:"next_page_token"`
    ExpiresIn int `json:"expires_in,omitempty"`
}

// Task is a basic element representing a single task such as offline download and upload
type Task struct {
    Kind string `json:"kind,omitempty"` // "drive#task"
    ID string `json:"id,omitempty"` // task id?
    Name string `json:"name,omitempty"` // torrent name?
    Type string `json:"type,omitempty"` // "offline"
    UserID string `json:"user_id,omitempty"`
    Statuses []interface{} `json:"statuses,omitempty"` // TODO
    StatusSize int `json:"status_size,omitempty"` // TODO
    Params *TaskParams `json:"params,omitempty"` // TODO
    FileID string `json:"file_id,omitempty"`
    FileName string `json:"file_name,omitempty"`
    FileSize string `json:"file_size,omitempty"`
    Message string `json:"message,omitempty"` // e.g. "Saving"
    CreatedTime Time `json:"created_time,omitempty"`
    UpdatedTime Time `json:"updated_time,omitempty"`
    ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
    Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
    Progress int `json:"progress,omitempty"`
    IconLink string `json:"icon_link,omitempty"`
    Callback string `json:"callback,omitempty"`
    ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
    Space string `json:"space,omitempty"`
}

// TaskParams includes parameters informing status of Task
type TaskParams struct {
    Age string `json:"age,omitempty"`
    PredictSpeed string `json:"predict_speed,omitempty"`
    PredictType string `json:"predict_type,omitempty"`
    URL string `json:"url,omitempty"`
}

// Form contains parameters for upload by multipart/form-data
type Form struct {
    Headers struct{} `json:"headers"`
    Kind string `json:"kind"` // "drive#form"
    Method string `json:"method"` // "POST"
    MultiParts struct {
        OSSAccessKeyID string `json:"OSSAccessKeyId"`
        Signature string `json:"Signature"`
        Callback string `json:"callback"`
        Key string `json:"key"`
        Policy string `json:"policy"`
        XUserData string `json:"x:user_data"`
    } `json:"multi_parts"`
    URL string `json:"url"`
}

// Resumable contains parameters for upload by resumable
type Resumable struct {
    Kind string `json:"kind,omitempty"` // "drive#resumable"
    Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
    Params *ResumableParams `json:"params,omitempty"`
}

// ResumableParams specifies resumable parameters
type ResumableParams struct {
    AccessKeyID string `json:"access_key_id,omitempty"`
    AccessKeySecret string `json:"access_key_secret,omitempty"`
    Bucket string `json:"bucket,omitempty"`
    Endpoint string `json:"endpoint,omitempty"`
    Expiration Time `json:"expiration,omitempty"`
    Key string `json:"key,omitempty"`
    SecurityToken string `json:"security_token,omitempty"`
}

// FileInArchive is a basic element in archive
type FileInArchive struct {
    Index int `json:"index,omitempty"`
    Filename string `json:"filename,omitempty"`
    Filesize string `json:"filesize,omitempty"`
    MimeType string `json:"mime_type,omitempty"`
    Gcid string `json:"gcid,omitempty"`
    Kind string `json:"kind,omitempty"`
    IconLink string `json:"icon_link,omitempty"`
    Path string `json:"path,omitempty"`
}

// ------------------------------------------------------------

// NewFile is a response to RequestNewFile
type NewFile struct {
    File *File `json:"file,omitempty"`
    Form *Form `json:"form,omitempty"`
    Resumable *Resumable `json:"resumable,omitempty"`
    Task *Task `json:"task,omitempty"` // null in this case
    UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}

// NewTask is a response to RequestNewTask
type NewTask struct {
    UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
    File *File `json:"file,omitempty"` // null in this case
    Task *Task `json:"task,omitempty"`
    URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}

// About informs drive status
type About struct {
    Kind string `json:"kind,omitempty"` // "drive#about"
    Quota *Quota `json:"quota,omitempty"`
    ExpiresAt string `json:"expires_at,omitempty"`
    Quotas struct {
    } `json:"quotas,omitempty"` // maybe []*Quota?
}

// Quota informs drive quota
type Quota struct {
    Kind string `json:"kind,omitempty"` // "drive#quota"
    Limit int64 `json:"limit,omitempty,string"` // limit in bytes
    Usage int64 `json:"usage,omitempty,string"` // bytes in use
    UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
    PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
    PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}

// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
    ShareID string `json:"share_id,omitempty"`
    ShareURL string `json:"share_url,omitempty"`
    PassCode string `json:"pass_code,omitempty"`
    ShareText string `json:"share_text,omitempty"`
}

// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
    Sub string `json:"sub,omitempty"` // userid for internal use
    Name string `json:"name,omitempty"` // Username
    Picture string `json:"picture,omitempty"` // URL to Avatar image
    Email string `json:"email,omitempty"` // redacted email address
    Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
    PhoneNumber string `json:"phone_number,omitempty"`
    Password string `json:"password,omitempty"` // "SET" if configured
    Status string `json:"status,omitempty"` // "ACTIVE"
    CreatedAt Time `json:"created_at,omitempty"`
    PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}

// UserProvider details third-party authentication
type UserProvider struct {
    ID string `json:"id,omitempty"` // e.g. "google.com"
    ProviderUserID string `json:"provider_user_id,omitempty"`
    Name string `json:"name,omitempty"` // username
}

// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
    Result string `json:"result,omitempty"` // "ACCEPTED"
    Message string `json:"message,omitempty"`
    RedirectURI string `json:"redirect_uri,omitempty"`
    Data struct {
        Expire Time `json:"expire,omitempty"`
        Status string `json:"status,omitempty"` // "invalid" or "ok"
        Type string `json:"type,omitempty"` // "novip" or "platinum"
        UserID string `json:"user_id,omitempty"` // same as User.Sub
    } `json:"data,omitempty"`
}

// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
    Status string `json:"status,omitempty"` // "OK"
    StatusText string `json:"status_text,omitempty"`
    TaskID string `json:"task_id,omitempty"` // same as File.Id
    FilesNum int `json:"files_num,omitempty"` // number of files in archive
    RedirectLink string `json:"redirect_link,omitempty"`
}

// ------------------------------------------------------------

// RequestShare is to request for file share
type RequestShare struct {
    FileIds []string `json:"file_ids,omitempty"`
    ShareTo string `json:"share_to,omitempty"` // "publiclink",
    ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
    PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}

// RequestBatch is to request for batch actions
type RequestBatch struct {
    Ids []string `json:"ids,omitempty"`
    To map[string]string `json:"to,omitempty"`
}

// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
    // always required
    Kind string `json:"kind"` // "drive#folder" or "drive#file"
    Name string `json:"name"`
    ParentID string `json:"parent_id"`
    FolderType string `json:"folder_type"`
    // only when uploading a new file
    Hash string `json:"hash,omitempty"` // sha1sum
    Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
    Size int64 `json:"size,omitempty"`
    UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}

// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
    Kind string `json:"kind,omitempty"` // "drive#file"
    Name string `json:"name,omitempty"`
    ParentID string `json:"parent_id,omitempty"`
    UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
    URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
    FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}

// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
    Gcid string `json:"gcid,omitempty"` // same as File.Hash
    Password string `json:"password,omitempty"` // ""
    FileID string `json:"file_id,omitempty"`
    Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
    DefaultParent bool `json:"default_parent,omitempty"`
}

// ------------------------------------------------------------

// NOT implemented YET

// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
    Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
    Path string `json:"path,omitempty"` // "" by default
    Password string `json:"password,omitempty"` // "" by default
    FileID string `json:"file_id,omitempty"`
}

// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
    Status string `json:"status,omitempty"` // "OK"
    StatusText string `json:"status_text,omitempty"` // ""
    TaskID string `json:"task_id,omitempty"` // ""
    CurrentPath string `json:"current_path,omitempty"` // ""
    Title string `json:"title,omitempty"`
    FileSize int64 `json:"file_size,omitempty"`
    Gcid string `json:"gcid,omitempty"` // same as File.Hash
    Files []*FileInArchive `json:"files,omitempty"`
}
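The api.Time wrapper above stores RFC3339 timestamps by embedding literal quotes in the layout string. A self-contained round-trip example of the same technique, re-declared locally (with a value receiver for MarshalJSON so it also works on non-addressable struct fields):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Time mirrors the pikpak api.Time wrapper: RFC3339 inside quotes.
type Time time.Time

const timeFormat = `"` + time.RFC3339 + `"`

// MarshalJSON emits the timestamp including the surrounding quotes.
func (t Time) MarshalJSON() ([]byte, error) {
    return []byte(time.Time(t).Format(timeFormat)), nil
}

// UnmarshalJSON tolerates null and empty strings, like the original.
func (t *Time) UnmarshalJSON(data []byte) error {
    if string(data) == "null" || string(data) == `""` {
        return nil
    }
    parsed, err := time.Parse(timeFormat, string(data))
    if err != nil {
        return err
    }
    *t = Time(parsed)
    return nil
}

func main() {
    var v struct {
        Created Time `json:"created_time"`
    }
    in := []byte(`{"created_time":"2022-09-17T14:31:06+08:00"}`)
    if err := json.Unmarshal(in, &v); err != nil {
        panic(err)
    }
    out, _ := json.Marshal(v)
    fmt.Println(string(out)) // round-trips the timestamp unchanged
}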
@@ -1,253 +0,0 @@
|
||||
package pikpak
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
cachePrefix = "rclone-pikpak-sha1sum-"
|
||||
)
|
||||
|
||||
// requestDecompress requests decompress of compressed files
|
||||
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
|
||||
req := &api.RequestDecompress{
|
||||
Gcid: file.Hash,
|
||||
Password: password,
|
||||
FileID: file.ID,
|
||||
Files: []*api.FileInArchive{},
|
||||
DefaultParent: true,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/decompress/v1/decompress",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getUserInfo gets UserInfo from API
|
||||
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://user.mypikpak.com/v1/user/me",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get userinfo: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getVIPInfo gets VIPInfo from API
|
||||
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get vip info: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// requestBatchAction requests batch actions to API
|
||||
//
|
||||
// action can be one of batch{Copy,Delete,Trash,Untrash}
|
||||
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files:" + action,
|
||||
NoResponse: true, // Only returns `{"task_id":""}
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch action %q failed: %w", action, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requestNewTask requests a new api.NewTask and returns api.Task
|
||||
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var newTask api.NewTask
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTask.Task, nil
|
||||
}
|
||||
|
||||
// requestNewFile requests a new api.NewFile and returns api.File
|
||||
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getFile gets api.File from API for the ID passed
|
||||
// and returns rich information containing additional fields below
|
||||
// * web_content_link
|
||||
// * thumbnail_link
|
||||
// * links
|
||||
// * medias
|
||||
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
if err == nil && info.Phase != api.PhaseTypeComplete {
|
||||
// could be pending right after file is created/uploaded.
|
||||
return true, errors.New("not PHASE_TYPE_COMPLETE")
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// patchFile updates attributes of the file by ID
|
||||
//
|
||||
// currently known patchable fields are
|
||||
// * name
|
||||
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getAbout gets drive#quota information from server
|
||||
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/about",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// requestShare returns information about ssharable links
|
||||
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/share",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Read the sha1 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
    // we need an SHA1
    hash := sha1.New()
    // use the teeReader to write to the local file AND calculate the SHA1 while doing so
    teeReader := io.TeeReader(in, hash)

    // nothing to clean up by default
    cleanup = func() {}

    // don't cache small files on disk to reduce wear of the disk
    if size > threshold {
        var tempFile *os.File

        // create the cache file
        tempFile, err = os.CreateTemp("", cachePrefix)
        if err != nil {
            return
        }

        _ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows

        // clean up the file after we are done downloading
        cleanup = func() {
            // the file should normally already be closed, but just to make sure
            _ = tempFile.Close()
            _ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
        }

        // copy the ENTIRE file to disc and calculate the SHA1 in the process
        if _, err = io.Copy(tempFile, teeReader); err != nil {
            return
        }
        // jump to the start of the local file so we can pass it along
        if _, err = tempFile.Seek(0, 0); err != nil {
            return
        }

        // replace the already read source with a reader of our cached file
        out = tempFile
    } else {
        // that's a small file, just read it into memory
        var inData []byte
        inData, err = io.ReadAll(teeReader)
        if err != nil {
            return
        }

        // set the reader to our read memory block
        out = bytes.NewReader(inData)
    }
    return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
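The io.TeeReader trick is the core of readSHA1: every byte read from the source is also fed to the hash, so the SHA1 comes for free whether the data is spooled to a temp file or buffered in memory. A minimal, self-contained sketch of the same pattern (the hashWhileReading helper is hypothetical, not rclone code):

package main

import (
    "bytes"
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
)

// hashWhileReading returns the SHA1 of everything read from in,
// buffering the contents so they can be read again afterwards.
func hashWhileReading(in io.Reader) (sum string, out io.Reader, err error) {
    hash := sha1.New()
    var buf bytes.Buffer
    // TeeReader writes to hash as a side effect of every Read.
    if _, err = io.Copy(&buf, io.TeeReader(in, hash)); err != nil {
        return "", nil, err
    }
    return hex.EncodeToString(hash.Sum(nil)), &buf, nil
}

func main() {
    sum, out, err := hashWhileReading(strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    data, _ := io.ReadAll(out)
    fmt.Println(sum, string(data)) // sha1 of "hello", then "hello" again
}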
File diff suppressed because it is too large
@@ -1,17 +0,0 @@
// Test PikPak filesystem interface
package pikpak_test

import (
    "testing"

    "github.com/rclone/rclone/backend/pikpak"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestPikPak:",
        NilObject:  (*pikpak.Object)(nil),
    })
}
@@ -23,7 +23,6 @@ import (
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/random"
    "github.com/rclone/rclone/lib/readers"
)

@@ -253,12 +252,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
    return f.putUnchecked(ctx, in, src, src.Remote(), options...)
}

func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
    // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
    size := src.Size()
    remote := src.Remote()
    leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
    if err != nil {
        return nil, err
@@ -544,59 +540,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
    if err != nil {
        return nil, err
    }
    modTime := src.ModTime(ctx)
    var resp struct {
        File putio.File `json:"file"`
    }
    // For some unknown reason the API sometimes returns the name
    // already exists unless we upload to a temporary name and
    // rename
    //
    // {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
    suffix := "." + random.String(8)
    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
        params.Set("parent_id", directoryID)
        params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))

        params.Set("name", f.opt.Enc.FromStandardName(leaf))
        req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
        if err != nil {
            return false, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
        _, err = f.client.Do(req, &resp)
        _, err = f.client.Do(req, nil)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
    }
    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
        params.Set("name", f.opt.Enc.FromStandardName(leaf))

        req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
        if err != nil {
            return false, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        _, err = f.client.Do(req, &resp)
        return shouldRetry(ctx, err)
    })
    if err != nil {
        return nil, err
    }
    o, err = f.newObjectWithInfo(ctx, remote, resp.File)
    if err != nil {
        return nil, err
    }
    err = o.SetModTime(ctx, modTime)
    if err != nil {
        return nil, err
    }
    return o, nil
    return f.NewObject(ctx, remote)
}

// Move src to this remote using server-side move operations.
@@ -618,7 +579,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
    if err != nil {
        return nil, err
    }
    modTime := src.ModTime(ctx)
    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
@@ -636,15 +596,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
    if err != nil {
        return nil, err
    }
    o, err = f.NewObject(ctx, remote)
    if err != nil {
        return nil, err
    }
    err = o.SetModTime(ctx, modTime)
    if err != nil {
        return nil, err
    }
    return o, nil
    return f.NewObject(ctx, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote

@@ -275,7 +275,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    if err != nil {
        return err
    }
    newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...)
    newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
    if err != nil {
        return err
    }

backend/s3/s3.go
@@ -1,4 +1,4 @@
// Package s3 provides an interface to Amazon S3 object storage
// Package s3 provides an interface to Amazon S3 oject storage
package s3

//go:generate go run gen_setfrom.go -o setfrom.go
@@ -66,7 +66,7 @@ import (
func init() {
    fs.Register(&fs.RegInfo{
        Name: "s3",
        Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
        Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
        NewFs:       NewFs,
        CommandHelp: commandHelp,
        Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -91,9 +91,6 @@ func init() {
        }, {
            Value: "Alibaba",
            Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
        }, {
            Value: "ArvanCloud",
            Help:  "Arvan Cloud Object Storage (AOS)",
        }, {
            Value: "Ceph",
            Help:  "Ceph Object Storage",
@@ -103,15 +100,15 @@ func init() {
        }, {
            Value: "Cloudflare",
            Help:  "Cloudflare R2 Storage",
        }, {
            Value: "ArvanCloud",
            Help:  "Arvan Cloud Object Storage (AOS)",
        }, {
            Value: "DigitalOcean",
            Help:  "DigitalOcean Spaces",
        }, {
            Value: "Dreamhost",
            Help:  "Dreamhost DreamObjects",
        }, {
            Value: "GCS",
            Help:  "Google Cloud Storage",
        }, {
            Value: "HuaweiOBS",
            Help:  "Huawei Object Storage Service",
@@ -136,9 +133,6 @@ func init() {
        }, {
            Value: "Netease",
            Help:  "Netease Object Storage (NOS)",
        }, {
            Value: "Petabox",
            Help:  "Petabox Object Storage",
        }, {
            Value: "RackCorp",
            Help:  "RackCorp Object Storage",
@@ -443,30 +437,10 @@ func init() {
            Value: "eu-south-2",
            Help:  "Logrono, Spain",
        }},
    }, {
        Name:     "region",
        Help:     "Region where your bucket will be created and your data stored.\n",
        Provider: "Petabox",
        Examples: []fs.OptionExample{{
            Value: "us-east-1",
            Help:  "US East (N. Virginia)",
        }, {
            Value: "eu-central-1",
            Help:  "Europe (Frankfurt)",
        }, {
            Value: "ap-southeast-1",
            Help:  "Asia Pacific (Singapore)",
        }, {
            Value: "me-south-1",
            Help:  "Middle East (Bahrain)",
        }, {
            Value: "sa-east-1",
            Help:  "South America (São Paulo)",
        }},
    }, {
        Name: "region",
        Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
        Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
        Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -575,15 +549,15 @@ func init() {
            Help: "Anhui China (Huainan)",
        }},
    }, {
        // ArvanCloud endpoints: https://www.arvancloud.ir/en/products/cloud-storage
        // ArvanCloud endpoints: https://www.arvancloud.com/en/products/cloud-storage
        Name:     "endpoint",
        Help:     "Endpoint for Arvan Cloud Object Storage (AOS) API.",
        Provider: "ArvanCloud",
        Examples: []fs.OptionExample{{
            Value: "s3.ir-thr-at1.arvanstorage.ir",
            Help:  "The default endpoint - a good choice if you are unsure.\nTehran Iran (Simin)",
            Value: "s3.ir-thr-at1.arvanstorage.com",
            Help:  "The default endpoint - a good choice if you are unsure.\nTehran Iran (Asiatech)",
        }, {
            Value: "s3.ir-tbz-sh1.arvanstorage.ir",
            Value: "s3.ir-tbz-sh1.arvanstorage.com",
            Help:  "Tabriz Iran (Shahriar)",
        }},
    }, {
@@ -791,30 +765,6 @@ func init() {
            Value: "s3-eu-south-2.ionoscloud.com",
            Help:  "Logrono, Spain",
        }},
    }, {
        Name:     "endpoint",
        Help:     "Endpoint for Petabox S3 Object Storage.\n\nSpecify the endpoint from the same region.",
        Provider: "Petabox",
        Required: true,
        Examples: []fs.OptionExample{{
            Value: "s3.petabox.io",
            Help:  "US East (N. Virginia)",
        }, {
            Value: "s3.us-east-1.petabox.io",
            Help:  "US East (N. Virginia)",
        }, {
            Value: "s3.eu-central-1.petabox.io",
            Help:  "Europe (Frankfurt)",
        }, {
            Value: "s3.ap-southeast-1.petabox.io",
            Help:  "Asia Pacific (Singapore)",
        }, {
            Value: "s3.me-south-1.petabox.io",
            Help:  "Middle East (Bahrain)",
        }, {
            Value: "s3.sa-east-1.petabox.io",
            Help:  "South America (São Paulo)",
        }},
    }, {
        // Liara endpoints: https://liara.ir/landing/object-storage
        Name: "endpoint",
@@ -984,14 +934,6 @@ func init() {
            Value: "s3.eu-central-1.stackpathstorage.com",
            Help:  "EU Endpoint",
        }},
    }, {
        Name:     "endpoint",
        Help:     "Endpoint for Google Cloud Storage.",
        Provider: "GCS",
        Examples: []fs.OptionExample{{
            Value: "https://storage.googleapis.com",
            Help:  "Google Cloud Storage endpoint",
        }},
    }, {
        Name: "endpoint",
        Help: "Endpoint for Storj Gateway.",
@@ -1156,7 +1098,7 @@ func init() {
    }, {
        Name: "endpoint",
        Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
        Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox",
        Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
        Examples: []fs.OptionExample{{
            Value: "objects-us-east-1.dream.io",
            Help:  "Dream Objects endpoint",
@@ -1258,12 +1200,8 @@ func init() {
            Help:     "Liara Iran endpoint",
            Provider: "Liara",
        }, {
            Value:    "s3.ir-thr-at1.arvanstorage.ir",
            Help:     "ArvanCloud Tehran Iran (Simin) endpoint",
            Provider: "ArvanCloud",
        }, {
            Value:    "s3.ir-tbz-sh1.arvanstorage.ir",
            Help:     "ArvanCloud Tabriz Iran (Shahriar) endpoint",
            Value:    "s3.ir-thr-at1.arvanstorage.com",
            Help:     "ArvanCloud Tehran Iran (Asiatech) endpoint",
            Provider: "ArvanCloud",
        }},
    }, {
@@ -1447,7 +1385,7 @@ func init() {
        Provider: "ArvanCloud",
        Examples: []fs.OptionExample{{
            Value: "ir-thr-at1",
            Help:  "Tehran Iran (Simin)",
            Help:  "Tehran Iran (Asiatech)",
        }, {
            Value: "ir-tbz-sh1",
            Help:  "Tabriz Iran (Shahriar)",
@@ -1644,7 +1582,7 @@ func init() {
    }, {
        Name: "location_constraint",
        Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
        Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
        Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
    }, {
        Name: "acl",
        Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1887,7 +1825,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
            Help: "Standard storage class",
        }},
    }, {
        // Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
        // Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
        Name:     "storage_class",
        Help:     "The storage class to use when storing new objects in ArvanCloud.",
        Provider: "ArvanCloud",
@@ -1914,7 +1852,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
            Help: "Infrequent access storage mode",
        }},
    }, {
        // Mapping from here: https://www.scaleway.com/en/docs/storage/object/quickstart/
        // Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
        Name:     "storage_class",
        Help:     "The storage class to use when storing new objects in S3.",
        Provider: "Scaleway",
@@ -1923,13 +1861,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
            Help: "Default.",
        }, {
            Value: "STANDARD",
            Help:  "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.\nAvailable in all regions.",
            Help:  "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
        }, {
            Value: "GLACIER",
            Help:  "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.\nAvailable in FR-PAR and NL-AMS regions.",
        }, {
            Value: "ONEZONE_IA",
            Help:  "One Zone - Infrequent Access.\nA good choice for storing secondary backup copies or easily re-creatable data.\nAvailable in the FR-PAR region only.",
            Help:  "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
        }},
    }, {
        // Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
@@ -2247,15 +2182,6 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
This is usually set to a CloudFront CDN URL as AWS S3 offers
cheaper egress for data downloaded through the CloudFront network.`,
        Advanced: true,
    }, {
        Name:     "directory_markers",
        Default:  false,
        Advanced: true,
        Help: `Upload an empty object with a trailing slash when a new directory is created

Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
    }, {
        Name: "use_multipart_etag",
        Help: `Whether to use ETag in multipart uploads for verification
@@ -2332,24 +2258,6 @@ will decompress the object on the fly.
If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
`, "|", "`"),
        Default:  fs.Tristate{},
        Advanced: true,
    }, {
        Name: "use_accept_encoding_gzip",
        Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.

By default, rclone will append |Accept-Encoding: gzip| to the request to download
compressed objects whenever possible.

However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
the signature of the request.

A symptom of this would be receiving errors like

    SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.

In this case, you might want to try disabling this option.
`, "|", "`"),
        Default:  fs.Tristate{},
        Advanced: true,
@@ -2485,14 +2393,12 @@ type Options struct {
    MemoryPoolUseMmap     bool        `config:"memory_pool_use_mmap"`
    DisableHTTP2          bool        `config:"disable_http2"`
    DownloadURL           string      `config:"download_url"`
    DirectoryMarkers      bool        `config:"directory_markers"`
    UseMultipartEtag      fs.Tristate `config:"use_multipart_etag"`
    UsePresignedRequest   bool        `config:"use_presigned_request"`
    Versions              bool        `config:"versions"`
    VersionAt             fs.Time     `config:"version_at"`
    Decompress            bool        `config:"decompress"`
    MightGzip             fs.Tristate `config:"might_gzip"`
    UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
    NoSystemMetadata      bool        `config:"no_system_metadata"`
}

@@ -2885,12 +2791,11 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
// These should be differences from AWS S3
func setQuirks(opt *Options) {
    var (
        listObjectsV2         = true
        virtualHostStyle      = true
        urlEncodeListings     = true
        useMultipartEtag      = true
        useAcceptEncodingGzip = true
        mightGzip             = true // assume all providers might gzip until proven otherwise
        listObjectsV2     = true
        virtualHostStyle  = true
        urlEncodeListings = true
        useMultipartEtag  = true
        mightGzip         = true // assume all providers might gzip until proven otherwise
    )
    switch opt.Provider {
    case "AWS":
@@ -2932,8 +2837,6 @@ func setQuirks(opt *Options) {
        // listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
        virtualHostStyle = false
        urlEncodeListings = false
    case "Petabox":
        // No quirks
    case "Liara":
        virtualHostStyle = false
        urlEncodeListings = false
@@ -2977,11 +2880,6 @@ func setQuirks(opt *Options) {
    case "Qiniu":
        useMultipartEtag = false
        urlEncodeListings = false
        virtualHostStyle = false
    case "GCS":
        // Google break request Signature by mutating accept-encoding HTTP header
        // https://github.com/rclone/rclone/issues/6670
        useAcceptEncodingGzip = false
    case "Other":
        listObjectsV2 = false
        virtualHostStyle = false
@@ -3026,12 +2924,6 @@ func setQuirks(opt *Options) {
        opt.MightGzip.Valid = true
        opt.MightGzip.Value = mightGzip
    }

    // set UseAcceptEncodingGzip if not manually set
    if !opt.UseAcceptEncodingGzip.Valid {
        opt.UseAcceptEncodingGzip.Valid = true
        opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
    }
}

// setRoot changes the root of the Fs
@@ -3056,7 +2948,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if err != nil {
        return nil, err
    }
    fs.Debugf(nil, "name = %q, root = %q, opt = %#v", name, root, opt)
    err = checkUploadChunkSize(opt.ChunkSize)
    if err != nil {
        return nil, fmt.Errorf("s3: chunk size: %w", err)
@@ -3066,7 +2957,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, fmt.Errorf("s3: upload cutoff: %w", err)
    }
    if opt.Versions && opt.VersionAt.IsSet() {
        return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
        return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
    }
    if opt.BucketACL == "" {
        opt.BucketACL = opt.ACL
@@ -3147,9 +3038,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if opt.Provider == "IDrive" {
        f.features.SetTier = false
    }
    if opt.DirectoryMarkers {
        f.features.CanHaveEmptyDirectories = true
    }
    // f.listMultipartUploads()

    if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
@@ -3190,7 +3078,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
    err = f.list(ctx, listOpt{
        bucket:       bucket,
        directory:    bucketPath,
        prefix:       f.rootDirectory,
        recurse:      true,
        withVersions: f.opt.Versions,
        findFile:     true,
@@ -3596,10 +3483,10 @@ type listOpt struct {
// list lists the objects into the function supplied with the opt
// supplied.
func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
    if opt.prefix != "" {
        opt.prefix += "/"
    }
    if !opt.findFile {
        if opt.prefix != "" {
            opt.prefix += "/"
        }
        if opt.directory != "" {
            opt.directory += "/"
        }
@@ -3642,7 +3529,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
    default:
        listBucket = f.newV2List(&req)
    }
    foundItems := 0
    for {
        var resp *s3.ListObjectsV2Output
        var err error
@@ -3684,7 +3570,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
            return err
        }
        if !opt.recurse {
            foundItems += len(resp.CommonPrefixes)
            for _, commonPrefix := range resp.CommonPrefixes {
                if commonPrefix.Prefix == nil {
                    fs.Logf(f, "Nil common prefix received")
@@ -3717,7 +3602,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
                }
            }
        }
        foundItems += len(resp.Contents)
        for i, object := range resp.Contents {
            remote := aws.StringValue(object.Key)
            if urlEncodeListings {
@@ -3732,29 +3616,19 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
                fs.Logf(f, "Odd name received %q", remote)
                continue
            }
            isDirectory := (remote == "" || strings.HasSuffix(remote, "/")) && object.Size != nil && *object.Size == 0
            // is this a directory marker?
            if isDirectory {
                if opt.noSkipMarkers {
                    // process directory markers as files
                    isDirectory = false
                } else {
                    // Don't insert the root directory
                    if remote == opt.directory {
                        continue
                    }
                    // process directory markers as directories
                    remote = strings.TrimRight(remote, "/")
                }
            }
            remote = remote[len(opt.prefix):]
            isDirectory := remote == "" || strings.HasSuffix(remote, "/")
            if opt.addBucket {
                remote = bucket.Join(opt.bucket, remote)
            }
            // is this a directory marker?
            if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
                continue // skip directory marker
            }
            if versionIDs != nil {
                err = fn(remote, object, versionIDs[i], isDirectory)
                err = fn(remote, object, versionIDs[i], false)
            } else {
                err = fn(remote, object, nil, isDirectory)
                err = fn(remote, object, nil, false)
            }
            if err != nil {
                if err == errEndList {
@@ -3767,20 +3641,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
            break
        }
    }
    if f.opt.DirectoryMarkers && foundItems == 0 && opt.directory != "" {
        // Determine whether the directory exists or not by whether it has a marker
        req := s3.HeadObjectInput{
            Bucket: &opt.bucket,
            Key:    &opt.directory,
        }
        _, err := f.headObject(ctx, &req)
        if err != nil {
            if err == fs.ErrorObjectNotFound {
                return fs.ErrorDirNotFound
            }
            return err
        }
    }
    return nil
}

@@ -3971,70 +3831,10 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
    return false, err
}

// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
    if !f.opt.DirectoryMarkers || bucket == "" {
        return nil
    }

    // Object to be uploaded
    o := &Object{
        fs: f,
        meta: map[string]string{
            metaMtime: swift.TimeToFloatString(time.Now()),
        },
    }

    for {
        _, bucketPath := f.split(dir)
        // Don't create the directory marker if it is the bucket or at the very root
        if bucketPath == "" {
            break
        }
        o.remote = dir + "/"

        // Check to see if object already exists
        _, err := o.headObject(ctx)
        if err == nil {
            return nil
        }

        // Upload it if not
        fs.Debugf(o, "Creating directory marker")
        content := io.Reader(strings.NewReader(""))
        err = o.Update(ctx, content, o)
        if err != nil {
            return fmt.Errorf("creating directory marker failed: %w", err)
        }

        // Now check parent directory exists
        dir = path.Dir(dir)
        if dir == "/" || dir == "." {
            break
        }
    }

    return nil
}
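The loop in createDirectoryMarker walks up the tree with path.Dir until it hits the bucket root, uploading a zero-byte "dir/" object at each level that lacks one. A minimal standalone sketch of that parent walk (the markerKeys helper is hypothetical, for illustration only):

package main

import (
    "fmt"
    "path"
)

// markerKeys lists the "dir/" marker objects that would be created
// for a leaf directory, walking up to (but not including) the root.
func markerKeys(dir string) (keys []string) {
    for dir != "/" && dir != "." && dir != "" {
        keys = append(keys, dir+"/")
        dir = path.Dir(dir) // step up one level
    }
    return keys
}

func main() {
    fmt.Println(markerKeys("a/b/c")) // [a/b/c/ a/b/ a/]
}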
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    bucket, _ := f.split(dir)
    e := f.makeBucket(ctx, bucket)
    if e != nil {
        return e
    }
    return f.createDirectoryMarker(ctx, bucket, dir)
}

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
    remote = strings.TrimRight(remote, "/")
    dir := path.Dir(remote)
    if dir == "/" || dir == "." {
        dir = ""
    }
    return f.Mkdir(ctx, dir)
    return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
@@ -4075,18 +3875,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    bucket, directory := f.split(dir)
    // Remove directory marker file
    if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
        o := &Object{
            fs:     f,
            remote: dir + "/",
        }
        fs.Debugf(o, "Removing directory marker")
        err := o.Remove(ctx)
        if err != nil {
            return fmt.Errorf("removing directory marker failed: %w", err)
        }
    }
    if bucket == "" || directory != "" {
        return nil
    }
@@ -4285,7 +4073,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, errNotWithVersionAt
    }
    dstBucket, dstPath := f.split(remote)
    err := f.mkdirParent(ctx, remote)
    err := f.makeBucket(ctx, dstBucket)
    if err != nil {
        return nil, err
    }
@@ -4912,26 +4700,22 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
        Key:       &bucketPath,
        VersionId: o.versionID,
    }
    return o.fs.headObject(ctx, &req)
}

func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.HeadObjectOutput, err error) {
    if f.opt.RequesterPays {
    if o.fs.opt.RequesterPays {
        req.RequestPayer = aws.String(s3.RequestPayerRequester)
    }
    if f.opt.SSECustomerAlgorithm != "" {
        req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
    if o.fs.opt.SSECustomerAlgorithm != "" {
        req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
    }
    if f.opt.SSECustomerKey != "" {
        req.SSECustomerKey = &f.opt.SSECustomerKey
    if o.fs.opt.SSECustomerKey != "" {
        req.SSECustomerKey = &o.fs.opt.SSECustomerKey
    }
    if f.opt.SSECustomerKeyMD5 != "" {
        req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
    if o.fs.opt.SSECustomerKeyMD5 != "" {
        req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
    }
    err = f.pacer.Call(func() (bool, error) {
    err = o.fs.pacer.Call(func() (bool, error) {
        var err error
        resp, err = f.c.HeadObjectWithContext(ctx, req)
        return f.shouldRetry(ctx, err)
        resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
        return o.fs.shouldRetry(ctx, err)
    })
    if err != nil {
        if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -4941,9 +4725,7 @@ func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.
        }
        return nil, err
    }
    if req.Bucket != nil {
        f.cache.MarkOK(*req.Bucket)
    }
    o.fs.cache.MarkOK(bucket)
    return resp, nil
}

@@ -5180,9 +4962,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

    // Override the automatic decompression in the transport to
    // download compressed files as-is
    if o.fs.opt.UseAcceptEncodingGzip.Value {
        httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
    }
    httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")

    for _, option := range options {
        switch option.(type) {
@@ -5298,9 +5078,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    }
    uid := cout.UploadId

    uploadCtx, cancel := context.WithCancel(ctx)
    defer atexit.OnError(&err, func() {
        cancel()
        if o.fs.opt.LeavePartsOnError {
            return
        }
@@ -5320,7 +5098,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    })()

    var (
        g, gCtx  = errgroup.WithContext(uploadCtx)
        g, gCtx  = errgroup.WithContext(ctx)
        finished = false
        partsMu  sync.Mutex // to protect parts
        parts    []*s3.CompletedPart
@@ -5402,7 +5180,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
            uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
            if err != nil {
                if partNum <= int64(concurrency) {
                    return f.shouldRetry(gCtx, err)
                    return f.shouldRetry(ctx, err)
                }
                // retry all chunks once have done the first batch
                return true, err
@@ -5434,7 +5212,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si

    var resp *s3.CompleteMultipartUploadOutput
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
        resp, err = f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
            Bucket: req.Bucket,
            Key:    req.Key,
            MultipartUpload: &s3.CompletedMultipartUpload{
@@ -5443,7 +5221,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
            RequestPayer: req.RequestPayer,
            UploadId:     uid,
        })
        return f.shouldRetry(uploadCtx, err)
        return f.shouldRetry(ctx, err)
    })
    if err != nil {
        return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
@@ -5592,12 +5370,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return errNotWithVersionAt
    }
    bucket, bucketPath := o.split()
    // Create parent dir/bucket if not saving directory marker
    if !strings.HasSuffix(o.remote, "/") {
        err := o.fs.mkdirParent(ctx, o.remote)
        if err != nil {
            return err
        }
    err := o.fs.makeBucket(ctx, bucket)
    if err != nil {
        return err
    }
    modTime := src.ModTime(ctx)
    size := src.Size()
@@ -5920,7 +5695,7 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
    setMetadata("content-disposition", o.contentDisposition)
    setMetadata("content-encoding", o.contentEncoding)
    setMetadata("content-language", o.contentLanguage)
    metadata["tier"] = o.GetTier()
    setMetadata("tier", o.storageClass)

    return metadata, nil
}

@@ -6,19 +6,15 @@ import (
    "context"
    "crypto/md5"
    "fmt"
    "path"
    "strings"
    "testing"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/random"
    "github.com/rclone/rclone/lib/version"
    "github.com/stretchr/testify/assert"
@@ -254,8 +250,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
    time.Sleep(2 * time.Second)

    // Create an object
    const dirName = "versions"
    const fileName = dirName + "/" + "test-versions.txt"
    const fileName = "test-versions.txt"
    contents := random.String(100)
    item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
    obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
@@ -285,12 +280,11 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
    }()

    // Read the contents
    entries, err := f.List(ctx, dirName)
    entries, err := f.List(ctx, "")
    require.NoError(t, err)
    tests := 0
    var fileNameVersion string
    for _, entry := range entries {
        t.Log(entry)
        remote := entry.Remote()
        if remote == fileName {
            t.Run("ReadCurrent", func(t *testing.T) {
@@ -315,23 +309,6 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
            require.NotNil(t, o)
            assert.Equal(t, int64(100), o.Size(), o.Remote())
        })

        // Check we can make a NewFs from that object with a version suffix
        t.Run("NewFs", func(t *testing.T) {
            newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
            // Make sure --s3-versions is set in the config of the new remote
            fs.Debugf(nil, "oldPath = %q", newPath)
            lastColon := strings.LastIndex(newPath, ":")
            require.True(t, lastColon >= 0)
            newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
            fs.Debugf(nil, "newPath = %q", newPath)
            fNew, err := cache.Get(ctx, newPath)
            // This should return pointing to a file
            require.Equal(t, fs.ErrorIsFile, err)
            require.NotNil(t, fNew)
            // With the directory the directory above
            assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
        })
    })

    t.Run("VersionAt", func(t *testing.T) {

@@ -5,7 +5,6 @@ import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
)

@@ -21,24 +20,6 @@ func TestIntegration(t *testing.T) {
    })
}

func TestIntegration2(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("skipping as -remote is set")
    }
    name := "TestS3"
    fstests.Run(t, &fstests.Opt{
        RemoteName:  name + ":",
        NilObject:   (*Object)(nil),
        TiersToTest: []string{"STANDARD", "STANDARD_IA"},
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: minChunkSize,
        },
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "directory_markers", Value: "true"},
        },
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

@@ -14,7 +14,6 @@ import (

// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
    "delete":   {},
    "acl":      {},
    "location": {},
    "logging":  {},

|
||||
return nil
|
||||
}
|
||||
|
||||
// === API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6,
|
||||
// === the others can probably be removed after the API v2.1 is documented
|
||||
|
||||
func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) {
|
||||
// API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6.
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory
|
||||
if libraryID == "" {
|
||||
@@ -999,3 +1001,95 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File
|
||||
if srcLibraryID == "" || dstLibraryID == "" {
|
||||
return nil, errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
srcPath = path.Join("/", srcPath)
|
||||
dstPath = path.Join("/", dstPath)
|
||||
|
||||
// Older API does not seem to accept JSON input here either
|
||||
postParameters := url.Values{
|
||||
"operation": {"copy"},
|
||||
"dst_repo": {dstLibraryID},
|
||||
"dst_dir": {f.opt.Enc.FromStandardPath(dstPath)},
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + srcLibraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
}
|
||||
result := &api.FileInfo{}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return nil, fs.ErrorPermissionDenied
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
|
||||
}
|
||||
err = rest.DecodeJSON(resp, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.decodeFileInfo(result), nil
|
||||
}
|
||||
|
||||
func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File
|
||||
if libraryID == "" || newname == "" {
|
||||
return errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
filePath = path.Join("/", filePath)
|
||||
|
||||
// No luck with JSON input with the older api2
|
||||
postParameters := url.Values{
|
||||
"operation": {"rename"},
|
||||
"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
|
||||
"newname": {f.opt.Enc.FromStandardName(newname)},
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + libraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
NoRedirect: true,
|
||||
NoResponse: true,
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 301 {
|
||||
// This is the normal response from the server
|
||||
return nil
|
||||
}
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return fs.ErrorPermissionDenied
|
||||
}
|
||||
if resp.StatusCode == 404 {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to rename file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
    "errors"
    "fmt"
    "io"
    iofs "io/fs"
    "os"
    "path"
    "regexp"
@@ -324,7 +323,7 @@ Pass multiple variables space separated, eg

VAR1=value VAR2=value

and pass variables with spaces in quotes, eg
and pass variables with spaces in in quotes, eg

"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere

@@ -368,20 +367,6 @@ At least one must match with server configuration. This can be checked for examp
Example:

    umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
`,
    Advanced: true,
}, {
    Name:    "host_key_algorithms",
    Default: fs.SpaceSepList{},
    Help: `Space separated list of host key algorithms, ordered by preference.

At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms.

Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.

Example:

    ssh-ed25519 ssh-rsa ssh-dss
`,
    Advanced: true,
}},
@@ -422,7 +407,6 @@ type Options struct {
    Ciphers           fs.SpaceSepList `config:"ciphers"`
    KeyExchange       fs.SpaceSepList `config:"key_exchange"`
    MACs              fs.SpaceSepList `config:"macs"`
    HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
}

// Fs stores the interface to the remote SFTP files
@@ -755,10 +739,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
    }

    if len(opt.HostKeyAlgorithms) != 0 {
        sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms)
    }

    if opt.KnownHostsFile != "" {
        hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
        if err != nil {
@@ -802,32 +782,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
    }
    if keyFile != "" {
        // If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key
        // and `${opt.KeyFile}.pub` contains the public key.
        //
        // If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key.
        // This is how it works with openssh; the `IdentityFile` in openssh config points to the public key.
        // It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone
        // will try all the keys they find in the ssh-agent until they find one that works. But just like
        // `IdentityFile` is used in openssh config to limit the search to one specific key, so does
        // `opt.KeyFile` in rclone config limit the search to that specific key.
        //
        // However, previous versions of rclone would always expect to find the public key in
        // `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility
        // we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with
        // an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`.
        pubBytes, err := os.ReadFile(keyFile + ".pub")
        if err != nil {
            if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent {
                pubBytes, err = os.ReadFile(keyFile)
                if err != nil {
                    return nil, fmt.Errorf("failed to read public key file: %w", err)
                }
            } else {
                return nil, fmt.Errorf("failed to read public key file: %w", err)
            }
            return nil, fmt.Errorf("failed to read public key file: %w", err)
        }

        pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
        if err != nil {
            return nil, fmt.Errorf("failed to parse public key file: %w", err)
@@ -849,8 +807,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        }
    }

    // Load key file as a private key, if specified. This is only needed when not using an ssh agent.
    if (keyFile != "" && !opt.KeyUseAgent) || opt.KeyPem != "" {
    // Load key file if specified
    if keyFile != "" || opt.KeyPem != "" {
        var key []byte
        if opt.KeyPem == "" {
            key, err = os.ReadFile(keyFile)
@@ -994,7 +952,6 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
        SlowHash:                true,
        PartialUploads:          true,
    }).Fill(ctx, f)
    // Make a connection and pool it to return errors early
    c, err := f.getSftpConnection(ctx)
@@ -1066,7 +1023,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
        }
    }
    f.putSftpConnection(&c, err)
    if root != "" && !strings.HasSuffix(root, "/") {
    if root != "" {
        // Check to see if the root is actually an existing file,
        // and if so change the filesystem root to its parent directory.
        oldAbsRoot := f.absRoot
@@ -1169,6 +1126,13 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    root := path.Join(f.absRoot, dir)
    ok, err := f.dirExists(ctx, root)
    if err != nil {
        return nil, fmt.Errorf("List failed: %w", err)
    }
    if !ok {
        return nil, fs.ErrorDirNotFound
    }
    sftpDir := root
    if sftpDir == "" {
        sftpDir = "."
@@ -1180,9 +1144,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    infos, err := c.sftpClient.ReadDir(sftpDir)
    f.putSftpConnection(&c, err)
    if err != nil {
        if errors.Is(err, os.ErrNotExist) {
            return nil, fs.ErrorDirNotFound
        }
        return nil, fmt.Errorf("error listing %q: %w", dir, err)
    }
    for _, info := range infos {
@@ -1326,17 +1287,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    if err != nil {
        return nil, fmt.Errorf("Move: %w", err)
    }
    srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
    if _, ok := c.sftpClient.HasExtension("posix-rename@openssh.com"); ok {
        err = c.sftpClient.PosixRename(srcPath, dstPath)
    } else {
        // If haven't got PosixRename then remove source first before renaming
        err = c.sftpClient.Remove(dstPath)
        if err != nil && !errors.Is(err, iofs.ErrNotExist) {
            fs.Errorf(f, "Move: Failed to remove existing file %q: %v", dstPath, err)
        }
        err = c.sftpClient.Rename(srcPath, dstPath)
    }
    err = c.sftpClient.Rename(
        srcObj.path(),
        path.Join(f.absRoot, remote),
    )
    f.putSftpConnection(&c, err)
    if err != nil {
        return nil, fmt.Errorf("Move Rename failed: %w", err)

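The Move fallback above hinges on one SFTP detail: openssh's posix-rename@openssh.com extension overwrites an existing target atomically, while a plain SSH_FXP_RENAME fails when the target exists. A minimal sketch of that capability check against the github.com/pkg/sftp client (the moveFile helper is hypothetical; client setup is elided):

package sftputil

import (
    "errors"
    "io/fs"

    "github.com/pkg/sftp"
)

// moveFile renames src to dst, preferring the atomic POSIX rename
// extension when the server advertises it.
func moveFile(c *sftp.Client, src, dst string) error {
    if _, ok := c.HasExtension("posix-rename@openssh.com"); ok {
        // Atomic, and overwrites dst if it already exists.
        return c.PosixRename(src, dst)
    }
    // Plain rename fails on an existing target, so clear it first.
    if err := c.Remove(dst); err != nil && !errors.Is(err, fs.ErrNotExist) {
        return err
    }
    return c.Rename(src, dst)
}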
@@ -775,13 +775,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
    }
}

// FIXMEPutStream uploads to the remote path with the modTime given of indeterminate size
//
// PutStream no longer appears to work - the streamed uploads need the
// size specified at the start otherwise we get this error:
//
//     upload failed: file size does not match (-2)
func (f *Fs) FIXMEPutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(ctx, in, src, options...)
}

@@ -1458,12 +1453,12 @@ func (o *Object) ID() string {

// Check the interfaces are satisfied
var (
    _ fs.Fs       = (*Fs)(nil)
    _ fs.Purger   = (*Fs)(nil)
    _ fs.Mover    = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Copier   = (*Fs)(nil)
    // _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Fs              = (*Fs)(nil)
    _ fs.Purger          = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.Copier          = (*Fs)(nil)
    _ fs.PutStreamer     = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.IDer            = (*Object)(nil)

@@ -34,10 +34,9 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {

    d := &smb2.Dialer{
        Initiator: &smb2.NTLMInitiator{
            User:      f.opt.User,
            Password:  pass,
            Domain:    f.opt.Domain,
            TargetSPN: f.opt.SPN,
            User:     f.opt.User,
            Password: pass,
            Domain:   f.opt.Domain,
        },
    }

@@ -106,9 +105,9 @@ func (f *Fs) getSessions() int32 {
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
    // As we are pooling these connections we need to decouple
    // them from the current context
    bgCtx := context.Background()
    ctx = context.Background()

    c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
    c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
    if err != nil {
        return nil, fmt.Errorf("couldn't connect SMB: %w", err)
    }
@@ -119,7 +118,7 @@ func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err erro
        _ = c.smbSession.Logoff()
        return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
    }
        c.smbShare = c.smbShare.WithContext(bgCtx)
        c.smbShare = c.smbShare.WithContext(ctx)
    }
    return c, nil
}

@@ -60,17 +60,6 @@ func init() {
        Name:    "domain",
        Help:    "Domain name for NTLM authentication.",
        Default: "WORKGROUP",
    }, {
        Name: "spn",
        Help: `Service principal name.

Rclone presents this name to the server. Some servers use this as further
authentication, and it often needs to be set for clusters. For example:

    cifs/remotehost:1020

Leave blank if not sure.
`,
    }, {
        Name:    "idle_timeout",
        Default: fs.Duration(60 * time.Second),
@@ -120,7 +109,6 @@ type Options struct {
    User            string      `config:"user"`
    Pass            string      `config:"pass"`
    Domain          string      `config:"domain"`
    SPN             string      `config:"spn"`
    HideSpecial     bool        `config:"hide_special_share"`
    CaseInsensitive bool        `config:"case_insensitive"`
    IdleTimeout     fs.Duration `config:"idle_timeout"`

@@ -528,11 +528,7 @@ func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err e
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
    return f.put(ctx, in, src, src.Remote(), options...)
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) {
    fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size())
    fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())

    // Reject options we don't support.
    for _, option := range options {
@@ -543,7 +539,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
        }
    }

    bucketName, bucketPath := f.absolute(remote)
    bucketName, bucketPath := f.absolute(src.Remote())

    upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
    if err != nil {
@@ -553,7 +549,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
        if err != nil {
            aerr := upload.Abort()
            if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
                fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr)
                fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
            }
        }
    }()
@@ -578,7 +574,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
    }

    err = fserrors.RetryError(err)
    fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err)
    fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

    return nil, err
}
@@ -593,19 +589,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
        return nil, err
    }
    err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
    } else if errors.Is(err, uplink.ErrTooManyRequests) {
        // Storj has a rate limit of 1 per second of uploading to the same file.
        // This produces ErrTooManyRequests here, so we wait 1 second and retry.
        //
        // See: https://github.com/storj/uplink/issues/149
        fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err)
        time.Sleep(time.Second)
        err = fserrors.RetryError(err)
    }
    return nil, err
}

    return newObjectFromUplink(f, remote, upload.Info()), nil
    return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate

@@ -176,9 +176,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    fs.Debugf(o, "cp input ./%s %+v", o.Remote(), options)
    fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)

    oNew, err := o.fs.put(ctx, in, src, o.Remote(), options...)
    oNew, err := o.fs.Put(ctx, in, src, options...)

    if err == nil {
        *o = *(oNew.(*Object))

@@ -8,6 +8,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -100,7 +101,7 @@ but other operations such as Remove and Copy will fail.
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "swift",
|
||||
Description: "OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)",
|
||||
Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
|
||||
NewFs: NewFs,
|
||||
Options: append([]fs.Option{{
|
||||
Name: "env_auth",
|
||||
@@ -142,9 +143,6 @@ func init() {
|
||||
}, {
|
||||
Value: "https://auth.cloud.ovh.net/v3",
|
||||
Help: "OVH",
|
||||
}, {
|
||||
Value: "https://authenticate.ain.net",
|
||||
Help: "Blomp Cloud Storage",
|
||||
}},
|
||||
}, {
|
||||
Name: "user_id",
|
||||
@@ -1330,6 +1328,23 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
|
||||
if err = o.readMetaData(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
dirManifest := o.headers["X-Object-Manifest"]
|
||||
dirManifest, err = url.PathUnescape(dirManifest)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
delimiter := strings.Index(dirManifest, "/")
|
||||
if len(dirManifest) == 0 || delimiter < 0 {
|
||||
err = errors.New("missing or wrong structure of manifest of Dynamic large object")
|
||||
return
|
||||
}
|
||||
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
|
||||
}
|
||||
|
||||
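getSegmentsDlo above assumes the X-Object-Manifest header has the shape "container/prefix" and splits it at the first slash. A short illustration with a made-up header value (not from the diff):

	// Hypothetical manifest value for a Dynamic Large Object.
	dirManifest := "my_segments/videos/movie.mkv/"
	i := strings.Index(dirManifest, "/")
	segmentsContainer, prefix := dirManifest[:i], dirManifest[i+1:]
	// segmentsContainer == "my_segments", prefix == "videos/movie.mkv/"
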
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
@@ -1561,10 +1576,6 @@ func (o *Object) Remove(ctx context.Context) (err error) {
	// Remove file/manifest first
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectDelete(ctx, container, containerPath)
		if err == swift.ObjectNotFound {
			fs.Errorf(o, "Dangling object - ignoring: %v", err)
			err = nil
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {

@@ -49,7 +49,8 @@ func (e Errors) Error() string {

	if len(e) == 0 {
		buf.WriteString("no error")
	} else if len(e) == 1 {
	}
	if len(e) == 1 {
		buf.WriteString("1 error: ")
	} else {
		fmt.Fprintf(&buf, "%d errors: ", len(e))
@@ -60,17 +61,8 @@ func (e Errors) Error() string {
			buf.WriteString("; ")
		}

		if err != nil {
			buf.WriteString(err.Error())
		} else {
			buf.WriteString("nil error")
		}
		buf.WriteString(err.Error())
	}

	return buf.String()
}

// Unwrap returns the wrapped errors
func (e Errors) Unwrap() []error {
	return e
}

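The Unwrap() []error method added in the hunk above opts the union backend's Errors type into Go 1.20 multi-error matching, which is exactly what the deleted test file below exercised. A minimal sketch of that behaviour, assuming Go 1.20+ and the err1/err2/err3 values from the tests:

	// With Unwrap() []error defined, errors.Is inspects every wrapped
	// error, so a combined Errors value matches any of its members.
	var combined error = Errors{err1, err2}
	fmt.Println(errors.Is(combined, err1)) // true
	fmt.Println(errors.Is(combined, err3)) // false
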
@@ -1,94 +0,0 @@
//go:build go1.20
// +build go1.20

package union

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

var (
	err1 = errors.New("Error 1")
	err2 = errors.New("Error 2")
	err3 = errors.New("Error 3")
)

func TestErrorsMap(t *testing.T) {
	es := Errors{
		nil,
		err1,
		err2,
	}
	want := Errors{
		err2,
	}
	got := es.Map(func(e error) error {
		if e == err1 {
			return nil
		}
		return e
	})
	assert.Equal(t, want, got)
}

func TestErrorsFilterNil(t *testing.T) {
	es := Errors{
		nil,
		err1,
		nil,
		err2,
		nil,
	}
	want := Errors{
		err1,
		err2,
	}
	got := es.FilterNil()
	assert.Equal(t, want, got)
}

func TestErrorsErr(t *testing.T) {
	// Check not all nil case
	es := Errors{
		nil,
		err1,
		nil,
		err2,
		nil,
	}
	want := Errors{
		err1,
		err2,
	}
	got := es.Err()
	assert.Equal(t, want, got)

	// Check all nil case
	es = Errors{
		nil,
		nil,
		nil,
	}
	assert.Nil(t, es.Err())
}

func TestErrorsError(t *testing.T) {
	assert.Equal(t, "no error", Errors{}.Error())
	assert.Equal(t, "1 error: Error 1", Errors{err1}.Error())
	assert.Equal(t, "1 error: nil error", Errors{nil}.Error())
	assert.Equal(t, "2 errors: Error 1; Error 2", Errors{err1, err2}.Error())
}

func TestErrorsUnwrap(t *testing.T) {
	es := Errors{
		err1,
		err2,
	}
	assert.Equal(t, []error{err1, err2}, es.Unwrap())
	assert.True(t, errors.Is(es, err1))
	assert.True(t, errors.Is(es, err2))
	assert.False(t, errors.Is(es, err3))
}
@@ -3,6 +3,7 @@ package policy
import (
	"context"
	"fmt"
	"math/rand"
	"path"
	"strings"
	"time"
@@ -108,7 +109,9 @@ func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
		if err != nil {
			return nil
		}
		return fs.NewDir("", time.Time{})
		// random modtime for root
		randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
		return fs.NewDir("", randomNow)
	}
	found := false
	for _, e := range entries {

@@ -756,6 +756,14 @@ func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
	return f.createPolicy.Create(ctx, f.upstreams, path)
}

func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	return f.createPolicy.CreateEntries(entries...)
}

func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
	return f.searchPolicy.Search(ctx, f.upstreams, path)
}

func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	return f.searchPolicy.SearchEntries(entries...)
}
@@ -801,24 +809,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
	return errs.Err()
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
	errs := Errors(make([]error, len(f.upstreams)))
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		if do := u.Features().CleanUp; do != nil {
			err := do(ctx)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
			}
		}
	})
	return errs.Err()
}

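The removed CleanUp above uses the union backend's usual fan-out shape: one goroutine per upstream via the multithread helper, failures collected positionally into an Errors slice, then reduced with Err(). The same shape sketched with the standard library only (sync.WaitGroup standing in for multithread; upstreams and ctx assumed in scope):

	errs := make(Errors, len(upstreams))
	var wg sync.WaitGroup
	for i := range upstreams {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if do := upstreams[i].Features().CleanUp; do != nil {
				errs[i] = do(ctx)
			}
		}(i)
	}
	wg.Wait()
	// errs.Err() collapses to nil when every upstream succeeded.
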
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
@@ -902,7 +892,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		ReadMetadata:   true,
		WriteMetadata:  true,
		UserMetadata:   true,
		PartialUploads: true,
	}).Fill(ctx, f)
	canMove, slowHash := true, false
	for _, f := range upstreams {
@@ -933,9 +922,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		}
	}

	// show that we wrap other backends
	features.Overlay = true

	f.features = features

	// Get common intersection of hashes
@@ -982,5 +968,4 @@ var (
	_ fs.Abouter    = (*Fs)(nil)
	_ fs.ListRer    = (*Fs)(nil)
	_ fs.Shutdowner = (*Fs)(nil)
	_ fs.CleanUpper = (*Fs)(nil)
)

@@ -11,11 +11,6 @@ import (
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
	unimplementableObjectMethods = []string{}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
@@ -23,8 +18,8 @@ func TestIntegration(t *testing.T) {
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

@@ -44,8 +39,8 @@ func TestStandard(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -66,8 +61,8 @@ func TestRO(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -88,8 +83,8 @@ func TestNC(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -110,8 +105,8 @@ func TestPolicy1(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "lus"},
			{Name: name, Key: "search_policy", Value: "all"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -132,8 +127,8 @@ func TestPolicy2(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "rand"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}
@@ -154,8 +149,8 @@ func TestPolicy3(t *testing.T) {
			{Name: name, Key: "create_policy", Value: "all"},
			{Name: name, Key: "search_policy", Value: "all"},
		},
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
}

@@ -45,11 +45,6 @@ func init() {
		Options: []fs.Option{{
			Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
			Name: "access_token",
		}, {
			Help:     "Set to make uploaded files private",
			Name:     "private",
			Advanced: true,
			Default:  false,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -68,7 +63,6 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
	AccessToken string               `config:"access_token"`
	Private     bool                 `config:"private"`
	Enc         encoder.MultiEncoder `config:"encoding"`
}

@@ -81,7 +75,6 @@ type Fs struct {
	srv      *rest.Client
	pacer    *fs.Pacer
	IDRegexp *regexp.Regexp
	public   string // "0" to make objects private
}

// Object represents an Uptobox object
@@ -218,13 +211,10 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
		CanHaveEmptyDirectories: true,
		ReadMimeType:            false,
	}).Fill(ctx, f)
	if f.opt.Private {
		f.public = "0"
	}

	client := fshttp.NewClient(ctx)
	f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
	f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`)
	f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)

	_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
	if err != nil {
@@ -482,11 +472,11 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn
	return err
}

func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) error {
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
	if size > int64(200e9) { // max size 200GB
		return errors.New("file too big, can't upload")
		return nil, errors.New("file too big, can't upload")
	} else if size == 0 {
		return fs.ErrorCantUploadEmptyFiles
		return nil, fs.ErrorCantUploadEmptyFiles
	}
	// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(

@@ -504,19 +494,19 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
		return nil, err
	}
	if info.StatusCode != 0 {
		return fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message)
		return nil, fmt.Errorf("putUnchecked api error: %d - %s", info.StatusCode, info.Message)
	}
	// we need to have a safe name for the upload to work
	tmpName := "rcloneTemp" + random.String(8)
	upload, err := f.uploadFile(ctx, in, size, tmpName, info.Data.UploadLink, options...)
	if err != nil {
		return err
		return nil, err
	}
	if len(upload.Files) != 1 {
		return errors.New("upload unexpected response")
		return nil, errors.New("upload unexpected response")
	}
	match := f.IDRegexp.FindStringSubmatch(upload.Files[0].URL)

@@ -531,27 +521,23 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
		// this might need some more error handling. if any of the following requests fail
		// we'll leave an orphaned temporary file floating around somewhere
		// they rarely fail though
		return err
		return nil, err
	}

	err = f.move(ctx, fullBase, match[1])
	if err != nil {
		return err
		return nil, err
	}
	}

	// rename file to final name
	err = f.updateFileInformation(ctx, &api.UpdateFileInformation{
		Token:    f.opt.AccessToken,
		FileCode: match[1],
		NewName:  f.opt.Enc.FromStandardName(leaf),
		Public:   f.public,
	})
	err = f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: match[1], NewName: f.opt.Enc.FromStandardName(leaf)})
	if err != nil {
		return err
		return nil, err
	}

	return nil
	// finally fetch the file object.
	return f.NewObject(ctx, remote)
}

// Put in to the remote path with the modTime given of the given size
@@ -581,11 +567,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	err := f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, src.Remote())
	return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}

// CreateDir dir creates a directory with the given parent path
@@ -678,7 +660,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	if err != nil {
		return err
	}
	if len(info.Data.Folders) > 0 || len(info.Data.Files) > 0 {
	if info.Data.CurrentFolder.FileCount > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

@@ -714,12 +696,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

	// rename to final name if we need to
	if needRename {
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
			Token:    f.opt.AccessToken,
			FileCode: srcObj.code,
			NewName:  f.opt.Enc.FromStandardName(dstLeaf),
			Public:   f.public,
		})
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: srcObj.code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
		if err != nil {
			return nil, fmt.Errorf("move: failed final rename: %w", err)
		}
@@ -911,12 +888,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	if needRename {
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{
			Token:    f.opt.AccessToken,
			FileCode: newObj.(*Object).code,
			NewName:  f.opt.Enc.FromStandardName(dstLeaf),
			Public:   f.public,
		})
		err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Token: f.opt.AccessToken, FileCode: newObj.(*Object).code, NewName: f.opt.Enc.FromStandardName(dstLeaf)})
		if err != nil {
			return nil, fmt.Errorf("copy: failed final rename: %w", err)
		}
@@ -951,8 +923,7 @@ func (o *Object) Remote() string {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	ci := fs.GetConfig(ctx)
	return time.Time(ci.DefaultTime)
	return time.Now()
}

// Size returns the size of an object in bytes
@@ -1029,7 +1000,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}

	// upload with new size but old name
	err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	if err != nil {
		return err
	}
@@ -1040,12 +1011,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return fmt.Errorf("failed to remove old version: %w", err)
	}

	// Fetch new object after deleting the duplicate
	info, err := o.fs.NewObject(ctx, o.Remote())
	if err != nil {
		return err
	}

	// Replace guts of old object with new one
	*o = *info.(*Object)

@@ -75,7 +75,6 @@ type Prop struct {
	Size      int64    `xml:"DAV: prop>getcontentlength,omitempty"`
	Modified  Time     `xml:"DAV: prop>getlastmodified,omitempty"`
	Checksums []string `xml:"prop>checksums>checksum,omitempty"`
	MESha1Hex *string  `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
}

// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
@@ -103,27 +102,22 @@ func (p *Prop) StatusOK() bool {

// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
	if len(p.Checksums) > 0 {
		hashes = make(map[hash.Type]string)
		for _, checksums := range p.Checksums {
			checksums = strings.ToLower(checksums)
			for _, checksum := range strings.Split(checksums, " ") {
				switch {
				case strings.HasPrefix(checksum, "sha1:"):
					hashes[hash.SHA1] = checksum[5:]
				case strings.HasPrefix(checksum, "md5:"):
					hashes[hash.MD5] = checksum[4:]
				}
			}
		}
		return hashes
	} else if p.MESha1Hex != nil {
		hashes = make(map[hash.Type]string)
		hashes[hash.SHA1] = *p.MESha1Hex
		return hashes
	} else {
	if len(p.Checksums) == 0 {
		return nil
	}
	hashes = make(map[hash.Type]string)
	for _, checksums := range p.Checksums {
		checksums = strings.ToLower(checksums)
		for _, checksum := range strings.Split(checksums, " ") {
			switch {
			case strings.HasPrefix(checksum, "sha1:"):
				hashes[hash.SHA1] = checksum[5:]
			case strings.HasPrefix(checksum, "md5:"):
				hashes[hash.MD5] = checksum[4:]
			}
		}
	}
	return hashes
}

// PropValue is a tagged name and value

@@ -1,215 +0,0 @@
package webdav

/*
chunked update for Nextcloud
see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
*/

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
)

func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Not found. Can be returned by NextCloud when merging chunks of an upload.
	if resp != nil && resp.StatusCode == 404 {
		return true, err
	}

	// 423 LOCKED
	if resp != nil && resp.StatusCode == 423 {
		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
	}

	return f.shouldRetry(ctx, resp, err)
}

// set the chunk size for testing
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	return
}

func (f *Fs) getChunksUploadURL() string {
	return strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
}

func (o *Object) getChunksUploadDir() (string, error) {
	hasher := md5.New()
	_, err := hasher.Write([]byte(o.filePath()))
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
	}
	uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
	return uploadDir, nil
}

func (f *Fs) verifyChunkConfig() error {
	if f.opt.ChunkSize != 0 && !validateNextCloudChunkedURL.MatchString(f.endpointURL) {
		return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
	}

	return nil
}

func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
	return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
}

func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var uploadDir string

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
	uploadDir, err = o.createChunksUploadDirectory(ctx)
	if err != nil {
		return err
	}

	partObj := &Object{
		fs: o.fs,
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
	err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
	if err != nil {
		return err
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
	err = o.mergeChunks(ctx, uploadDir, options, src)
	if err != nil {
		return err
	}

	return nil
}

func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
	chunkSize := int64(partObj.fs.opt.ChunkSize)

	// TODO: upload chunks in parallel for faster transfer speeds
	for offset := int64(0); offset < size; offset += chunkSize {
		if err := ctx.Err(); err != nil {
			return err
		}

		contentLength := chunkSize

		// Last chunk may be smaller
		if size-offset < contentLength {
			contentLength = size - offset
		}

		endOffset := offset + contentLength - 1

		partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
		// Enable low-level HTTP 2 retries.
		// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error

		buf := make([]byte, chunkSize)
		in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)

		getBody := func() (io.ReadCloser, error) {
			// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
			if _, err := in.Seek(0, io.SeekStart); err == nil {
				return nil, err
			}

			return io.NopCloser(in), nil
		}

		err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
		if err != nil {
			return fmt.Errorf("uploading chunk failed: %w", err)
		}
	}
	return nil
}

func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
	uploadDir, err := o.getChunksUploadDir()
	if err != nil {
		return uploadDir, err
	}

	err = o.purgeUploadedChunks(ctx, uploadDir)
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
	}

	opts := rest.Opts{
		Method:     "MKCOL",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", fmt.Errorf("making upload directory failed: %w", err)
	}
	return uploadDir, err
}

func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
	var resp *http.Response

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
	opts := rest.Opts{
		Method:     "MOVE",
		Path:       path.Join(uploadDir, ".file"),
		NoResponse: true,
		Options:    options,
		RootURL:    o.fs.chunksUploadURL,
	}
	destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
	if err != nil {
		return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
	}
	opts.ExtraHeaders = o.extraHeaders(ctx, src)
	opts.ExtraHeaders["Destination"] = destinationURL.String()
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
	}
	return err
}

func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
	// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}

	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)

		// directory doesn't exist, no need to purge
		if resp.StatusCode == http.StatusNotFound {
			return false, nil
		}

		return o.fs.shouldRetry(ctx, resp, err)
	})

	return err
}
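
The file deleted in the @@ -1,215 +0,0 @@ hunk above implemented Nextcloud's chunked upload protocol. Reduced to its WebDAV verbs the flow is: MKCOL a temporary upload directory, PUT each chunk under a name encoding its byte range, then MOVE the special ".file" member to assemble the chunks at the destination. A self-contained sketch with net/http; the host, user and directory name are hypothetical placeholders, and the real code above derives the directory name from an MD5 of the destination path and routes every call through rclone's pacer and rest client:

	package main

	import (
		"fmt"
		"io"
		"net/http"
		"strings"
	)

	// do issues one WebDAV request and reports its status.
	func do(method, url string, body io.Reader, hdr map[string]string) {
		req, err := http.NewRequest(method, url, body)
		if err != nil {
			fmt.Println(method, url, "->", err)
			return
		}
		for k, v := range hdr {
			req.Header.Set(k, v)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			fmt.Println(method, url, "->", err)
			return
		}
		resp.Body.Close()
		fmt.Println(method, url, "->", resp.Status)
	}

	func main() {
		// Hypothetical endpoints: /dav/files/... swapped for /dav/uploads/...,
		// exactly as getChunksUploadURL did above.
		base := "https://cloud.example.com/remote.php/dav/uploads/USER/rclone-chunked-upload-abc123"
		dst := "https://cloud.example.com/remote.php/dav/files/USER/path/file.bin"

		// 1. Create the temporary upload directory.
		do("MKCOL", base+"/", nil, nil)
		// 2. Upload each chunk under a zero-padded "start-end" byte-range name.
		do("PUT", base+"/000000000000000-000000010485759", strings.NewReader("first chunk of data"), nil)
		// 3. Assemble: MOVE the ".file" marker to the final destination.
		do("MOVE", base+"/.file", nil, map[string]string{"Destination": dst})
	}
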
@@ -19,7 +19,6 @@ import (
	"net/url"
	"os/exec"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
@@ -43,7 +42,7 @@ import (
)

const (
	minSleep      = fs.Duration(10 * time.Millisecond)
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2   // bigger for slower decay, exponential
	defaultDepth  = "1" // depth for PROPFIND
@@ -77,9 +76,6 @@ func init() {
			Name: "vendor",
			Help: "Name of the WebDAV site/service/software you are using.",
			Examples: []fs.OptionExample{{
				Value: "fastmail",
				Help:  "Fastmail Files",
			}, {
				Value: "nextcloud",
				Help:  "Nextcloud",
			}, {
@@ -128,22 +124,6 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
`,
			Default:  fs.CommaSepList{},
			Advanced: true,
		}, {
			Name:     "pacer_min_sleep",
			Help:     "Minimum time to sleep between API calls.",
			Default:  minSleep,
			Advanced: true,
		}, {
			Name: "nextcloud_chunk_size",
			Help: `Nextcloud upload chunk size.

We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances.
See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side

Set to 0 to disable chunked uploading.
`,
			Advanced: true,
			Default:  10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
		}},
	})
}
@@ -158,8 +138,6 @@ type Options struct {
	BearerTokenCommand string               `config:"bearer_token_command"`
	Enc                encoder.MultiEncoder `config:"encoding"`
	Headers            fs.CommaSepList      `config:"headers"`
	PacerMinSleep      fs.Duration          `config:"pacer_min_sleep"`
	ChunkSize          fs.SizeSuffix        `config:"nextcloud_chunk_size"`
}

// Fs represents a remote webdav
@@ -175,15 +153,11 @@ type Fs struct {
	precision          time.Duration // mod time precision
	canStream          bool          // set if can stream
	useOCMtime         bool          // set if can use X-OC-Mtime
	propsetMtime       bool          // set if can use propset
	retryWithZeroDepth bool          // some vendors (sharepoint) won't list files when Depth is 1 (our default)
	checkBeforePurge   bool          // enables extra check that directory to purge really exists
	hasOCMD5           bool          // set if can use owncloud style checksums for MD5
	hasOCSHA1          bool          // set if can use owncloud style checksums for SHA1
	hasMESHA1          bool          // set if can use fastmail style checksums for SHA1
	hasMD5             bool          // set if can use owncloud style checksums for MD5
	hasSHA1            bool          // set if can use owncloud style checksums for SHA1
	ntlmAuthMu         sync.Mutex    // mutex to serialize NTLM auth roundtrips
	chunksUploadURL    string        // upload URL for nextcloud chunked
	canChunk           bool          // set if nextcloud and nextcloud_chunk_size is set
}

// Object describes a webdav object
@@ -304,7 +278,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
		},
		NoRedirect: true,
	}
	if f.hasOCMD5 || f.hasOCSHA1 {
	if f.hasMD5 || f.hasSHA1 {
		opts.Body = bytes.NewBuffer(owncloudProps)
	}
	var result api.Multistatus
@@ -437,7 +411,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		opt:         *opt,
		endpoint:    u,
		endpointURL: u.String(),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		precision:   fs.ModTimeNotSupported,
	}

@@ -569,34 +543,19 @@ func (f *Fs) fetchAndSetBearerToken() error {
	return nil
}

var validateNextCloudChunkedURL = regexp.MustCompile(`^.*/dav/files/`)

// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
	switch vendor {
	case "fastmail":
		f.canStream = true
		f.precision = time.Second
		f.useOCMtime = true
		f.hasMESHA1 = true
	case "owncloud":
		f.canStream = true
		f.precision = time.Second
		f.useOCMtime = true
		f.propsetMtime = true
		f.hasOCMD5 = true
		f.hasOCSHA1 = true
		f.hasMD5 = true
		f.hasSHA1 = true
	case "nextcloud":
		f.precision = time.Second
		f.useOCMtime = true
		f.propsetMtime = true
		f.hasOCSHA1 = true
		f.canChunk = true
		if err := f.verifyChunkConfig(); err != nil {
			return err
		}
		f.chunksUploadURL = f.getChunksUploadURL()
		fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
		f.hasSHA1 = true
	case "sharepoint":
		// To mount sharepoint, two Cookies are required
		// They have to be set instead of BasicAuth
@@ -708,7 +667,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
			"Depth": depth,
		},
	}
	if f.hasOCMD5 || f.hasOCSHA1 {
	if f.hasMD5 || f.hasSHA1 {
		opts.Body = bytes.NewBuffer(owncloudProps)
	}
	var result api.Multistatus
@@ -1037,7 +996,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
	dstPath := f.filePath(remote)
	err := f.mkParentDir(ctx, dstPath)
	if err != nil {
		return nil, fmt.Errorf("copy mkParentDir failed: %w", err)
		return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
	}
	destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
	if err != nil {
@@ -1050,7 +1009,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
		NoResponse: true,
		ExtraHeaders: map[string]string{
			"Destination": destinationURL.String(),
			"Overwrite":   "T",
			"Overwrite":   "F",
		},
	}
	if f.useOCMtime {
@@ -1062,18 +1021,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
		return srcFs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("copy call failed: %w", err)
		return nil, fmt.Errorf("Copy call failed: %w", err)
	}
	dstObj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("copy NewObject failed: %w", err)
	}
	if f.useOCMtime && resp.Header.Get("X-OC-Mtime") != "accepted" && f.propsetMtime {
		fs.Debugf(dstObj, "Setting modtime after copy to %v", src.ModTime(ctx))
		err = dstObj.SetModTime(ctx, src.ModTime(ctx))
		if err != nil {
			return nil, fmt.Errorf("failed to set modtime: %w", err)
		}
		return nil, fmt.Errorf("Copy NewObject failed: %w", err)
	}
	return dstObj, nil
}
@@ -1157,7 +1109,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		NoResponse: true,
		ExtraHeaders: map[string]string{
			"Destination": addSlash(destinationURL.String()),
			"Overwrite":   "T",
			"Overwrite":   "F",
		},
	}
	// Direct the MOVE/COPY to the source server
@@ -1174,10 +1126,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	hashes := hash.Set(hash.None)
	if f.hasOCMD5 {
	if f.hasMD5 {
		hashes.Add(hash.MD5)
	}
	if f.hasOCSHA1 || f.hasMESHA1 {
	if f.hasSHA1 {
		hashes.Add(hash.SHA1)
	}
	return hashes
@@ -1245,10 +1197,10 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t == hash.MD5 && o.fs.hasOCMD5 {
	if t == hash.MD5 && o.fs.hasMD5 {
		return o.md5, nil
	}
	if t == hash.SHA1 && (o.fs.hasOCSHA1 || o.fs.hasMESHA1) {
	if t == hash.SHA1 && o.fs.hasSHA1 {
		return o.sha1, nil
	}
	return "", hash.ErrUnsupported
@@ -1270,12 +1222,12 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
	o.hasMetaData = true
	o.size = info.Size
	o.modTime = time.Time(info.Modified)
	if o.fs.hasOCMD5 || o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
	if o.fs.hasMD5 || o.fs.hasSHA1 {
		hashes := info.Hashes()
		if o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
		if o.fs.hasSHA1 {
			o.sha1 = hashes[hash.SHA1]
		}
		if o.fs.hasOCMD5 {
		if o.fs.hasMD5 {
			o.md5 = hashes[hash.MD5]
		}
	}
@@ -1309,53 +1261,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Set modified time using propset
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns"><d:response><d:href>/ocm/remote.php/webdav/office/wir.jpg</d:href><d:propstat><d:prop><d:lastmodified/></d:prop><d:status>HTTP/1.1 200 OK</d:status></d:propstat></d:response></d:multistatus>
var owncloudPropset = `<?xml version="1.0" encoding="utf-8" ?>
<D:propertyupdate xmlns:D="DAV:">
 <D:set>
  <D:prop>
   <lastmodified xmlns="DAV:">%d</lastmodified>
  </D:prop>
 </D:set>
</D:propertyupdate>
`

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	if o.fs.propsetMtime {
		opts := rest.Opts{
			Method:     "PROPPATCH",
			Path:       o.filePath(),
			NoRedirect: true,
			Body:       strings.NewReader(fmt.Sprintf(owncloudPropset, modTime.Unix())),
		}
		var result api.Multistatus
		var resp *http.Response
		var err error
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallXML(ctx, &opts, nil, &result)
			return o.fs.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			if apiErr, ok := err.(*api.Error); ok {
				// does not exist
				if apiErr.StatusCode == http.StatusNotFound {
					return fs.ErrorObjectNotFound
				}
			}
			return fmt.Errorf("couldn't set modified time: %w", err)
		}
		// FIXME check if response is valid
		if len(result.Responses) == 1 && result.Responses[0].Props.StatusOK() {
			// update cached modtime
			o.modTime = modTime
			return nil
		}
		// fallback
		return fs.ErrorCantSetModTime
	}
	return fs.ErrorCantSetModTime
}

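SetModTime, removed in the hunk above, drives the owncloudPropset template through a PROPPATCH request. Rendering the template makes the request body concrete (timestamp illustrative):

	// 2023-01-01T00:00:00Z renders as 1672531200 inside the template, so the
	// PROPPATCH body carries <lastmodified xmlns="DAV:">1672531200</lastmodified>.
	body := fmt.Sprintf(owncloudPropset, time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC).Unix())
	_ = strings.NewReader(body) // what the removed code handed to rest.Opts.Body
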
@@ -1397,72 +1304,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return fmt.Errorf("Update mkParentDir failed: %w", err)
|
||||
}
|
||||
|
||||
if o.shouldUseChunkedUpload(src) {
|
||||
fs.Debugf(src, "Update will use the chunked upload strategy")
|
||||
err = o.updateChunked(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
|
||||
contentType := fs.MimeType(ctx, src)
|
||||
filePath := o.filePath()
|
||||
extraHeaders := o.extraHeaders(ctx, src)
|
||||
// TODO: define getBody() to enable low-level HTTP/2 retries
|
||||
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
NoResponse: true,
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
ContentType: fs.MimeType(ctx, src),
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// read metadata from remote
|
||||
o.hasMetaData = false
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
|
||||
extraHeaders := map[string]string{}
|
||||
if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
|
||||
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
|
||||
opts.ExtraHeaders = map[string]string{}
|
||||
if o.fs.useOCMtime {
|
||||
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
}
|
||||
// Set one upload checksum
|
||||
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
|
||||
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
|
||||
if o.fs.hasOCSHA1 {
|
||||
if o.fs.hasSHA1 {
|
||||
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
|
||||
extraHeaders["OC-Checksum"] = "SHA1:" + sha1
|
||||
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
|
||||
}
|
||||
}
|
||||
if o.fs.hasOCMD5 && extraHeaders["OC-Checksum"] == "" {
|
||||
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
|
||||
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
|
||||
extraHeaders["OC-Checksum"] = "MD5:" + md5
|
||||
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
|
||||
}
|
||||
}
|
||||
}
|
||||
return extraHeaders
|
||||
}
|
||||
|
||||
// Standard update in one request (no chunks)
|
||||
func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func() (io.ReadCloser, error), filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
|
||||
var resp *http.Response
|
||||
|
||||
if extraHeaders == nil {
|
||||
extraHeaders = map[string]string{}
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: filePath,
|
||||
GetBody: getBody,
|
||||
Body: body,
|
||||
NoResponse: true,
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
ContentType: contentType,
|
||||
Options: options,
|
||||
ExtraHeaders: extraHeaders,
|
||||
RootURL: rootURL,
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
@@ -1478,8 +1349,9 @@ func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func(
|
||||
_ = o.Remove(ctx)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
// read metadata from remote
|
||||
o.hasMetaData = false
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test Webdav filesystem interface
|
||||
package webdav
|
||||
package webdav_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/backend/webdav"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
@@ -13,10 +13,7 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavNextcloud:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: 1 * fs.Mebi,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -27,10 +24,7 @@ func TestIntegration2(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavOwncloud:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
Skip: true,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -41,10 +35,7 @@ func TestIntegration3(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavRclone:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
Skip: true,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -55,10 +46,6 @@ func TestIntegration4(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavNTLM:",
|
||||
NilObject: (*Object)(nil),
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
@@ -1100,7 +1100,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
|
||||
NoResponse: true,
|
||||
}
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
@@ -331,6 +331,15 @@ func parsePath(path string) (root string) {
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) splitPath(remote string) (directory, leaf string) {
|
||||
directory, leaf = dircache.SplitPath(remote)
|
||||
if f.root != "" {
|
||||
// Adds the root folder to the path to get a full path
|
||||
directory = path.Join(f.root, directory)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||
@@ -1206,7 +1215,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if partialContent && resp.StatusCode == 200 && resp.Header.Get("Content-Range") == "" {
|
||||
if partialContent && resp.StatusCode == 200 {
|
||||
if start > 0 {
|
||||
// We need to read and discard the beginning of the data...
|
||||
_, err = io.CopyN(io.Discard, resp.Body, start)
|
||||
|
||||
@@ -225,7 +225,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
|
||||
"StringFileInfo": M{
|
||||
"CompanyName": "https://rclone.org",
|
||||
"ProductName": "Rclone",
|
||||
"FileDescription": "Rclone",
|
||||
"FileDescription": "Rsync for cloud storage",
|
||||
"InternalName": "rclone",
|
||||
"OriginalFilename": "rclone.exe",
|
||||
"LegalCopyright": "The Rclone Authors",
|
||||
|
||||
@@ -64,7 +64,6 @@ docs = [
|
||||
"sia.md",
|
||||
"swift.md",
|
||||
"pcloud.md",
|
||||
"pikpak.md",
|
||||
"premiumizeme.md",
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
|
||||
@@ -26,8 +26,7 @@ echo "Making release ${VERSION} anchor ${ANCHOR} to repo ${REPO}"
|
||||
gh release create "${VERSION}" \
|
||||
--repo ${REPO} \
|
||||
--title "rclone ${VERSION}" \
|
||||
--notes-file "/tmp/${VERSION}-release-notes" \
|
||||
--draft=true
|
||||
--notes-file "/tmp/${VERSION}-release-notes"
|
||||
|
||||
for build in build/*; do
|
||||
case $build in
|
||||
@@ -41,10 +40,6 @@ for build in build/*; do
|
||||
"${build}"
|
||||
done
|
||||
|
||||
gh release edit "${VERSION}" \
|
||||
--repo ${REPO} \
|
||||
--draft=false
|
||||
|
||||
gh release view "${VERSION}" \
|
||||
--repo ${REPO}
|
||||
|
||||
|
||||
@@ -98,14 +98,8 @@ Note to run these commands on a running backend then see
|
||||
out, err = doCommand(context.Background(), name, arg, opt)
|
||||
}
|
||||
if err != nil {
|
||||
if err == fs.ErrorCommandNotFound {
|
||||
extra := ""
|
||||
if f.Features().Overlay {
|
||||
extra = " (try the underlying remote)"
|
||||
}
|
||||
return fmt.Errorf("%q %w%s", name, err, extra)
|
||||
}
|
||||
return fmt.Errorf("command %q failed: %w", name, err)
|
||||
|
||||
}
|
||||
// Output the result
|
||||
writeJSON := false
|
||||
|
||||
@@ -824,9 +824,8 @@ func touchFiles(ctx context.Context, dateStr string, f fs.Fs, dir, glob string)
|
||||
err = nil
|
||||
buf := new(bytes.Buffer)
|
||||
size := obj.Size()
|
||||
separator := ""
|
||||
if size > 0 {
|
||||
err = operations.Cat(ctx, f, buf, 0, size, []byte(separator))
|
||||
err = operations.Cat(ctx, f, buf, 0, size)
|
||||
}
|
||||
info := object.NewStaticObjectInfo(remote, date, size, true, nil, f)
|
||||
if err == nil {
|
||||
|
||||
@@ -128,6 +128,7 @@ var commandDefinition = &cobra.Command{
|
||||
ctx := context.Background()
|
||||
opt := Opt
|
||||
opt.applyContext(ctx)
|
||||
|
||||
if tzLocal {
|
||||
TZ = time.Local
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if maxDelete < 0 || maxDelete > 100 {
|
||||
return nil, rc.NewErrParamInvalid(errors.New("maxDelete must be a percentage between 0 and 100"))
|
||||
}
|
||||
opt.MaxDelete = int(maxDelete)
|
||||
ci.MaxDelete = maxDelete
|
||||
} else if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -3,8 +3,8 @@ test check-access-filters
|
||||
# NOTE: Include Other tests may result in listing diffs due to rclone processing order change. False fail.
|
||||
#
|
||||
# Tests are done in two phases:
|
||||
# - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicitly included directories
|
||||
# - INCLUDE OTHER tesss check that RCLONE_TEST files are found in all directories not explicitly excluded
|
||||
# - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicity included directories
|
||||
# - INCLUDE OTHER tesss check that RCLONE_TEST files are found in all directories not explicity excluded
|
||||
#
|
||||
# Each phase checks that:
|
||||
# - missing RCLONE_TEST files in don't care directories don't cause failures
|
||||
|
||||
@@ -16,12 +16,11 @@ import (
|
||||
|
||||
// Globals
|
||||
var (
|
||||
head = int64(0)
|
||||
tail = int64(0)
|
||||
offset = int64(0)
|
||||
count = int64(-1)
|
||||
discard = false
|
||||
separator = string("")
|
||||
head = int64(0)
|
||||
tail = int64(0)
|
||||
offset = int64(0)
|
||||
count = int64(-1)
|
||||
discard = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -32,7 +31,6 @@ func init() {
|
||||
flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)")
|
||||
flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters")
|
||||
flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing")
|
||||
flags.StringVarP(cmdFlags, &separator, "separator", "", separator, "Separator to use between objects when printing multiple files")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -58,18 +56,6 @@ Use the |--head| flag to print characters only at the start, |--tail| for
|
||||
the end and |--offset| and |--count| to print a section in the middle.
|
||||
Note that if offset is negative it will count from the end, so
|
||||
|--offset -1 --count 1| is equivalent to |--tail 1|.
|
||||
|
||||
Use the |--separator| flag to print a separator value between files. Be sure to
|
||||
shell-escape special characters. For example, to print a newline between
|
||||
files, use:
|
||||
|
||||
* bash:
|
||||
|
||||
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|
||||
|
||||
* powershell:
|
||||
|
||||
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
@@ -96,7 +82,7 @@ files, use:
|
||||
w = io.Discard
|
||||
}
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return operations.Cat(context.Background(), fsrc, w, offset, count, []byte(separator))
|
||||
return operations.Cat(context.Background(), fsrc, w, offset, count)
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
@@ -72,9 +72,6 @@ you what happened to it. These are reminiscent of diff files.
|
||||
- |+ path| means path was missing on the destination, so only in the source
|
||||
- |* path| means path was present in source and destination but different.
|
||||
- |! path| means there was an error reading or hashing the source or dest.
|
||||
|
||||
The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
|
||||
option for more information.
|
||||
`, "|", "`")
|
||||
|
||||
// GetCheckOpt gets the options corresponding to the check flags
|
||||
@@ -145,7 +142,7 @@ match. It doesn't alter the source or destination.
|
||||
|
||||
For the [crypt](/crypt/) remote there is a dedicated command,
|
||||
[cryptcheck](/commands/rclone_cryptcheck/), that are able to check
|
||||
the checksums of the encrypted files.
|
||||
the checksums of the crypted files.
|
||||
|
||||
If you supply the |--size-only| flag, it will only compare the sizes not
|
||||
the hashes as well. Use this for a quick check.
|
||||
|
||||
@@ -160,11 +160,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
|
||||
fsys := NewFS(VFS)
|
||||
host := fuse.NewFileSystemHost(fsys)
|
||||
host.SetCapReaddirPlus(true) // only works on Windows
|
||||
if opt.CaseInsensitive.Valid {
|
||||
host.SetCapCaseInsensitive(opt.CaseInsensitive.Value)
|
||||
} else {
|
||||
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
|
||||
}
|
||||
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
|
||||
|
||||
// Create options
|
||||
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
|
||||
|
||||
@@ -28,7 +28,7 @@ func init() {
|
||||
// returns an error, and an error channel for the serve process to
|
||||
// report an error when fusermount is called.
|
||||
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
|
||||
return nil, nil, errors.New("rclone mount is not supported on MacOS when rclone is installed via Homebrew. " +
|
||||
"Please install the rclone binaries available at https://rclone.org/downloads/ " +
|
||||
"instead if you want to use the rclone mount command")
|
||||
return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
|
||||
"Please install the binaries available at https://rclone." +
|
||||
"org/downloads/ instead if you want to use the mount command")
|
||||
}
|
||||
|
||||
@@ -26,5 +26,6 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
|
||||
if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
|
||||
return "", err
|
||||
}
|
||||
opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
|
||||
return mountPath, nil
|
||||
}
|
||||
|
||||
@@ -9,9 +9,11 @@ import (
     "os"
     "path/filepath"
     "regexp"
+    "strings"
 
     "github.com/rclone/rclone/cmd/mountlib"
     "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/file"
 )
@@ -19,10 +21,13 @@ var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
 var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
 var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
 var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
+var isAnyPathSeparatorRegex = regexp.MustCompile(`[/\\]+`) // Matches any path separators, slash or backslash, or sequences of them
 
-// isNetworkSharePath returns true if the given string is a valid network share path,
-// in the basic UNC format "\\Server\Share\Path", where the first two path components
-// are required ("\\Server\Share", which represents the volume).
+// isNetworkSharePath returns true if the given string is a network share path,
+// in the basic UNC format "\\Server\Share\Path". The first two path components
+// are required ("\\Server\Share"), and represents the volume. The rest of the
+// string can be anything, i.e. can be a nested path ("\\Server\Share\Path\Path\Path").
+// Actual validity of the path, e.g. if it contains invalid characters, is not considered.
 // Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
 // not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
 // Note: There is a UNCPath function in lib/file, but it refers to any extended-length
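The classification regexes above can be exercised directly; a small self-contained check (the sample paths are invented):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        isNetworkSharePathRegex := regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)

        fmt.Println(isNetworkSharePathRegex.MatchString(`\\server\share\path`))  // true
        fmt.Println(isNetworkSharePathRegex.MatchString(`\\?\UNC\server\share`)) // false: extended-length prefix rejected
        fmt.Println(isNetworkSharePathRegex.MatchString(`X:\path`))              // false: drive path, not UNC
    }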
@@ -111,7 +116,7 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
     // Drive letter string can be used as is, since we have already checked it does not exist,
     // but directory path needs more checks.
     if opt.NetworkMode {
-        fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
+        fs.Debugf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
         opt.NetworkMode = false
     }
     var err error
@@ -132,30 +137,47 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
     return mountpath, nil
 }
 
+// networkSharePathEncoder is an encoder used to make strings valid as (part of) Windows network share UNC paths
+const networkSharePathEncoder = (encoder.EncodeZero | // NUL(0x00)
+    encoder.EncodeCtl | // CTRL(0x01-0x1F)
+    encoder.EncodeDel | // DEL(0x7F)
+    encoder.EncodeWin | // :?"*<>|
+    encoder.EncodeInvalidUtf8) // Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
+
+// encodeNetworkSharePath makes a string valid to use as (part of) a Windows network share UNC path.
+// Using backslash as path separator here, but forward slashes would also be treated as
+// path separators by the library, and therefore does not encode either of them. For convenience,
+// normalizes to backslashes-only. UNC paths always start with two path separators, but WinFsp
+// requires volume prefix as UNC-like path but with only a single backslash prefix, and multiple
+// separators are not valid in any other parts of network share paths, so therefore (unlike what
+// filepath.FromSlash would do) replaces multiple separators with a single one (like filepath.Clean
+// would do, but it does also more). A trailing path separator would just be ignored, but we
+// remove it here as well for convenience.
+func encodeNetworkSharePath(volumeName string) string {
+    return networkSharePathEncoder.Encode(strings.TrimRight(isAnyPathSeparatorRegex.ReplaceAllString(volumeName, `\`), `\`))
+}
+
 // handleVolumeName handles the volume name option.
-func handleVolumeName(opt *mountlib.Options, volumeName string) {
-    // If volumeName parameter is set, then just set that into options replacing any existing value.
-    // Else, ensure the volume name option is a valid network share UNC path if network mode,
+func handleVolumeName(opt *mountlib.Options) {
+    // Ensure the volume name option is a valid network share UNC path if network mode,
     // and ensure network mode if configured volume name is already UNC path.
-    if volumeName != "" {
-        opt.VolumeName = volumeName
-    } else if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
+    if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
         // Use value of given volume name option, but check if it is disk volume name or network volume prefix
         if isNetworkSharePath(opt.VolumeName) {
-            // Specified volume name is network share UNC path, assume network mode and use it as volume prefix
-            opt.VolumeName = opt.VolumeName[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
+            opt.VolumeName = encodeNetworkSharePath(opt.VolumeName[1:]) // We know from isNetworkSharePath it has a duplicate path separator prefix, so removes that right away (but encodeNetworkSharePath would remove it also)
             if !opt.NetworkMode {
+                // Specified volume name is network share UNC path, force network mode and use it as volume prefix
                 fs.Debugf(nil, "Forcing network mode due to network share (UNC) volume name")
                 opt.NetworkMode = true
             }
         } else if opt.NetworkMode {
-            // Plain volume name treated as share name in network mode, append to hard coded "\\server" prefix to get full volume prefix.
-            opt.VolumeName = "\\server\\" + opt.VolumeName
+            // Specified volume name is not a valid network share UNC path, but network mode is enabled, so append to a hard coded server prefix and use it as volume prefix
+            opt.VolumeName = `\server\` + strings.TrimLeft(encodeNetworkSharePath(opt.VolumeName), `\`)
         }
     } else if opt.NetworkMode {
-        // Hard coded default
-        opt.VolumeName = "\\server\\share"
+        // Use hard coded default
+        opt.VolumeName = `\server\share`
     }
 }
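Net effect of the new handleVolumeName: a UNC-style --volname forces network mode and becomes the volume prefix; a plain --volname under --network-mode becomes \server\<name>; no --volname with --network-mode falls back to \server\share. The separator normalization inside encodeNetworkSharePath can be reproduced with the two standard-library calls it is built from (sample input invented; the encoder step that maps reserved characters like :?"*<>| to safe equivalents is omitted here):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    func main() {
        isAnyPathSeparatorRegex := regexp.MustCompile(`[/\\]+`)
        // Collapse runs of slashes/backslashes into single backslashes...
        norm := isAnyPathSeparatorRegex.ReplaceAllString(`server//share/sub\`, `\`)
        // ...then drop any trailing separator.
        norm = strings.TrimRight(norm, `\`)
        fmt.Println(norm) // Output: server\share\sub
    }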
@@ -174,22 +196,27 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
     }
 
     // Handle mountpath
-    var volumeName string
     if isDefaultPath(mountpath) {
         // Mount path indicates defaults, which will automatically pick an unused drive letter.
-        mountpoint, err = handleDefaultMountpath()
+        if mountpoint, err = handleDefaultMountpath(); err != nil {
+            return
+        }
     } else if isNetworkSharePath(mountpath) {
         // Mount path is a valid network share path (UNC format, "\\Server\Share" prefix).
-        mountpoint, err = handleNetworkShareMountpath(mountpath, opt)
-        // In this case the volume name is taken from the mount path, will replace any existing volume name option.
-        volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
+        if mountpoint, err = handleNetworkShareMountpath(mountpath, opt); err != nil {
+            return
+        }
+        // In this case the volume name is taken from the mount path, it replaces any existing volume name option.
+        opt.VolumeName = mountpath
     } else {
         // Mount path is drive letter or directory path.
-        mountpoint, err = handleLocalMountpath(f, mountpath, opt)
+        if mountpoint, err = handleLocalMountpath(f, mountpath, opt); err != nil {
+            return
+        }
     }
 
     // Handle volume name
-    handleVolumeName(opt, volumeName)
+    handleVolumeName(opt)
 
     // Done, return mountpoint to be used, together with updated mount options.
     if opt.NetworkMode {
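The rewritten branches lean on getMountpoint's named return values (mountpoint, err): assigning inside the if-condition and issuing a bare return propagates both values. A minimal illustration of the idiom (function names invented):

    package example

    import "errors"

    func pick() (result string, err error) {
        if result, err = choose(); err != nil {
            return // bare return: hands back the named result and err
        }
        result += "!"
        return
    }

    func choose() (string, error) { return "", errors.New("no candidates") }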
@@ -22,11 +22,11 @@ func init() {
 
 var commandDefinition = &cobra.Command{
     Use:   "cryptcheck remote:path cryptedremote:path",
-    Short: `Cryptcheck checks the integrity of an encrypted remote.`,
+    Short: `Cryptcheck checks the integrity of a crypted remote.`,
     Long: `
 rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
 This is the equivalent of running rclone [check](/commands/rclone_check/),
-but able to check the checksums of the encrypted remote.
+but able to check the checksums of the crypted remote.
 
 For it to work the underlying remote of the cryptedremote must support
 some kind of checksum.
@@ -59,7 +59,7 @@ After it has run it will log the status of the encryptedremote:.
     },
 }
 
-// cryptCheck checks the integrity of an encrypted remote
+// cryptCheck checks the integrity of a crypted remote
 func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
     // Check to see fcrypt is a crypt
     fcrypt, ok := fdst.(*crypt.Fs)
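The assertion on the last line above is presumably followed by an ok-guard; the standard shape of that pattern, wrapped here in an invented helper with an invented error message:

    package example

    import (
        "fmt"

        "github.com/rclone/rclone/backend/crypt"
        "github.com/rclone/rclone/fs"
    )

    // requireCrypt returns fdst as a *crypt.Fs, or an error if the
    // destination is not a crypt remote.
    func requireCrypt(fdst fs.Fs) (*crypt.Fs, error) {
        fcrypt, ok := fdst.(*crypt.Fs)
        if !ok {
            return nil, fmt.Errorf("%s: is not a crypt remote", fdst.Name())
        }
        return fcrypt, nil
    }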
@@ -11,7 +11,7 @@ func init() {
 }
 
 var completionDefinition = &cobra.Command{
-    Use:   "completion [shell]",
+    Use:   "genautocomplete [shell]",
     Short: `Output completion script for a given shell.`,
     Long: `
 Generates a shell completion script for rclone.
@@ -20,5 +20,4 @@ Run with ` + "`--help`" + ` to list the supported shells.
     Annotations: map[string]string{
         "versionIntroduced": "v1.33",
     },
-    Aliases: []string{"genautocomplete"},
 }
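These two hunks undo a command rename: on the fix-7103-n side the command is completion with genautocomplete kept as an alias, while mount-wind predates the rename. The general cobra pattern for renaming a command without breaking existing scripts, as a sketch:

    package example

    import "github.com/spf13/cobra"

    // newCompletionCmd renames a command while keeping the old name
    // working: Use carries the new name, Aliases the old one.
    func newCompletionCmd() *cobra.Command {
        return &cobra.Command{
            Use:     "completion [shell]",
            Aliases: []string{"genautocomplete"},
            Short:   "Output completion script for a given shell.",
            RunE: func(cmd *cobra.Command, args []string) error {
                return nil // the real command generates the script here
            },
        }
    }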
Some files were not shown because too many files have changed in this diff.