mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
31cb3beb7b s3: attempt to fix --s3-profile failing when explicit s3 endpoint is present FIXME DO NOT MERGE
This effectively reverts a fix so shouldn't be merged directly
2023-02-28 16:34:21 +00:00
361 changed files with 7838 additions and 28355 deletions
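
The failure mode named in the commit title combines credentials from a shared-config profile with an explicit endpoint override. A minimal, hypothetical reproduction using aws-sdk-go v1 (the SDK rclone used at the time) might look like the sketch below; the profile name and endpoint are placeholders, not rclone's actual wiring:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Credentials resolved from a named shared-config profile while the
	// S3 endpoint is overridden explicitly.
	sess, err := session.NewSessionWithOptions(session.Options{
		Profile:           "myprofile", // placeholder profile name
		SharedConfigState: session.SharedConfigEnable,
		Config: aws.Config{
			Endpoint: aws.String("https://s3.example.com"), // placeholder endpoint
			Region:   aws.String("us-east-1"),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved region:", aws.StringValue(sess.Config.Region))
}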

.github/FUNDING.yml vendored Normal file (4 changed lines)
View File

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

View File

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

View File

@@ -8,9 +8,9 @@ name: build
on:
push:
branches:
- '**'
- '*'
tags:
- '**'
- '*'
pull_request:
workflow_dispatch:
inputs:
@@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
include:
- job_name: linux
os: ubuntu-latest
go: '1.21.0-rc.3'
go: '1.20'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.21.0-rc.3'
go: '1.20'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.21.0-rc.3'
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.21.0-rc.3'
go: '1.20'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.21.0-rc.3'
go: '1.20'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,20 +76,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.21.0-rc.3'
go: '1.20'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.19
- job_name: go1.18
os: ubuntu-latest
go: '1.19'
go: '1.18'
quicktest: true
racequicktest: true
- job_name: go1.20
- job_name: go1.19
os: ubuntu-latest
go: '1.20'
go: '1.19'
quicktest: true
racequicktest: true
@@ -104,7 +104,7 @@ jobs:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -130,11 +130,6 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -222,7 +217,7 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
@@ -242,9 +237,9 @@ jobs:
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.21.0-rc.3'
go-version: '1.20'
check-latest: true
- name: Install govulncheck
@@ -267,9 +262,9 @@ jobs:
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.21.0-rc.3'
go-version: '1.20'
- name: Go module cache
uses: actions/cache@v3
@@ -357,4 +352,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
if: github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -1,61 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252

View File

@@ -0,0 +1,26 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

View File

@@ -2,17 +2,15 @@
linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -27,30 +25,6 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

View File

@@ -419,7 +419,7 @@ remote or an fs.
Research
* Look at the interfaces defined in `fs/types.go`
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going

View File

@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

View File

@@ -16,9 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
**This is a work in progress Draft**

MANUAL.html generated (3933 changed lines)

File diff suppressed because it is too large

MANUAL.md generated (4234 changed lines)

File diff suppressed because it is too large

MANUAL.txt generated (4405 changed lines)

File diff suppressed because it is too large

View File

@@ -96,7 +96,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows
release_dep_windows:

View File

@@ -25,19 +25,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -51,7 +50,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -62,15 +60,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
@@ -85,7 +80,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries

View File

@@ -1 +1 @@
v1.64.0
v1.62.0

View File

@@ -36,9 +36,7 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@@ -58,8 +58,6 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
modTimeKey = "mtime"
dirMetaKey = "hdi_isfolder"
dirMetaValue = "true"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
@@ -95,7 +93,6 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
If this is blank and if env_auth is set it will be read from the
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
`,
Sensitive: true,
}, {
Name: "env_auth",
Help: `Read credentials from runtime (environment variables, CLI or MSI).
@@ -107,13 +104,11 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
Help: `Storage Account Shared Key.
Leave blank to use SAS URL or Emulator.`,
Sensitive: true,
}, {
Name: "sas_url",
Help: `SAS URL for container level access only.
Leave blank if using account/key or Emulator.`,
Sensitive: true,
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.
@@ -123,7 +118,6 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
Sensitive: true,
}, {
Name: "client_id",
Help: `The ID of the client in use.
@@ -133,7 +127,6 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
Sensitive: true,
}, {
Name: "client_secret",
Help: `One of the service principal's client secrets
@@ -141,7 +134,6 @@ Set this if using
Set this if using
- Service principal with client secret
`,
Sensitive: true,
}, {
Name: "client_certificate_path",
Help: `Path to a PEM or PKCS12 certificate file including the private key.
@@ -179,8 +171,7 @@ Optionally set this if using
Set this if using
- User with username and password
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "password",
Help: `The user's password
@@ -223,20 +214,17 @@ msi_client_id, or msi_mi_res_id parameters.`,
Default: false,
Advanced: true,
}, {
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
Sensitive: true,
Name: "msi_object_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
Sensitive: true,
Name: "msi_client_id",
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
Advanced: true,
}, {
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
Name: "msi_mi_res_id",
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
@@ -375,18 +363,6 @@ This option controls how often unused buffers will be removed from the pool.`,
},
},
Advanced: true,
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option
creates an empty object ending with "/", to persist the folder.
This object also has the metadata "` + dirMetaKey + ` = ` + dirMetaValue + `" to conform to
the Microsoft standard.
`,
}, {
Name: "no_check_container",
Help: `If set, don't attempt to check the container exists or create it.
@@ -436,7 +412,6 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
DirectoryMarkers bool `config:"directory_markers"`
NoCheckContainer bool `config:"no_check_container"`
NoHeadObject bool `config:"no_head_object"`
}
@@ -511,7 +486,7 @@ func parsePath(path string) (root string) {
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
@@ -689,10 +664,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
SetTier: true,
GetTier: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
fs.Debugf(f, "Using directory markers")
}
// Client options specifying our own transport
policyClientOptions := policy.ClientOptions{
@@ -719,7 +690,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
}
case opt.UseEmulator:
if opt.Account == "" {
@@ -935,7 +906,7 @@ func (f *Fs) cntSVC(containerName string) (containerClient *container.Client) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *container.BlobItem) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *container.BlobItem) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -946,7 +917,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *contain
return nil, err
}
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData(ctx) // reads info and headers, returning an error
err := o.readMetaData() // reads info and headers, returning an error
if err != nil {
return nil, err
}
@@ -957,7 +928,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *contain
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// getBlobSVC creates a blob client
@@ -982,7 +953,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
}
// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
// Directory markers are 0 length
if size == 0 {
endsWithSlash := strings.HasSuffix(remote, "/")
@@ -993,7 +964,31 @@ func isDirectoryMarker(size int64, metadata map[string]*string, remote string) b
// defacto standard for marking blobs as directories.
// Note also that the metadata hasn't been normalised to lower case yet
for k, v := range metadata {
if v != nil && strings.EqualFold(k, dirMetaKey) && *v == dirMetaValue {
if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
return true
}
}
}
return false
}
// Returns whether file is a directory marker or not using metadata
// with pointers to strings as the SDK seems to use both forms rather
// annoyingly.
//
// NB This is a duplicate of isDirectoryMarker
func isDirectoryMarkerP(size int64, metadata map[string]*string, remote string) bool {
// Directory markers are 0 length
if size == 0 {
endsWithSlash := strings.HasSuffix(remote, "/")
if endsWithSlash || remote == "" {
return true
}
// Note that metadata with hdi_isfolder = true seems to be a
// defacto standard for marking blobs as directories.
// Note also that the metadata hasn't been normalised to lower case yet
for k, pv := range metadata {
if strings.EqualFold(k, "hdi_isfolder") && pv != nil && *pv == "true" {
return true
}
}
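
As the comment notes, isDirectoryMarkerP duplicates isDirectoryMarker only because the SDK hands back metadata as both map[string]string and map[string]*string. A minimal sketch of unifying the two with a type parameter, assuming Go 1.18+; the helper names are hypothetical, not part of this commit:

package main

import (
	"fmt"
	"strings"
)

// metaValue normalises the two metadata shapes the Azure SDK returns.
func metaValue[V string | *string](v V) (string, bool) {
	switch x := any(v).(type) {
	case string:
		return x, true
	case *string:
		if x == nil {
			return "", false
		}
		return *x, true
	}
	return "", false
}

// hasFolderMeta works for map[string]string and map[string]*string alike.
func hasFolderMeta[V string | *string](metadata map[string]V) bool {
	for k, v := range metadata {
		if s, ok := metaValue(v); ok && strings.EqualFold(k, "hdi_isfolder") && s == "true" {
			return true
		}
	}
	return false
}

func main() {
	v := "true"
	fmt.Println(hasFolderMeta(map[string]string{"hdi_isfolder": "true"})) // true
	fmt.Println(hasFolderMeta(map[string]*string{"HDI_ISFOLDER": &v}))    // true
}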
@@ -1038,7 +1033,6 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
Prefix: &directory,
MaxResults: &maxResults,
})
foundItems := 0
for pager.More() {
var response container.ListBlobsHierarchyResponse
err := f.pacer.Call(func() (bool, error) {
@@ -1057,7 +1051,6 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
}
// Advance marker to next
// marker = response.NextMarker
foundItems += len(response.Segment.BlobItems)
for i := range response.Segment.BlobItems {
file := response.Segment.BlobItems[i]
// Finish if file name no longer has prefix
@@ -1073,27 +1066,20 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
fs.Debugf(f, "Odd name received %q", remote)
continue
}
isDirectory := isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote)
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if isDirectoryMarkerP(*file.Properties.ContentLength, file.Metadata, remote) {
continue // skip directory marker
}
if addContainer {
remote = path.Join(containerName, remote)
}
// Send object
err = fn(remote, file, isDirectory)
err = fn(remote, file, false)
if err != nil {
return err
}
}
// Send the subdirectories
foundItems += len(response.Segment.BlobPrefixes)
for _, remote := range response.Segment.BlobPrefixes {
if remote.Name == nil {
fs.Debugf(f, "Nil prefix received")
@@ -1116,26 +1102,16 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
}
}
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readMetaData(ctx, containerName, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *container.BlobItem, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{})
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
@@ -1163,7 +1139,7 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -1244,7 +1220,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
list := walk.NewListRHelper(callback)
listR := func(containerName, directory, prefix string, addContainer bool) error {
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -1338,71 +1314,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, container, dir string) error {
if !f.opt.DirectoryMarkers || container == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
meta: map[string]string{
dirMetaKey: dirMetaValue,
},
}
for {
_, containerPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if containerPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := f.readMetaData(ctx, container, containerPath+"/")
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
e := f.makeContainer(ctx, container)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, container, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
@@ -1502,18 +1417,6 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && container != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if container == "" || directory != "" {
return nil
}
@@ -1555,7 +1458,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote)
err := f.makeContainer(ctx, dstContainer)
if err != nil {
return nil, err
}
@@ -1568,8 +1471,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcBlobSVC := srcObj.getBlobSVC()
srcURL := srcBlobSVC.URL()
tier := blob.AccessTier(f.opt.AccessTier)
options := blob.StartCopyFromURLOptions{
Tier: parseTier(f.opt.AccessTier),
Tier: &tier,
}
var startCopy blob.StartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
@@ -1648,15 +1552,12 @@ func (o *Object) Size() int64 {
return o.size
}
// Set o.metadata from metadata
func (o *Object) setMetadata(metadata map[string]*string) {
func (o *Object) setMetadata(metadata map[string]string) {
if len(metadata) > 0 {
// Lower case the metadata
o.meta = make(map[string]string, len(metadata))
for k, v := range metadata {
if v != nil {
o.meta[strings.ToLower(k)] = *v
}
o.meta[strings.ToLower(k)] = v
}
// Set o.modTime from metadata if it exists and
// UseServerModTime isn't in use.
@@ -1672,17 +1573,20 @@ func (o *Object) setMetadata(metadata map[string]*string) {
}
}
// Get metadata from o.meta
func (o *Object) getMetadata() (metadata map[string]*string) {
if len(o.meta) == 0 {
return nil
// Duplicte of setMetadata but taking pointers to strings
func (o *Object) setMetadataP(metadata map[string]*string) {
if len(metadata) > 0 {
// Convert the format of the metadata
newMeta := make(map[string]string, len(metadata))
for k, v := range metadata {
if v != nil {
newMeta[k] = *v
}
}
o.setMetadata(newMeta)
} else {
o.meta = nil
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata
}
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
@@ -1792,7 +1696,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
} else {
size = *info.Properties.ContentLength
}
if isDirectoryMarker(size, metadata, o.remote) {
if isDirectoryMarkerP(size, metadata, o.remote) {
return fs.ErrorNotAFile
}
// NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
@@ -1814,7 +1718,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
} else {
o.accessTier = *info.Properties.AccessTier
}
o.setMetadata(metadata)
o.setMetadataP(metadata)
return nil
}
@@ -1825,34 +1729,17 @@ func (o *Object) getBlobSVC() *blob.Client {
return o.fs.getBlobSVC(container, directory)
}
// getBlockBlobSVC creates a block blob client
func (o *Object) getBlockBlobSVC() *blockblob.Client {
container, directory := o.split()
return o.fs.getBlockBlobSVC(container, directory)
}
// clearMetaData clears enough metadata so readMetaData will re-read it
func (o *Object) clearMetaData() {
o.modTime = time.Time{}
}
// readMetaData gets the metadata if it hasn't already been fetched
func (f *Fs) readMetaData(ctx context.Context, container, containerPath string) (blobProperties blob.GetPropertiesResponse, err error) {
if !f.containerOK(container) {
return blobProperties, fs.ErrorObjectNotFound
}
blb := f.getBlobSVC(container, containerPath)
// Read metadata (this includes metadata)
options := blob.GetPropertiesOptions{}
err = f.pacer.Call(func() (bool, error) {
blobProperties, err = blb.GetProperties(ctx, &options)
return f.shouldRetry(ctx, err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
return blobProperties, fs.ErrorObjectNotFound
}
return blobProperties, err
}
return blobProperties, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
@@ -1861,15 +1748,33 @@ func (f *Fs) readMetaData(ctx context.Context, container, containerPath string)
// o.modTime
// o.size
// o.md5
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
container, containerPath := o.split()
blobProperties, err := o.fs.readMetaData(ctx, container, containerPath)
blb := o.getBlobSVC()
// fs.Debugf(o, "Blob URL = %q", blb.URL())
// Read metadata (this includes metadata)
options := blob.GetPropertiesOptions{}
ctx := context.Background()
var blobProperties blob.GetPropertiesResponse
err = o.fs.pacer.Call(func() (bool, error) {
blobProperties, err = blb.GetProperties(ctx, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
if storageErr, ok := err.(*azcore.ResponseError); ok && (storageErr.ErrorCode == string(bloberror.BlobNotFound) || storageErr.StatusCode == http.StatusNotFound) {
return fs.ErrorObjectNotFound
}
return err
}
return o.decodeMetaDataFromPropertiesResponse(&blobProperties)
}
@@ -1879,7 +1784,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData(ctx)
_ = o.readMetaData()
return o.modTime
}
@@ -1895,7 +1800,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
blb := o.getBlobSVC()
opt := blob.SetMetadataOptions{}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
_, err := blb.SetMetadata(ctx, o.meta, &opt)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
@@ -1966,6 +1871,48 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return downloadResponse.Body, nil
}
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
type poolWrapper struct {
pool *pool.Pool
bufToken chan struct{}
runToken chan struct{}
}
// newPoolWrapper creates an azblob.TransferManager that will use a
// pool.Pool with maximum concurrency as specified.
func (f *Fs) newPoolWrapper(concurrency int) *poolWrapper {
return &poolWrapper{
pool: f.pool,
bufToken: make(chan struct{}, concurrency),
runToken: make(chan struct{}, concurrency),
}
}
// Get implements TransferManager.Get().
func (pw *poolWrapper) Get() []byte {
pw.bufToken <- struct{}{}
return pw.pool.Get()
}
// Put implements TransferManager.Put().
func (pw *poolWrapper) Put(b []byte) {
pw.pool.Put(b)
<-pw.bufToken
}
// Run implements TransferManager.Run().
func (pw *poolWrapper) Run(f func()) {
pw.runToken <- struct{}{}
go func() {
f()
<-pw.runToken
}()
}
// Close implements TransferManager.Close().
func (pw *poolWrapper) Close() {
}
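
The bufToken and runToken channels are counting semaphores: a send acquires a token, a receive releases it, and the channel capacity bounds concurrency. A standalone sketch of the same pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// sem is a counting semaphore built from a buffered channel, the same
// pattern as bufToken/runToken above: send to acquire, receive to release.
type sem chan struct{}

func (s sem) acquire() { s <- struct{}{} } // blocks once cap(s) tokens are out
func (s sem) release() { <-s }

func main() {
	workers := make(sem, 2) // at most 2 jobs run concurrently
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		workers.acquire()
		go func(i int) {
			defer wg.Done()
			defer workers.release()
			fmt.Println("job", i)
		}(i)
	}
	wg.Wait()
}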
// Converts a string into a pointer to a string
func pString(s string) *string {
return &s
@@ -2147,9 +2094,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
return err
}
tier := blob.AccessTier(o.fs.opt.AccessTier)
options := blockblob.CommitBlockListOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
Metadata: o.meta,
Tier: &tier,
HTTPHeaders: httpHeaders,
}
@@ -2193,9 +2141,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}
tier := blob.AccessTier(o.fs.opt.AccessTier)
options := blockblob.UploadOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
Metadata: o.meta,
Tier: &tier,
HTTPHeaders: httpHeaders,
}
@@ -2225,17 +2174,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
_, isDirMarker := o.meta[dirMetaKey]
if !isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
}
// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
@@ -2283,7 +2227,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize
fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
} else {
@@ -2294,12 +2237,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Refresh metadata on object
if !isDirMarker {
o.clearMetaData()
err = o.readMetaData(ctx)
if err != nil {
return err
}
o.clearMetaData()
err = o.readMetaData()
if err != nil {
return err
}
// If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation
@@ -2373,14 +2314,6 @@ func (o *Object) GetTier() string {
return string(o.accessTier)
}
func parseTier(tier string) *blob.AccessTier {
if tier == "" {
return nil
}
msTier := blob.AccessTier(tier)
return &msTier
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob

View File

@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
package azureblob
@@ -9,7 +9,6 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -26,25 +25,6 @@ func TestIntegration(t *testing.T) {
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
package azureblob

View File

@@ -75,15 +75,13 @@ func init() {
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
Sensitive: true,
Name: "account",
Help: "Account ID or Application Key ID.",
Required: true,
}, {
Name: "key",
Help: "Application Key.",
Required: true,
Sensitive: true,
Name: "key",
Help: "Application Key.",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -1223,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1237,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {

View File

@@ -27,7 +27,6 @@ import (
"sync/atomic"
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -76,11 +76,6 @@ var (
}
)
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -107,18 +102,16 @@ func init() {
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Sensitive: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -185,7 +178,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
@@ -201,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
}
claims = &boxCustomClaims{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
StandardClaims: jwt.StandardClaims{
Id: val,
Issuer: boxConfig.BoxAppSettings.ClientID,
Subject: boxConfig.EnterpriseID,
Audience: tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
BoxSubType: boxSubType,
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
signingHeaders := map[string]interface{}{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
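
For reference, the jws.Header and jws.ClaimSet built here feed golang.org/x/oauth2/jws.Encode roughly as sketched below; the key, IDs, and URL are placeholders, and rclone's jwtutil wraps this step differently:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Throwaway key for illustration; the real key comes from the Box
	// config.json handled by getBoxConfig.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT", KeyID: "public-key-id"}
	claims := &jws.ClaimSet{
		Iss:           "client-id",                          // placeholder
		Sub:           "enterprise-id",                      // placeholder
		Aud:           "https://api.box.com/oauth2/token",   // placeholder token URL
		Exp:           time.Now().Add(45 * time.Second).Unix(),
		PrivateClaims: map[string]interface{}{"box_sub_type": "enterprise"},
	}
	assertion, err := jws.Encode(header, claims, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("client assertion:", assertion[:24], "...")
}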

View File

@@ -76,19 +76,17 @@ func init() {
Name: "plex_url",
Help: "The URL of the Plex server.",
}, {
Name: "plex_username",
Help: "The username of the Plex user.",
Sensitive: true,
Name: "plex_username",
Help: "The username of the Plex user.",
}, {
Name: "plex_password",
Help: "The password of the Plex user.",
IsPassword: true,
}, {
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
Sensitive: true,
Name: "plex_token",
Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1789,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
// StopBackgroundRunners will signal all the runners to stop their work
// StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false

View File

@@ -1098,6 +1098,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error

View File

@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
minSize := 5242880
maxSize := 10485760
totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
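
One side of this hunk replaces the global rand.Seed (deprecated since Go 1.20) with a dedicated *rand.Rand, keeping the test's randomness local to the run instead of mutating process-wide state. A minimal sketch of the idiom, with the sizes copied from the test:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// A dedicated generator avoids seeding the shared global source.
	r := rand.New(rand.NewSource(time.Now().Unix()))
	minSize, maxSize := 5242880, 10485760
	size := int64(r.Intn(maxSize-minSize) + minSize)
	fmt.Println("size:", size)
}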

View File

@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine
/*
@@ -233,7 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -290,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
@@ -310,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// show that we wrap other backends
features.Overlay = true
f.features = features
// Get common intersection of hashes
@@ -365,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}
// join the elements together but unlike path.Join return empty string
// join the elements together but unline path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
@@ -901,100 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
})
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
@@ -1024,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err)
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
@@ -1096,10 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)

View File

@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
QuickTestOK: true,
})
}
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -186,7 +186,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
)
// Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!'
)
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block
mode NameEncryptionMode
fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
mode: mode,
fileNameEnc: enc,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
}
c.buffers.New = func() interface{} {
return new([blockSize]byte)
return make([]byte, blockSize)
}
err := c.Key(password, salt)
if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil
}
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt.
// slighty harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf)
}
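
The pool change above swaps a `*[blockSize]byte` for a plain `[]byte`. A sketch of the slice-based pattern, assuming the same block size; note that storing a bare slice in a sync.Pool allocates an interface header on every Put (staticcheck SA6002), which is presumably why the pointer-to-array form being removed here was introduced in the first place:

package main

import "sync"

const blockSize = 16 + 64*1024

// pool recycles encrypt/decrypt buffers so each block operation
// doesn't allocate a fresh 64 KiB + 16 byte buffer.
var pool = sync.Pool{
	New: func() interface{} { return make([]byte, blockSize) },
}

func getBlock() []byte { return pool.Get().([]byte) }

func putBlock(buf []byte) {
	if len(buf) != blockSize {
		panic("bad blocksize returned to pool")
	}
	pool.Put(buf)
}

func main() {
	b := getBlock()
	defer putBlock(b)
	_ = b
}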
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix
return in + encryptedSuffix
}
return c.encryptFileName(in)
}
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
}
decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator
func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:])
read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err)
}
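
The swap from readers.ReadFill back to io.ReadFull changes short-read semantics: io.ReadFull reports io.ErrUnexpectedEOF on a partial read, while ReadFill keeps reading until the buffer is full or the reader errors, passing the underlying error (a plain io.EOF) through with the byte count. A sketch of the ReadFill contract assumed by the code being removed:

package sketch

import "io"

// readFill reads into buf until it is full or the reader errors,
// returning the count together with the reader's own error
// (io.EOF rather than io.ErrUnexpectedEOF on a short read).
func readFill(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}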
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader
c *Cipher
nonce nonce
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
}
}
// Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes)
copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil
}
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize {
// Read data
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf)
readBuf := fh.readBuf[:blockDataSize]
n, err = io.ReadFull(fh.in, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err)
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err
// data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n
return n, nil
}
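
The Read method above follows the standard chunked-reader shape: when the internal buffer is exhausted (bufIndex >= bufSize) it refills and seals a fresh block, then serves callers out of the buffer with copy. Reduced to its skeleton, with illustrative names rather than the backend's:

package sketch

import "io"

// bufReader serves Reads from an internal buffer, refilling on demand —
// the same shape as encrypter.Read and decrypter.Read above.
type bufReader struct {
	src  io.Reader
	buf  []byte
	i, n int // read cursor and fill level
}

func (r *bufReader) fill() error {
	n, err := r.src.Read(r.buf)
	if n == 0 {
		return err // no progress: surface the error
	}
	r.i, r.n = 0, n // got data: defer any error to the next fill
	return nil
}

func (r *bufReader) Read(p []byte) (int, error) {
	if r.i >= r.n {
		if err := r.fill(); err != nil {
			return 0, err
		}
	}
	n := copy(p, r.buf[r.i:r.n])
	r.i += n
	return n, nil
}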
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce
initialNonce nonce
c *Cipher
buf *[blockSize]byte
readBuf *[blockSize]byte
buf []byte
readBuf []byte
bufIndex int
bufSize int
err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1,
}
// Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF {
readBuf := fh.readBuf[:fileHeaderSize]
_, err := io.ReadFull(fh.rc, readBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil {
} else if err != nil {
return nil, fh.finishAndClose(err)
}
// check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err
}
// possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists
if n <= blockHeaderSize {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil && err != io.EOF {
if err != nil {
return err // return pending error as it is likely more accurate
}
if !fh.c.passBadBlocks {
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
return ErrorEncryptedBadBlock
}
fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit)
}
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n
if fh.limit >= 0 {
fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can

View File

@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
if test.expectedErr == "" {
assert.NoError(t, actualErr)
} else {
assert.EqualError(t, actualErr, test.expectedErr)
assert.Error(t, actualErr, test.expectedErr)
}
}
}
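
Many of the hunks in this test file swap assert.EqualError for assert.Error. These are not equivalent in testify: the trailing arguments to assert.Error are only a message logged on failure, so assert.Error(t, err, "potato") passes for any non-nil error, whereas assert.EqualError actually compares err.Error() against the string. A small illustration:

package sketch

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorVsEqualError(t *testing.T) {
	err := errors.New("unexpected")
	assert.Error(t, err, "potato")          // passes: "potato" is just a failure message
	assert.EqualError(t, err, "unexpected") // passes: the error text is compared
	// assert.EqualError(t, err, "potato")  // would fail
}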
@@ -405,13 +405,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +483,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string
expected string
expectedErr error
customSuffix string
}{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what)
@@ -739,7 +726,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}
func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1050,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
_, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1")
assert.Error(t, err, "Error in stream")
}
type zeroes struct{}
@@ -1180,13 +1167,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
// Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF")
assert.Error(t, err, "short read of nonce")
}
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1237,7 +1224,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed)
}
@@ -1245,7 +1232,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, "potato")
assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed)
// bad magic
@@ -1256,7 +1243,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd)
assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed)
}
@@ -1508,10 +1495,8 @@ func TestDecrypterRead(t *testing.T) {
case i == fileHeaderSize:
// This would normally produce an error *except* on the first block
expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default:
expectedErr = ErrorEncryptedBadBlock
expectedErr = io.ErrUnexpectedEOF
}
if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1529,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, "potato")
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
// Test corrupting the input
@@ -1540,26 +1525,15 @@ func TestDecrypterRead(t *testing.T) {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = io.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
}
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
}
func TestDecrypterClose(t *testing.T) {
@@ -1580,7 +1554,7 @@ func TestDecrypterClose(t *testing.T) {
// double close
err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error())
assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed)
// try again reading the file this time
@@ -1607,6 +1581,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock()
c.putBlock(block)
c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
}
func TestKey(t *testing.T) {

View File

@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
},
},
}, {
@@ -79,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
@@ -121,15 +119,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.",
},
},
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -149,18 +138,10 @@ length and if it's case sensitive.`,
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
},
},
Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}},
})
}
@@ -193,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil
}
@@ -268,7 +247,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -284,9 +262,7 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
}
// Fs represents a wrapped fs.Fs
@@ -478,7 +454,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}

View File

@@ -202,7 +202,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
@@ -277,23 +277,20 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
Sensitive: true,
Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "auth_owner_only",
Default: false,
@@ -419,11 +416,10 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
Sensitive: true,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
@@ -503,9 +499,7 @@ need to use --ignore size also.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different drive configs.
Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
@@ -596,8 +590,7 @@ Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -605,18 +598,6 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
@@ -673,7 +654,6 @@ type Options struct {
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote drive server
@@ -781,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1142,12 +1122,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
@@ -1519,9 +1493,6 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
@@ -2909,7 +2880,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3891,7 +3861,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil {
return err
}

View File

@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {

View File

@@ -13,6 +13,7 @@ import (
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -139,12 +140,55 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
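
The polling loop added above is a capped exponential backoff against a deadline: start at 100 ms, double on each try up to 1 s, and give up once BatchCommitTimeout has elapsed. The same shape in isolation, as a hypothetical helper rather than part of the backend:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls poll() with capped exponential backoff until it reports
// done or the deadline passes.
func waitFor(deadline time.Duration, poll func() (done bool, err error)) error {
	const maxSleep = time.Second
	sleep := 100 * time.Millisecond
	start := time.Now()
	for time.Since(start) < deadline {
		if done, err := poll(); err == nil && done {
			return nil
		}
		time.Sleep(sleep)
		if sleep *= 2; sleep > maxSleep {
			sleep = maxSleep
		}
	}
	return errors.New("timed out waiting for job")
}

func main() {
	tries := 0
	err := waitFor(5*time.Second, func() (bool, error) {
		tries++
		return tries >= 3, nil // pretend the job completes on the third check
	})
	fmt.Println(err, "after", tries, "polls")
}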
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}

View File

@@ -58,7 +58,7 @@ import (
const (
rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond)
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow.
@@ -182,9 +182,8 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Default: "",
Advanced: true,
Sensitive: true,
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
@@ -261,8 +260,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
@@ -272,11 +271,6 @@ default based on the batch_mode in use.
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -305,7 +299,6 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -449,7 +442,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
@@ -543,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
// if the mount failed we have to abort here
// if the moint failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally
@@ -726,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
@@ -913,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)

View File

@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1,
Pass: f.opt.FilePassword,
}
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
@@ -408,32 +405,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
@@ -502,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err)
return nil, fmt.Errorf("didnt got an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")

View File

@@ -38,9 +38,8 @@ func init() {
Description: "1Fichier",
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
Sensitive: true,
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
@@ -55,11 +54,6 @@ func init() {
Name: "folder_password",
Advanced: true,
IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -95,7 +89,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -340,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, can't upload")
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -488,51 +481,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -606,7 +554,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -70,22 +69,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"`
}
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
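
As an aside on the request shape being removed: destination_folder_id and rename carry omitempty, but destination_user does not, so an empty user field is still serialized. A runnable illustration of what goes over the wire:

package main

import (
	"encoding/json"
	"fmt"
)

type MoveDirRequest struct {
	FolderID            int    `json:"folder_id"`
	DestinationFolderID int    `json:"destination_folder_id,omitempty"`
	DestinationUser     string `json:"destination_user"`
	Rename              string `json:"rename,omitempty"`
}

func main() {
	b, _ := json.Marshal(MoveDirRequest{FolderID: 12, Rename: "photos"})
	fmt.Println(string(b))
	// {"folder_id":12,"destination_user":"","rename":"photos"}
	// destination_user lacks omitempty, so the zero value is still sent.
}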
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`

View File

@@ -84,7 +84,6 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID.
`,
Sensitive: true,
}, {
Name: "permanent_token",
Help: `Permanent Authentication Token.
@@ -98,7 +97,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
Sensitive: true,
}, {
Name: "token",
Help: `Session Token.
@@ -108,8 +106,7 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "token_expiry",
Help: `Token expiry time.

View File

@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers"
)
@@ -49,15 +48,13 @@ func init() {
Description: "FTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
Sensitive: true,
Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Sensitive: true,
Name: "user",
Help: "FTP username.",
Default: currentUser,
}, {
Name: "port",
Help: "FTP port number.",
@@ -175,18 +172,6 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -231,7 +216,6 @@ type Options struct {
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
}
// Fs represents a remote FTP server
@@ -331,33 +315,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if isRetriableFtpError(err) {
return true, err
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
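
The textprotoError helper being removed relies on errors.As, which walks the wrapped-error chain; the type switch it reverts to only inspects the top-level value, so a *textproto.Error wrapped with fmt.Errorf's %w verb would be missed. The difference, runnable:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

func main() {
	err := fmt.Errorf("dial: %w", &textproto.Error{Code: 421, Msg: "not available"})

	switch err.(type) { // matches the top-level type only
	case *textproto.Error:
		fmt.Println("type switch matched")
	default:
		fmt.Println("type switch missed the wrapped *textproto.Error")
	}

	var tpErr *textproto.Error
	if errors.As(err, &tpErr) { // unwraps %w chains
		fmt.Println("errors.As found code", tpErr.Code)
	}
}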
@@ -373,12 +342,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil {
return nil, err
}
@@ -499,7 +463,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil {
var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -601,7 +566,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
@@ -649,7 +613,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
@@ -660,7 +625,8 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
@@ -714,12 +680,6 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
@@ -957,7 +917,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
@@ -1126,7 +1087,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime {
fs.Debugf(o.fs, "SetModTime is not supported")
fs.Errorf(o.fs, "SetModTime is not supported")
return nil
}
c, err := o.fs.getFtpConnection(ctx)
@@ -1198,7 +1159,8 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
@@ -1224,26 +1186,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
c, err := o.fs.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
@@ -1276,10 +1227,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {

View File

@@ -82,8 +82,7 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -91,21 +90,15 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -304,15 +297,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
@@ -346,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
@@ -364,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
@@ -377,8 +349,6 @@ type Options struct {
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
}
// Fs represents a remote storage server
@@ -474,7 +444,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -530,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -560,9 +525,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -579,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -643,13 +601,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse {
list = list.Delimiter("/")
}
foundItems := 0
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
@@ -665,7 +619,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err
}
if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
@@ -686,29 +639,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
foundItems += len(objects.Items)
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, object, isDirectory)
// is this a directory marker?
if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
@@ -718,17 +664,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
list.PageToken(objects.NextPageToken)
}
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
@@ -772,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -892,69 +824,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...)
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
e := f.checkBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeBucket(ctx, bucket)
}
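
For context on the createDirectoryMarker logic removed above: a directory marker is a zero-length object whose key ends in "/", and the helper walks up the path creating one marker per ancestor until it reaches the bucket root. The traversal order, sketched:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Walk a/b/c up to the root the way createDirectoryMarker does,
	// emitting one trailing-slash marker object per level.
	for dir := "a/b/c"; dir != "/" && dir != "."; dir = path.Dir(dir) {
		fmt.Println(dir + "/") // zero-byte object that persists the folder
	}
	// Output:
	// a/b/c/
	// a/b/
	// a/
}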
// makeBucket creates the bucket if it doesn't exist
@@ -963,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err == nil {
@@ -1002,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
insertBucket = insertBucket.Context(ctx)
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(ctx, err)
})
}, nil)
@@ -1026,28 +891,12 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(ctx, err)
})
})
@@ -1069,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote)
err := f.checkBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -1093,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteRequest = rewriteRequest.Context(ctx)
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1207,17 +1052,8 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
}
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1289,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
copyObject = copyObject.Context(ctx)
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1310,9 +1142,6 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil {
return nil, err
@@ -1356,14 +1185,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
err := o.fs.checkBucket(ctx, bucket)
if err != nil {
return err
}
modTime := src.ModTime(ctx)
@@ -1408,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
insertObject = insertObject.Context(ctx)
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1427,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
})
return err

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

View File

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++
}
})

View File

@@ -166,7 +166,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

View File

@@ -19,10 +19,9 @@ func init() {
Description: "Hadoop distributed file system",
NewFs: NewFs,
Options: []fs.Option{{
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
Sensitive: true,
Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
@@ -30,7 +29,6 @@ func init() {
Value: "root",
Help: "Connect to hdfs as root.",
}},
Sensitive: true,
}, {
Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode.
@@ -38,16 +36,15 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with
the datanodes. Possible values are 'authentication', 'integrity'
and 'privacy'. Used only with KERBEROS enabled.`,
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil
}
// copyDirectory copies the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//

View File

@@ -2,7 +2,7 @@
package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files or folders with longer names will throw an HTTP error:
// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err)
}
if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
}
}

View File

@@ -495,7 +495,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, time.Time{}))
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
@@ -507,7 +507,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, time.Time{}))
add(fs.NewDir(remote, timeUnset))
} else {
in <- remote
}

View File

@@ -133,13 +133,11 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
},
Options: []fs.Option{{
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
}, {
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
}, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes the following two
Name: "endpoint",

View File

@@ -74,10 +74,6 @@ const (
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
// Register with Fs
@@ -88,7 +84,7 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -123,7 +119,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -143,9 +139,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -268,21 +261,6 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -1860,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil && err != fs.ErrorObjectNotFound {
// if delete failed then report that, unless it was because the file did not exist after all
if err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}
} else if err != fs.ErrorObjectNotFound {
// if the object does not exist we can just continue but if the error is something different we should report that
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err
}
}
@@ -1952,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
}
@@ -1973,17 +1951,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true")
}
err := o.fs.pacer.Call(func() (bool, error) {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// Remove an object

View File

@@ -61,10 +61,9 @@ func init() {
Default: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Required: true,
Sensitive: true,
Name: "user",
Help: "Your user name.",
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -377,7 +376,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Time{})
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,

View File

@@ -266,10 +266,7 @@ type Object struct {
// ------------------------------------------------------------
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -303,7 +300,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -314,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
@@ -516,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -537,10 +524,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -553,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -565,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err

View File

@@ -14,12 +14,10 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -147,20 +145,6 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
@@ -411,147 +395,3 @@ func TestFilter(t *testing.T) {
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global error, one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -5,7 +5,6 @@ package local
import (
"fmt"
"runtime"
"sync"
"time"
@@ -24,7 +23,7 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
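The probe above runs once and memoizes the answer in a function pointer, so the per-file path never repeats the ENOSYS check. A standalone sketch of the pattern, assuming golang.org/x/sys/unix (readMetaStatx and readMetaFstatat are hypothetical stand-ins for the two readers):

var readMeta func(o *Object, m *fs.Metadata) error

func init() {
	var stat unix.Statx_t
	// statx() returns ENOSYS on kernels older than 4.11.
	if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
		readMeta = readMetaStatx // richer statx() data, including btime
	} else {
		readMeta = readMetaFstatat // fstatat() works back to kernel 2.6.16
	}
}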
@@ -92,7 +91,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))

View File

@@ -85,11 +85,10 @@ func init() {
Name: "mailru",
Description: "Mail.ru Cloud",
NewFs: NewFs,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
Sensitive: true,
Options: []fs.Option{{
Name: "user",
Help: "User name (usually email).",
Required: true,
}, {
Name: "pass",
Help: `Password.
@@ -214,7 +213,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}

View File

@@ -58,10 +58,9 @@ func init() {
Description: "Mega",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User name.",
Required: true,
Sensitive: true,
Name: "user",
Help: "User name.",
Required: true,
}, {
Name: "pass",
Help: "Password.",
@@ -84,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -112,7 +100,6 @@ type Options struct {
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -217,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})

View File

@@ -65,13 +65,11 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`",
Required: true,
Sensitive: true,
Required: true,
}, {
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
Sensitive: true,
Name: "account",
Help: "Set the NetStorage account name",
Required: true,
}, {
Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -821,8 +819,6 @@ func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header
dataHeader := generateDataHeader(f)
path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)

View File

@@ -126,7 +126,6 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
}
// FileFacet groups file-related data on OneDrive into a single structure.

View File

@@ -131,11 +131,10 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
Sensitive: true,
Name: "drive_id",
Help: "The ID of the drive to use.",
Default: "",
Advanced: true,
}, {
Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -149,8 +148,7 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "access_scopes",
Help: `Set scopes to be requested by rclone.
@@ -198,9 +196,7 @@ listing, set this option.`,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Deprecated: use --server-side-across-configs instead.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will
@@ -261,67 +257,6 @@ this flag there.
Help: `Set the password for links created by the link command.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: "av_override",
Default: false,
Help: `Allows download of files the server thinks have a virus.
The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.
In this case you will see a message like this
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`,
Advanced: true,
}, {
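Both removed options surface as ordinary backend flags. Illustrative invocations (the remote name and paths are made up; the av flag is named in the help text above):

rclone copy --onedrive-av-override onedrive:quarantined.doc /tmp/
rclone sha1sum --onedrive-hash-type sha1 onedrive:dir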
@@ -662,8 +597,6 @@ type Options struct {
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -680,7 +613,6 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
}
// Object describes a OneDrive object
@@ -694,7 +626,8 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
hash string // Hash of the content, usually QuickXorHash but set as hash_type
sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
mimeType string // Content-Type of object from server (may not be as uploaded)
}
@@ -949,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -959,15 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
@@ -1633,7 +1556,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(f.hashType)
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
}
// PublicLink returns a link for downloading without account.
@@ -1748,10 +1674,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Errorf(f, "Failed to list %q: %v", path, err)
return nil
}
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
@@ -1846,8 +1768,14 @@ func (o *Object) rootPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == o.fs.hashType {
return o.hash, nil
if o.fs.driveType == driveTypePersonal {
if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
}
return "", hash.ErrUnsupported
}
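Callers are expected to ask the Fs which hashes it supports rather than guessing the drive type. A hedged sketch (GetOne picks an arbitrary member of the supported set):

// ht is whatever this remote advertises: SHA-1 for Personal drives on the
// old scheme, QuickXorHash after the hash_type unification.
ht := o.fs.Hashes().GetOne()
sum, err := o.Hash(ctx, ht)
if err == nil && sum != "" {
	fmt.Printf("%v: %s\n", ht, sum)
}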
@@ -1878,23 +1806,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile()
if file != nil {
o.mimeType = file.MimeType
o.hash = ""
switch o.fs.hashType {
case QuickXorHashType:
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.hash = hex.EncodeToString(h)
}
if file.Hashes.Sha1Hash != "" {
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
}
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.quickxorhash = hex.EncodeToString(h)
}
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
}
}
fileSystemInfo := info.GetFileSystemInfo()
@@ -1990,20 +1911,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err
}

View File

@@ -61,7 +61,7 @@ func New() hash.Hash {
func (q *quickXorHash) Write(p []byte) (n int, err error) {
var i int
// fill last remain
lastRemain := q.size % dataSize
lastRemain := int(q.size) % dataSize
if lastRemain != 0 {
i += xorBytes(q.data[lastRemain:], p)
}
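Here lastRemain is simply the write offset inside the hash's circular data window: if dataSize were 64 (an illustrative value, not the real constant) and 100 bytes had been written so far, lastRemain would be 100 % 64 = 36 and the next bytes XOR into q.data[36:]. The restored int(...) cast matters because q.size is tracked in a wider integer type than the int used for slice indexing.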

View File

@@ -42,10 +42,9 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username.",
Required: true,
Sensitive: true,
Name: "username",
Help: "Username.",
Required: true,
}, {
Name: "password",
Help: "Password.",

View File

@@ -92,16 +92,14 @@ func newOptions() []fs.Option {
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Sensitive: true,
Name: "namespace",
Help: "Object storage namespace",
Required: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Sensitive: true,
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
}, {
Name: "region",
Help: "Object storage Region",
@@ -291,7 +289,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
}},
}, {
Name: "sse_kms_key_id",
Help: `if using your own master key in vault, this header specifies the
Help: `if using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,

View File

@@ -589,7 +589,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
if ignoreErr != nil {
// fs.Debugf(f, "ignoring error %s", ignoreErr)
}
} else {
// fs.Debugf(f, "ignoring %s", what)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")

View File

@@ -110,11 +110,10 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
Sensitive: true,
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}, {
Name: "hostname",
Help: `Hostname to connect to.
@@ -139,8 +138,7 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
Advanced: true,
Sensitive: true,
Advanced: true,
}, {
Name: "password",
Help: "Your pcloud password.",

View File

@@ -1,536 +0,0 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using the browser dev tools and https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"reflect"
"strconv"
"time"
)
const (
// "2022-09-17T14:31:06.056+08:00"
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
if string(data) == "null" || string(data) == `""` {
return nil
}
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
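A quick round trip shows what the quoted layout buys (illustrative snippet, assuming encoding/json):

var t api.Time
_ = json.Unmarshal([]byte(`"2022-09-17T14:31:06.056+08:00"`), &t)
out, _ := json.Marshal(&t)
// string(out) == `"2022-09-17T14:31:06+08:00"`: the quotes come from the
// layout itself, and fractional seconds are dropped since plain RFC3339
// carries none; `""` and `null` inputs deliberately leave t untouched.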
// Types of things in Item
const (
KindOfFolder = "drive#folder"
KindOfFile = "drive#file"
KindOfFileList = "drive#fileList"
KindOfResumable = "drive#resumable"
KindOfForm = "drive#form"
ThumbnailSizeS = "SIZE_SMALL"
ThumbnailSizeM = "SIZE_MEDIUM"
ThumbnailSizeL = "SIZE_LARGE"
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
PhaseTypeError = "PHASE_TYPE_ERROR"
PhaseTypePending = "PHASE_TYPE_PENDING"
UploadTypeForm = "UPLOAD_TYPE_FORM"
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
ListLimit = 100
)
// ------------------------------------------------------------
// Error details api error from pikpak
type Error struct {
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
Code int `json:"error_code"`
URL string `json:"error_url,omitempty"`
Message string `json:"error_description,omitempty"`
// can have either of `error_details` or `details`
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
Details []*ErrorDetails `json:"details,omitempty"`
}
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct {
} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
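So a response with Reason "file_name_empty", Code 3 and a human-readable message would render as `Error "file_name_empty" (3): file name is empty` (the values are illustrative; %q supplies the quotes).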
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ------------------------------------------------------------
// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a comma-separated list of strings
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
Kind map[string]string `json:"kind,omitempty"` // "eq"
Starred map[string]bool `json:"starred,omitempty"` // "eq"
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}
// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
if value == "" {
// UNSET for empty values
return
}
r := reflect.ValueOf(f)
fd := reflect.Indirect(r).FieldByName(field)
if v, err := strconv.ParseBool(value); err == nil {
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
} else {
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
}
}
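Hypothetical usage, mirroring the operators documented above; strconv.ParseBool decides whether a field becomes a map[string]bool or a map[string]string:

var flt api.Filters
flt.Set("Trashed", "eq", "false")                                      // parses as bool
flt.Set("Phase", "in", api.PhaseTypeComplete+","+api.PhaseTypeRunning) // stays a string
flt.Set("ModifiedTime", "gt", "2023-01-28T10:56:49.757+08:00")
flt.Set("Kind", "eq", "") // empty value: field is left unset
// flt now marshals to {"phase":{"in":"PHASE_TYPE_COMPLETE,PHASE_TYPE_RUNNING"},
// "trashed":{"eq":false},"modified_time":{"gt":"2023-01-28T10:56:49.757+08:00"}}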
// ------------------------------------------------------------
// Common Elements
// Link contains a download URL for opening files
type Link struct {
URL string `json:"url"`
Token string `json:"token"`
Expire Time `json:"expire"`
Type string `json:"type,omitempty"`
}
// Valid reports whether l is non-nil, has a URL, and is not expired.
func (l *Link) Valid() bool {
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL
type URL struct {
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
URL string `json:"url,omitempty"`
}
// ------------------------------------------------------------
// Base Elements
// FileList contains a list of File elements
type FileList struct {
Kind string `json:"kind,omitempty"` // drive#fileList
Files []*File `json:"files,omitempty"`
NextPageToken string `json:"next_page_token"`
Version string `json:"version,omitempty"`
VersionOutdated bool `json:"version_outdated,omitempty"`
}
// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive about multiple concurrent range-requests
// for a single file, i.e. it supports a higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only available for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"`
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // sha1, but NOT a valid file hash; looks like a torrent hash
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
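Given the note above File about the two flavours of download link, a hedged selection helper might look like this (downloadURL is illustrative, not rclone's actual logic):

func downloadURL(f *api.File) string {
	// Prefer a media link: empirically friendlier to concurrent range requests.
	for _, m := range f.Medias {
		if m.Link.Valid() { // Valid() is nil-safe, see Link above
			return m.Link.URL
		}
	}
	if f.Links != nil && f.Links.ApplicationOctetStream.Valid() {
		return f.Links.ApplicationOctetStream.URL
	}
	return f.WebContentLink // may be empty if nothing was issued
}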
// FileLinks includes links to file at backend
type FileLinks struct {
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}
// FileAudit contains audit information for the file
type FileAudit struct {
Status string `json:"status,omitempty"` // "STATUS_OK"
Message string `json:"message,omitempty"`
Title string `json:"title,omitempty"`
}
// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
MediaID string `json:"media_id,omitempty"`
MediaName string `json:"media_name,omitempty"`
Video struct {
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Duration int64 `json:"duration,omitempty"`
BitRate int `json:"bit_rate,omitempty"`
FrameRate int `json:"frame_rate,omitempty"`
VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"`
}
// FileParams includes parameters for instant open
type FileParams struct {
Duration int64 `json:"duration,omitempty,string"` // in seconds
Height int `json:"height,omitempty,string"`
Platform string `json:"platform,omitempty"` // "Upload"
PlatformIcon string `json:"platform_icon,omitempty"`
URL string `json:"url,omitempty"`
Width int `json:"width,omitempty,string"`
}
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct {
} `json:"params,omitempty"` // TODO
CategoryIds []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct {
} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
// TaskList contains a list of Task elements
type TaskList struct {
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
NextPageToken string `json:"next_page_token"`
ExpiresIn int `json:"expires_in,omitempty"`
}
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task
type TaskParams struct {
Age string `json:"age,omitempty"`
PredictSpeed string `json:"predict_speed,omitempty"`
PredictType string `json:"predict_type,omitempty"`
URL string `json:"url,omitempty"`
}
// Form contains parameters for upload by multipart/form-data
type Form struct {
Headers struct{} `json:"headers"`
Kind string `json:"kind"` // "drive#form"
Method string `json:"method"` // "POST"
MultiParts struct {
OSSAccessKeyID string `json:"OSSAccessKeyId"`
Signature string `json:"Signature"`
Callback string `json:"callback"`
Key string `json:"key"`
Policy string `json:"policy"`
XUserData string `json:"x:user_data"`
} `json:"multi_parts"`
URL string `json:"url"`
}
// Resumable contains parameters for upload by resumable
type Resumable struct {
Kind string `json:"kind,omitempty"` // "drive#resumable"
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
Params *ResumableParams `json:"params,omitempty"`
}
// ResumableParams specifies resumable parameters
type ResumableParams struct {
AccessKeyID string `json:"access_key_id,omitempty"`
AccessKeySecret string `json:"access_key_secret,omitempty"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Expiration Time `json:"expiration,omitempty"`
Key string `json:"key,omitempty"`
SecurityToken string `json:"security_token,omitempty"`
}
// FileInArchive is a basic element in archive
type FileInArchive struct {
Index int `json:"index,omitempty"`
Filename string `json:"filename,omitempty"`
Filesize string `json:"filesize,omitempty"`
MimeType string `json:"mime_type,omitempty"`
Gcid string `json:"gcid,omitempty"`
Kind string `json:"kind,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Path string `json:"path,omitempty"`
}
// ------------------------------------------------------------
// NewFile is a response to RequestNewFile
type NewFile struct {
File *File `json:"file,omitempty"`
Form *Form `json:"form,omitempty"`
Resumable *Resumable `json:"resumable,omitempty"`
Task *Task `json:"task,omitempty"` // null in this case
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// NewTask is a response to RequestNewTask
type NewTask struct {
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
File *File `json:"file,omitempty"` // null in this case
Task *Task `json:"task,omitempty"`
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}
// About informs drive status
type About struct {
Kind string `json:"kind,omitempty"` // "drive#about"
Quota *Quota `json:"quota,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Quotas struct {
} `json:"quotas,omitempty"` // maybe []*Quota?
}
// Quota informs drive quota
type Quota struct {
Kind string `json:"kind,omitempty"` // "drive#quota"
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
Usage int64 `json:"usage,omitempty,string"` // bytes in use
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash, but this seems not to work
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}
// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
ShareID string `json:"share_id,omitempty"`
ShareURL string `json:"share_url,omitempty"`
PassCode string `json:"pass_code,omitempty"`
ShareText string `json:"share_text,omitempty"`
}
// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
Sub string `json:"sub,omitempty"` // userid for internal use
Name string `json:"name,omitempty"` // Username
Picture string `json:"picture,omitempty"` // URL to Avatar image
Email string `json:"email,omitempty"` // redacted email address
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
PhoneNumber string `json:"phone_number,omitempty"`
Password string `json:"password,omitempty"` // "SET" if configured
Status string `json:"status,omitempty"` // "ACTIVE"
CreatedAt Time `json:"created_at,omitempty"`
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}
// UserProvider details third-party authentication
type UserProvider struct {
ID string `json:"id,omitempty"` // e.g. "google.com"
ProviderUserID string `json:"provider_user_id,omitempty"`
Name string `json:"name,omitempty"` // username
}
// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
Result string `json:"result,omitempty"` // "ACCEPTED"
Message string `json:"message,omitempty"`
RedirectURI string `json:"redirect_uri,omitempty"`
Data struct {
Expire Time `json:"expire,omitempty"`
Status string `json:"status,omitempty"` // "invalid" or "ok"
Type string `json:"type,omitempty"` // "novip" or "platinum"
UserID string `json:"user_id,omitempty"` // same as User.Sub
} `json:"data,omitempty"`
}
// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"`
TaskID string `json:"task_id,omitempty"` // same as File.Id
FilesNum int `json:"files_num,omitempty"` // number of files in archive
RedirectLink string `json:"redirect_link,omitempty"`
}
// ------------------------------------------------------------
// RequestShare is to request for file share
type RequestShare struct {
FileIds []string `json:"file_ids,omitempty"`
ShareTo string `json:"share_to,omitempty"` // "publiclink",
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}
// RequestBatch is to request for batch actions
type RequestBatch struct {
Ids []string `json:"ids,omitempty"`
To map[string]string `json:"to,omitempty"`
}
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
// always required
Kind string `json:"kind"` // "drive#folder" or "drive#file"
Name string `json:"name"`
ParentID string `json:"parent_id"`
FolderType string `json:"folder_type"`
// only when uploading a new file
Hash string `json:"hash,omitempty"` // sha1sum
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
Size int64 `json:"size,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
Kind string `json:"kind,omitempty"` // "drive#file"
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}
// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Password string `json:"password,omitempty"` // ""
FileID string `json:"file_id,omitempty"`
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------
// NOT implemented YET
// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
Path string `json:"path,omitempty"` // "" by default
Password string `json:"password,omitempty"` // "" by default
FileID string `json:"file_id,omitempty"`
}
// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"` // ""
TaskID string `json:"task_id,omitempty"` // ""
CurrentPath string `json:"current_path,omitempty"` // ""
Title string `json:"title,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Files []*FileInArchive `json:"files,omitempty"`
}
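A note for reviewers: RequestArchiveFileList above is declared but flagged NOT implemented YET. If it were wired up it would presumably follow the same shape as the other helpers in this backend; a minimal sketch, assuming the f.rst rest client, pacer and shouldRetry plumbing shown in the helper file below:

// Sketch only - this endpoint is not implemented in this diff.
func (f *Fs) requestArchiveFileList(ctx context.Context, req *api.RequestArchiveFileList) (info *api.ArchiveFileList, err error) {
	opts := rest.Opts{
		Method:  "POST",
		RootURL: "https://api-drive.mypikpak.com/decompress/v1/list", // from the comment above
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
		return f.shouldRetry(ctx, resp, err)
	})
	return
}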

View File

@@ -1,253 +0,0 @@
package pikpak
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/lib/rest"
)
// Globals
const (
cachePrefix = "rclone-pikpak-sha1sum-"
)
// requestDecompress requests decompression of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
req := &api.RequestDecompress{
Gcid: file.Hash,
Password: password,
FileID: file.ID,
Files: []*api.FileInArchive{},
DefaultParent: true,
}
opts := rest.Opts{
Method: "POST",
Path: "/decompress/v1/decompress",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://user.mypikpak.com/v1/user/me",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get userinfo: %w", err)
}
return
}
// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get vip info: %w", err)
}
return
}
// requestBatchAction sends batch actions to the API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files:" + action,
NoResponse: true, // Only returns {"task_id":""}
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("batch action %q failed: %w", action, err)
}
return nil
}
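For reviewers tracing the batch API, a typical call - copying files into a destination folder - might look like the sketch below. The "parent_id" key in the To map is an assumption read off the RequestBatch type above, not something this diff confirms.

// Hedged usage sketch for requestBatchAction.
req := &api.RequestBatch{
	Ids: []string{srcFileID},
	To:  map[string]string{"parent_id": dstDirID}, // assumed key
}
if err := f.requestBatchAction(ctx, "batchCopy", req); err != nil {
	return err
}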
// requestNewTask requests a new api.NewTask and returns api.Task
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var newTask api.NewTask
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return newTask.Task, nil
}
// requestNewFile requests creation of a new file and returns api.NewFile
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getFile gets api.File from API for the ID passed
// and returns rich information containing additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && info.Phase != api.PhaseTypeComplete {
// could be pending right after file is created/uploaded.
return true, errors.New("not PHASE_TYPE_COMPLETE")
}
return f.shouldRetry(ctx, resp, err)
})
return
}
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/about",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/share",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// readSHA1 reads the SHA1 of in, returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
// we need an SHA1
hash := sha1.New()
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
teeReader := io.TeeReader(in, hash)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done downloading
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disk and calculate the SHA1 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
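A usage sketch for readSHA1, assuming a caller that needs the digest before uploading; the threshold value and the upload helper are illustrative, not from this diff:

// Hash the stream first, then upload from the replayed reader.
const threshold = 10 * 1024 * 1024 // assumed spill-to-disk threshold
sha1sum, out, cleanup, err := readSHA1(in, size, threshold)
defer cleanup() // required whether or not readSHA1 returned an error
if err != nil {
	return err
}
// sha1sum holds the hex digest; out replays the same bytes for the upload.
err = upload(ctx, out, sha1sum) // hypothetical upload helper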

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test PikPak filesystem interface
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*pikpak.Object)(nil),
})
}

View File

@@ -82,15 +82,14 @@ func init() {
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: "api_key",
Help: `API Key.
This is not normally used - use oauth instead.
`,
Hide: fs.OptionHideBoth,
Default: "",
Sensitive: true,
Hide: fs.OptionHideBoth,
Default: "",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -100,7 +99,7 @@ This is not normally used - use oauth instead.
encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}

File diff suppressed because it is too large

View File

@@ -1,16 +0,0 @@
package protondrive_test
import (
"testing"
"github.com/rclone/rclone/backend/protondrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestProtonDrive:",
NilObject: (*protondrive.Object)(nil),
})
}

View File

@@ -23,7 +23,6 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
)
@@ -253,12 +252,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return f.putUnchecked(ctx, in, src, src.Remote(), options...)
}
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
size := src.Size()
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
@@ -544,59 +540,24 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
var resp struct {
File putio.File `json:"file"`
}
// For some unknown reason the API sometimes returns a "Name
// already exist" error unless we copy to a temporary name and
// then rename
//
// {"error_id":null,"error_message":"Name already exist","error_type":"NAME_ALREADY_EXIST","error_uri":"http://api.put.io/v2/docs","extra":{},"status":"ERROR","status_code":400}
suffix := "." + random.String(8)
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", f.opt.Enc.FromStandardName(leaf+suffix))
params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, &resp)
_, err = f.client.Do(req, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
params.Set("name", f.opt.Enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/rename", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
_, err = f.client.Do(req, &resp)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
o, err = f.newObjectWithInfo(ctx, remote, resp.File)
if err != nil {
return nil, err
}
err = o.SetModTime(ctx, modTime)
if err != nil {
return nil, err
}
return o, nil
return f.NewObject(ctx, remote)
}
// Move src to this remote using server-side move operations.
@@ -618,7 +579,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
modTime := src.ModTime(ctx)
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
@@ -636,15 +596,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
o, err = f.NewObject(ctx, remote)
if err != nil {
return nil, err
}
err = o.SetModTime(ctx, modTime)
if err != nil {
return nil, err
}
return o, nil
return f.NewObject(ctx, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote

View File

@@ -275,7 +275,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newObj, err := o.fs.putUnchecked(ctx, in, src, o.remote, options...)
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
if err != nil {
return err
}

View File

@@ -67,7 +67,7 @@ func init() {
NoOffline: true,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -77,7 +77,7 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}

View File

@@ -49,13 +49,11 @@ func init() {
Help: "Get QingStor credentials from the environment (env vars or IAM).",
}},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
Name: "access_key_id",
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter an endpoint URL to connect to the QingStor API.\n\nLeave blank to use the default value \"https://qingstor.com:443\".",

View File

@@ -1,4 +1,4 @@
// Package s3 provides an interface to Amazon S3 object storage
// Package s3 provides an interface to Amazon S3 oject storage
package s3
//go:generate go run gen_setfrom.go -o setfrom.go
@@ -66,7 +66,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Leviia, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -91,9 +91,6 @@ func init() {
}, {
Value: "Alibaba",
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
}, {
Value: "ArvanCloud",
Help: "Arvan Cloud Object Storage (AOS)",
}, {
Value: "Ceph",
Help: "Ceph Object Storage",
@@ -103,15 +100,15 @@ func init() {
}, {
Value: "Cloudflare",
Help: "Cloudflare R2 Storage",
}, {
Value: "ArvanCloud",
Help: "Arvan Cloud Object Storage (AOS)",
}, {
Value: "DigitalOcean",
Help: "DigitalOcean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
}, {
Value: "GCS",
Help: "Google Cloud Storage",
}, {
Value: "HuaweiOBS",
Help: "Huawei Object Storage Service",
@@ -127,9 +124,6 @@ func init() {
}, {
Value: "LyveCloud",
Help: "Seagate Lyve Cloud",
}, {
Value: "Leviia",
Help: "Leviia Object Storage",
}, {
Value: "Liara",
Help: "Liara Object Storage",
@@ -139,9 +133,6 @@ func init() {
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Petabox",
Help: "Petabox Object Storage",
}, {
Value: "RackCorp",
Help: "RackCorp Object Storage",
@@ -157,9 +148,6 @@ func init() {
}, {
Value: "Storj",
Help: "Storj (S3 Compatible Gateway)",
}, {
Value: "Synology",
Help: "Synology C2 Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
@@ -185,13 +173,11 @@ func init() {
Help: "Get AWS credentials from the environment (env vars or IAM).",
}},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
Name: "access_key_id",
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
Sensitive: true,
Name: "secret_access_key",
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
}, {
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
@@ -451,50 +437,10 @@ func init() {
Value: "eu-south-2",
Help: "Logrono, Spain",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "Petabox",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "US East (N. Virginia)",
}, {
Value: "eu-central-1",
Help: "Europe (Frankfurt)",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore)",
}, {
Value: "me-south-1",
Help: "Middle East (Bahrain)",
}, {
Value: "sa-east-1",
Help: "South America (São Paulo)",
}},
}, {
Name: "region",
Help: "Region where your data stored.\n",
Provider: "Synology",
Examples: []fs.OptionExample{{
Value: "eu-001",
Help: "Europe Region 1",
}, {
Value: "eu-002",
Help: "Europe Region 2",
}, {
Value: "us-001",
Help: "US Region 1",
}, {
Value: "us-002",
Help: "US Region 2",
}, {
Value: "tw-001",
Help: "Asia (Taiwan)",
}},
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -603,15 +549,15 @@ func init() {
Help: "Anhui China (Huainan)",
}},
}, {
// ArvanCloud endpoints: https://www.arvancloud.ir/en/products/cloud-storage
// ArvanCloud endpoints: https://www.arvancloud.com/en/products/cloud-storage
Name: "endpoint",
Help: "Endpoint for Arvan Cloud Object Storage (AOS) API.",
Provider: "ArvanCloud",
Examples: []fs.OptionExample{{
Value: "s3.ir-thr-at1.arvanstorage.ir",
Help: "The default endpoint - a good choice if you are unsure.\nTehran Iran (Simin)",
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "The default endpoint - a good choice if you are unsure.\nTehran Iran (Asiatech)",
}, {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Value: "s3.ir-tbz-sh1.arvanstorage.com",
Help: "Tabriz Iran (Shahriar)",
}},
}, {
@@ -819,39 +765,6 @@ func init() {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Petabox S3 Object Storage.\n\nSpecify the endpoint from the same region.",
Provider: "Petabox",
Required: true,
Examples: []fs.OptionExample{{
Value: "s3.petabox.io",
Help: "US East (N. Virginia)",
}, {
Value: "s3.us-east-1.petabox.io",
Help: "US East (N. Virginia)",
}, {
Value: "s3.eu-central-1.petabox.io",
Help: "Europe (Frankfurt)",
}, {
Value: "s3.ap-southeast-1.petabox.io",
Help: "Asia Pacific (Singapore)",
}, {
Value: "s3.me-south-1.petabox.io",
Help: "Middle East (Bahrain)",
}, {
Value: "s3.sa-east-1.petabox.io",
Help: "South America (São Paulo)",
}},
}, {
// Leviia endpoints: https://www.leviia.com/object-storage/
Name: "endpoint",
Help: "Endpoint for Leviia Object Storage API.",
Provider: "Leviia",
Examples: []fs.OptionExample{{
Value: "s3.leviia.com",
Help: "The default endpoint\nLeviia",
}},
}, {
// Liara endpoints: https://liara.ir/landing/object-storage
Name: "endpoint",
@@ -1021,14 +934,6 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Google Cloud Storage.",
Provider: "GCS",
Examples: []fs.OptionExample{{
Value: "https://storage.googleapis.com",
Help: "Google Cloud Storage endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Storj Gateway.",
@@ -1037,26 +942,6 @@ func init() {
Value: "gateway.storjshare.io",
Help: "Global Hosted Gateway",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Synology C2 Object Storage API.",
Provider: "Synology",
Examples: []fs.OptionExample{{
Value: "eu-001.s3.synologyc2.net",
Help: "EU Endpoint 1",
}, {
Value: "eu-002.s3.synologyc2.net",
Help: "EU Endpoint 2",
}, {
Value: "us-001.s3.synologyc2.net",
Help: "US Endpoint 1",
}, {
Value: "us-002.s3.synologyc2.net",
Help: "US Endpoint 2",
}, {
Value: "tw-001.s3.synologyc2.net",
Help: "TW Endpoint 1",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
@@ -1213,7 +1098,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1315,12 +1200,8 @@ func init() {
Help: "Liara Iran endpoint",
Provider: "Liara",
}, {
Value: "s3.ir-thr-at1.arvanstorage.ir",
Help: "ArvanCloud Tehran Iran (Simin) endpoint",
Provider: "ArvanCloud",
}, {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
Provider: "ArvanCloud",
}},
}, {
@@ -1504,7 +1385,7 @@ func init() {
Provider: "ArvanCloud",
Examples: []fs.OptionExample{{
Value: "ir-thr-at1",
Help: "Tehran Iran (Simin)",
Help: "Tehran Iran (Asiatech)",
}, {
Value: "ir-tbz-sh1",
Help: "Tabriz Iran (Shahriar)",
@@ -1701,7 +1582,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1716,7 +1597,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Synology,Cloudflare",
Provider: "!Storj,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1832,7 +1713,6 @@ header is added and the default (private) will be used.
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
Sensitive: true,
}, {
Name: "sse_customer_key",
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
@@ -1844,7 +1724,6 @@ Alternatively you can provide --sse-customer-key-base64.`,
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_base64",
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
@@ -1856,7 +1735,6 @@ Alternatively you can provide --sse-customer-key.`,
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_md5",
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
@@ -1869,7 +1747,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
@@ -1948,7 +1825,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class",
}},
}, {
// Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
Name: "storage_class",
Help: "The storage class to use when storing new objects in ArvanCloud.",
Provider: "ArvanCloud",
@@ -1975,7 +1852,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Infrequent access storage mode",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/storage/object/quickstart/
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
Provider: "Scaleway",
@@ -1984,13 +1861,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Default.",
}, {
Value: "STANDARD",
Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.\nAvailable in all regions.",
Help: "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
}, {
Value: "GLACIER",
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.\nAvailable in FR-PAR and NL-AMS regions.",
}, {
Value: "ONEZONE_IA",
Help: "One Zone - Infrequent Access.\nA good choice for storing secondary backup copies or easily re-creatable data.\nAvailable in the FR-PAR region only.",
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
}},
}, {
// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
@@ -2111,10 +1985,9 @@ If empty it will default to the environment variable "AWS_PROFILE" or
`,
Advanced: true,
}, {
Name: "session_token",
Help: "An AWS session token.",
Advanced: true,
Sensitive: true,
Name: "session_token",
Help: "An AWS session token.",
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
@@ -2309,15 +2182,6 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
This is usually set to a CloudFront CDN URL as AWS S3 offers
cheaper egress for data downloaded through the CloudFront network.`,
Advanced: true,
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes; this option creates an empty
object ending with "/" to persist the folder.
`,
}, {
Name: "use_multipart_etag",
Help: `Whether to use ETag in multipart uploads for verification
@@ -2394,24 +2258,6 @@ will decompress the object on the fly.
If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
}, {
Name: "use_accept_encoding_gzip",
Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.
By default, rclone will append |Accept-Encoding: gzip| to the request to download
compressed objects whenever possible.
However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
the signature of the request.
A symptom of this would be receiving errors like
SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
In this case, you might want to try disabling this option.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
@@ -2420,11 +2266,6 @@ In this case, you might want to try disabling this option.
Help: `Suppress setting and reading of system metadata`,
Advanced: true,
Default: false,
}, {
Name: "sts_endpoint",
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
Advanced: true,
},
}})
}
@@ -2511,7 +2352,6 @@ type Options struct {
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
STSEndpoint string `config:"sts_endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
@@ -2547,14 +2387,12 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
DisableHTTP2 bool `config:"disable_http2"`
DownloadURL string `config:"download_url"`
DirectoryMarkers bool `config:"directory_markers"`
UseMultipartEtag fs.Tristate `config:"use_multipart_etag"`
UsePresignedRequest bool `config:"use_presigned_request"`
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
Decompress bool `config:"decompress"`
MightGzip fs.Tristate `config:"might_gzip"`
UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
NoSystemMetadata bool `config:"no_system_metadata"`
}
@@ -2690,7 +2528,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
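The bucket.Join/path.Join swap here interacts with the directory marker support being reverted: path.Join applies path.Clean, which strips the trailing slash that marker objects rely on, while bucket.Join from lib/bucket is assumed to preserve it. A hedged illustration:

// Assumed semantics - path.Clean drops the trailing slash:
fmt.Println(path.Join("my-bucket", "dir/"))   // my-bucket/dir
fmt.Println(bucket.Join("my-bucket", "dir/")) // my-bucket/dir/ (assumed)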
@@ -2722,38 +2560,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
}
}
// Default name resolver
var defaultResolver = endpoints.DefaultResolver()
// resolve (service, region) to endpoint
//
// Used to set endpoint for s3 services and not for other services
type resolver map[string]string
// Add a service to the resolver, ignoring empty URLs
func (r resolver) addService(service, url string) {
if url == "" {
return
}
if !strings.HasPrefix(url, "http") {
url = "https://" + url
}
r[service] = url
}
// EndpointFor returns the endpoint for s3 if set or the default if not
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
fs.Debugf(nil, "Resolving service %q region %q", service, region)
url, ok := r[service]
if ok {
return endpoints.ResolvedEndpoint{
URL: url,
SigningRegion: region,
}, nil
}
return defaultResolver.EndpointFor(service, region, opts...)
}
// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
ci := fs.GetConfig(ctx)
@@ -2832,12 +2638,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" || opt.STSEndpoint != "" {
// If endpoints are set, override the relevant services only
r := make(resolver)
r.addService("s3", opt.Endpoint)
r.addService("sts", opt.STSEndpoint)
awsConfig.WithEndpointResolver(r)
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
@@ -2855,7 +2657,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
}
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts.Config.Credentials = nil
// awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
@@ -2947,12 +2749,11 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
// These should be differences from AWS S3
func setQuirks(opt *Options) {
var (
listObjectsV2 = true
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
useAcceptEncodingGzip = true
mightGzip = true // assume all providers might gzip until proven otherwise
listObjectsV2 = true
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
mightGzip = true // assume all providers might gzip until proven otherwise
)
switch opt.Provider {
case "AWS":
@@ -2994,8 +2795,6 @@ func setQuirks(opt *Options) {
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "Petabox":
// No quirks
case "Liara":
virtualHostStyle = false
urlEncodeListings = false
@@ -3031,23 +2830,14 @@ func setQuirks(opt *Options) {
if opt.ChunkSize < 64*fs.Mebi {
opt.ChunkSize = 64 * fs.Mebi
}
case "Synology":
useMultipartEtag = false
case "TencentCOS":
listObjectsV2 = false // untested
useMultipartEtag = false // untested
case "Wasabi":
// No quirks
case "Leviia":
// No quirks
case "Qiniu":
useMultipartEtag = false
urlEncodeListings = false
virtualHostStyle = false
case "GCS":
// Google break request Signature by mutating accept-encoding HTTP header
// https://github.com/rclone/rclone/issues/6670
useAcceptEncodingGzip = false
case "Other":
listObjectsV2 = false
virtualHostStyle = false
@@ -3092,12 +2882,6 @@ func setQuirks(opt *Options) {
opt.MightGzip.Valid = true
opt.MightGzip.Value = mightGzip
}
// set UseAcceptEncodingGzip if not manually set
if !opt.UseAcceptEncodingGzip.Valid {
opt.UseAcceptEncodingGzip.Valid = true
opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
}
}
// setRoot changes the root of the Fs
@@ -3122,7 +2906,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
fs.Debugf(nil, "name = %q, root = %q, opt = %#v", name, root, opt)
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("s3: chunk size: %w", err)
@@ -3132,7 +2915,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
}
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
@@ -3213,9 +2996,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Provider == "IDrive" {
f.features.SetTier = false
}
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// f.listMultipartUploads()
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
@@ -3256,7 +3036,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
err = f.list(ctx, listOpt{
bucket: bucket,
directory: bucketPath,
prefix: f.rootDirectory,
recurse: true,
withVersions: f.opt.Versions,
findFile: true,
@@ -3647,25 +3426,24 @@ var errEndList = errors.New("end list")
// list options
type listOpt struct {
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
noSkipMarkers bool // if set return dir marker objects
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
}
// list lists the objects into the function supplied with the opt
// supplied.
func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
if opt.prefix != "" {
opt.prefix += "/"
}
if !opt.findFile {
if opt.prefix != "" {
opt.prefix += "/"
}
if opt.directory != "" {
opt.directory += "/"
}
@@ -3708,7 +3486,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
default:
listBucket = f.newV2List(&req)
}
foundItems := 0
for {
var resp *s3.ListObjectsV2Output
var err error
@@ -3750,7 +3527,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
return err
}
if !opt.recurse {
foundItems += len(resp.CommonPrefixes)
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Logf(f, "Nil common prefix received")
@@ -3771,7 +3547,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
remote = remote[len(opt.prefix):]
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
remote = path.Join(opt.bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3783,7 +3559,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
}
}
foundItems += len(resp.Contents)
for i, object := range resp.Contents {
remote := aws.StringValue(object.Key)
if urlEncodeListings {
@@ -3798,29 +3573,19 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
fs.Logf(f, "Odd name received %q", remote)
continue
}
isDirectory := (remote == "" || strings.HasSuffix(remote, "/")) && object.Size != nil && *object.Size == 0
// is this a directory marker?
if isDirectory {
if opt.noSkipMarkers {
// process directory markers as files
isDirectory = false
} else {
// Don't insert the root directory
if remote == opt.directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
}
remote = remote[len(opt.prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
remote = path.Join(opt.bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
continue // skip directory marker
}
if versionIDs != nil {
err = fn(remote, object, versionIDs[i], isDirectory)
err = fn(remote, object, versionIDs[i], false)
} else {
err = fn(remote, object, nil, isDirectory)
err = fn(remote, object, nil, false)
}
if err != nil {
if err == errEndList {
@@ -3833,20 +3598,6 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
break
}
}
if f.opt.DirectoryMarkers && foundItems == 0 && opt.directory != "" {
// Determine whether the directory exists or not by whether it has a marker
req := s3.HeadObjectInput{
Bucket: &opt.bucket,
Key: &opt.directory,
}
_, err := f.headObject(ctx, &req)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil
}
@@ -4037,70 +3788,10 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
return false, err
}
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
meta: map[string]string{
metaMtime: swift.TimeToFloatString(time.Now()),
},
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.headObject(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
e := f.makeBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
return f.makeBucket(ctx, bucket)
}
// makeBucket creates the bucket if it doesn't exist
@@ -4141,18 +3832,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" {
return nil
}
@@ -4190,7 +3869,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Bucket = &dstBucket
req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(bucket.Join(srcBucket, srcPath))
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
source += fmt.Sprintf("?versionId=%s", *src.versionID)
}
@@ -4351,7 +4030,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errNotWithVersionAt
}
dstBucket, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -4421,17 +4100,17 @@ to normal storage.
Usage Examples:
rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successful or an error message
@@ -4847,14 +4526,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
delErr <- operations.DeleteFiles(ctx, delChan)
}()
checkErr(f.list(ctx, listOpt{
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
noSkipMarkers: true,
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
if isDirectory {
return nil
@@ -4864,7 +4542,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object, "Can't create object %+v", err)
return nil
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
// Work out whether the file is the current version or not
isCurrentVersion := !versioned || !version.Match(remote)
fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
@@ -4978,26 +4656,22 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
Key: &bucketPath,
VersionId: o.versionID,
}
return o.fs.headObject(ctx, &req)
}
func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.HeadObjectOutput, err error) {
if f.opt.RequesterPays {
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
}
if f.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if f.opt.SSECustomerKey != "" {
req.SSECustomerKey = &f.opt.SSECustomerKey
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if f.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
err = f.pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = f.c.HeadObjectWithContext(ctx, req)
return f.shouldRetry(ctx, err)
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -5007,9 +4681,7 @@ func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.
}
return nil, err
}
if req.Bucket != nil {
f.cache.MarkOK(*req.Bucket)
}
o.fs.cache.MarkOK(bucket)
return resp, nil
}
@@ -5246,9 +4918,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Override the automatic decompression in the transport to
// download compressed files as-is
if o.fs.opt.UseAcceptEncodingGzip.Value {
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
}
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
for _, option := range options {
switch option.(type) {
@@ -5364,9 +5034,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
}
uid := cout.UploadId
uploadCtx, cancel := context.WithCancel(ctx)
defer atexit.OnError(&err, func() {
cancel()
if o.fs.opt.LeavePartsOnError {
return
}
@@ -5386,7 +5054,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
})()
var (
g, gCtx = errgroup.WithContext(uploadCtx)
g, gCtx = errgroup.WithContext(ctx)
finished = false
partsMu sync.Mutex // to protect parts
parts []*s3.CompletedPart
@@ -5468,7 +5136,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
if err != nil {
if partNum <= int64(concurrency) {
return f.shouldRetry(gCtx, err)
return f.shouldRetry(ctx, err)
}
// retry all chunks once have done the first batch
return true, err
@@ -5500,7 +5168,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
var resp *s3.CompleteMultipartUploadOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
resp, err = f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: req.Bucket,
Key: req.Key,
MultipartUpload: &s3.CompletedMultipartUpload{
@@ -5509,7 +5177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(uploadCtx, err)
return f.shouldRetry(ctx, err)
})
if err != nil {
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
@@ -5658,12 +5326,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errNotWithVersionAt
}
bucket, bucketPath := o.split()
// Create parent dir/bucket if not saving directory marker
if !strings.HasSuffix(o.remote, "/") {
err := o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return err
}
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
}
modTime := src.ModTime(ctx)
size := src.Size()
@@ -5986,7 +5651,7 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
setMetadata("content-disposition", o.contentDisposition)
setMetadata("content-encoding", o.contentEncoding)
setMetadata("content-language", o.contentLanguage)
metadata["tier"] = o.GetTier()
setMetadata("tier", o.storageClass)
return metadata, nil
}

View File

@@ -6,19 +6,15 @@ import (
"context"
"crypto/md5"
"fmt"
"path"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
@@ -254,8 +250,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
time.Sleep(2 * time.Second)
// Create an object
const dirName = "versions"
const fileName = dirName + "/" + "test-versions.txt"
const fileName = "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
@@ -285,12 +280,11 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
}()
// Read the contents
entries, err := f.List(ctx, dirName)
entries, err := f.List(ctx, "")
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
@@ -315,23 +309,6 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
// Check we can make a NewFs from that object with a version suffix
t.Run("NewFs", func(t *testing.T) {
newPath := bucket.Join(fs.ConfigStringFull(f), fileNameVersion)
// Make sure --s3-versions is set in the config of the new remote
fs.Debugf(nil, "oldPath = %q", newPath)
lastColon := strings.LastIndex(newPath, ":")
require.True(t, lastColon >= 0)
newPath = newPath[:lastColon] + ",versions" + newPath[lastColon:]
fs.Debugf(nil, "newPath = %q", newPath)
fNew, err := cache.Get(ctx, newPath)
// This should return pointing to a file
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, fNew)
// With the root pointing to the directory above
assert.Equal(t, dirName, path.Base(fs.ConfigStringFull(fNew)))
})
})
t.Run("VersionAt", func(t *testing.T) {

View File

@@ -5,7 +5,6 @@ import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -21,24 +20,6 @@ func TestIntegration(t *testing.T) {
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
name := "TestS3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

View File

@@ -14,7 +14,6 @@ import (
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"delete": {},
"acl": {},
"location": {},
"logging": {},

View File

@@ -1,6 +1,7 @@
package seafile
import (
"sync"
"sync/atomic"
"testing"
"time"
@@ -16,19 +17,19 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
renew.Shutdown()
}
func TestRenewalInTimeLimit(t *testing.T) {
func TestRenewal(t *testing.T) {
var count int64
renew := NewRenew(100*time.Millisecond, func() error {
wg := sync.WaitGroup{}
wg.Add(2) // run the renewal twice
renew := NewRenew(time.Millisecond, func() error {
atomic.AddInt64(&count, 1)
wg.Done()
return nil
})
time.Sleep(time.Second)
wg.Wait()
renew.Shutdown()
// there's no guarantee the CI agent can handle a simple goroutine
renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))
// it is technically possible that a third renewal gets triggered between Wait() and Shutdown()
assert.GreaterOrEqual(t, atomic.LoadInt64(&count), int64(2))
}
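Neither version of the test shows the renewer itself. For context, a minimal sketch of the shape NewRenew presumably has - a ticker-driven loop with a stop channel, assuming the sync and time imports; this is not the actual seafile implementation:

// Renew calls fn every interval until Shutdown is called.
type Renew struct {
	stop chan struct{}
	once sync.Once
}

func NewRenew(every time.Duration, fn func() error) *Renew {
	r := &Renew{stop: make(chan struct{})}
	go func() {
		ticker := time.NewTicker(every)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				_ = fn() // renewal errors assumed to be handled by fn
			case <-r.stop:
				return
			}
		}
	}()
	return r
}

// Shutdown stops the loop; safe to call twice, as the first test above requires.
func (r *Renew) Shutdown() {
	r.once.Do(func() { close(r.stop) })
}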

View File

@@ -67,18 +67,15 @@ func init() {
Value: "https://cloud.seafile.com/",
Help: "Connect to cloud.seafile.com.",
}},
Sensitive: true,
}, {
Name: configUser,
Help: "User name (usually email address).",
Required: true,
Sensitive: true,
Name: configUser,
Help: "User name (usually email address).",
Required: true,
}, {
// Password is not required, it will be left blank for 2FA
Name: configPassword,
Help: "Password.",
IsPassword: true,
Sensitive: true,
}, {
Name: config2FA,
Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
@@ -90,7 +87,6 @@ func init() {
Name: configLibraryKey,
Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
IsPassword: true,
Sensitive: true,
}, {
Name: configCreateLibrary,
Help: "Should rclone create a library if it doesn't exist.",
@@ -98,10 +94,9 @@ func init() {
Default: false,
}, {
// Keep the authentication token after entering the 2FA code
Name: configAuthToken,
Help: "Authentication token.",
Hide: fs.OptionHideBoth,
Sensitive: true,
Name: configAuthToken,
Help: "Authentication token.",
Hide: fs.OptionHideBoth,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

View File

@@ -953,9 +953,11 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
return nil
}
// === API v2 from the official documentation, which has been replaced by the much better v2.1 (undocumented as of Apr 2020)
// === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6,
// === the others can probably be removed after the API v2.1 is documented
func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) {
// API v2 from the official documentation, which has been replaced by the much better v2.1 (undocumented as of Apr 2020)
// getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6.
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory
if libraryID == "" {
@@ -999,3 +1001,95 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
}
return result, nil
}
func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) {
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File
if srcLibraryID == "" || dstLibraryID == "" {
return nil, errors.New("libraryID and/or file path argument(s) missing")
}
srcPath = path.Join("/", srcPath)
dstPath = path.Join("/", dstPath)
// Older API does not seem to accept JSON input here either
postParameters := url.Values{
"operation": {"copy"},
"dst_repo": {dstLibraryID},
"dst_dir": {f.opt.Enc.FromStandardPath(dstPath)},
}
opts := rest.Opts{
Method: "POST",
Path: APIv20 + srcLibraryID + "/file/",
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}},
ContentType: "application/x-www-form-urlencoded",
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
}
result := &api.FileInfo{}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if resp.StatusCode == 401 || resp.StatusCode == 403 {
return nil, fs.ErrorPermissionDenied
}
}
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
err = rest.DecodeJSON(resp, &result)
if err != nil {
return nil, err
}
return f.decodeFileInfo(result), nil
}
func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error {
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File
if libraryID == "" || newname == "" {
return errors.New("libraryID and/or file path argument(s) missing")
}
filePath = path.Join("/", filePath)
// No luck with JSON input with the older api2
postParameters := url.Values{
"operation": {"rename"},
"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
"newname": {f.opt.Enc.FromStandardName(newname)},
}
opts := rest.Opts{
Method: "POST",
Path: APIv20 + libraryID + "/file/",
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}},
ContentType: "application/x-www-form-urlencoded",
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
NoRedirect: true,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if resp.StatusCode == 301 {
// This is the normal response from the server
return nil
}
if resp.StatusCode == 401 || resp.StatusCode == 403 {
return fs.ErrorPermissionDenied
}
if resp.StatusCode == 404 {
return fs.ErrorObjectNotFound
}
}
return fmt.Errorf("failed to rename file: %w", err)
}
return nil
}
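// Equivalent sketch for the rename above. "reloaddir=true" asks the server
// to respond with the reloaded directory listing instead of a 301 redirect
// to the renamed file, which is why a 301, if one still arrives, is treated
// as success rather than an error:
//
//	POST {server}/api2/repos/{libraryID}/file/?p=%2Fold%2Fpath
//	Content-Type: application/x-www-form-urlencoded
//
//	operation=rename&reloaddir=true&newname={newname}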


@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"regexp"
@@ -27,6 +26,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
@@ -58,15 +58,13 @@ func init() {
Description: "SSH/SFTP",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
Required: true,
Sensitive: true,
Name: "host",
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SSH username.",
Default: currentUser,
Sensitive: true,
Name: "user",
Help: "SSH username.",
Default: currentUser,
}, {
Name: "port",
Help: "SSH port number.",
@@ -76,9 +74,8 @@ func init() {
Help: "SSH password, leave blank to use ssh-agent.",
IsPassword: true,
}, {
Name: "key_pem",
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
Sensitive: true,
Name: "key_pem",
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
}, {
Name: "key_file",
Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
@@ -89,7 +86,6 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
Sensitive: true,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.
@@ -168,19 +164,7 @@ E.g. if shared folders can be found in directories representing volumes:
E.g. if home directory can be found in a shared folder called "home":
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory
To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path.
E.g. the first example above could be rewritten as:
rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2
Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home".
E.g. the second example above should be rewritten as:
rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1`,
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
@@ -339,7 +323,7 @@ Pass multiple variables space separated, eg
VAR1=value VAR2=value
and pass variables with spaces in quotes, eg
and pass variables with spaces in in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
@@ -385,61 +369,6 @@ Example:
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
`,
Advanced: true,
}, {
Name: "host_key_algorithms",
Default: fs.SpaceSepList{},
Help: `Space separated list of host key algorithms, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms.
Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
Example:
ssh-ed25519 ssh-rsa ssh-dss
`,
Advanced: true,
}, {
Name: "ssh",
Default: fs.SpaceSepList{},
Help: `Path and arguments to external ssh binary.
Normally rclone will use its internal ssh library to connect to the
SFTP server. However it does not implement all possible ssh options so
it may be desirable to use an external ssh binary.
Rclone ignores all the internal config if you use this option and
expects you to configure the ssh binary with the user/host/port and
any other options you need.
**Important** The ssh command must log in without asking for a
password so needs to be configured with keys or certificates.
Rclone will run the command supplied either with the additional
arguments "-s sftp" to access the SFTP subsystem or with commands such
as "md5sum /path/to/file" appended to read checksums.
Any arguments with spaces in should be surrounded by "double quotes".
An example setting might be:
ssh -o ServerAliveInterval=20 user@example.com
Note that when using an external ssh binary rclone makes a new ssh
connection for every hash it calculates.
`,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}},
}
fs.Register(fsi)
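// A minimal rclone.conf sketch tying the options above together (remote
// names, host and credentials are made up). With --sftp-ssh set, rclone
// ignores user/host/port from the config and leaves them to the external
// ssh command:
//
//	[external]
//	type = sftp
//	ssh = ssh -o ServerAliveInterval=20 user@example.com
//
//	[proxied]
//	type = sftp
//	host = example.com
//	user = rclone
//	socks_proxy = myUser:myPass@localhost:9005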
@@ -478,9 +407,6 @@ type Options struct {
Ciphers fs.SpaceSepList `config:"ciphers"`
KeyExchange fs.SpaceSepList `config:"key_exchange"`
MACs fs.SpaceSepList `config:"macs"`
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
SSH fs.SpaceSepList `config:"ssh"`
SocksProxy string `config:"socks_proxy"`
}
// Fs stores the interface to the remote SFTP files
@@ -517,16 +443,41 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(ctx)
conn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
if err != nil {
return nil, err
}
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
return ssh.NewClient(c, chans, reqs), nil
}
// conn encapsulates an ssh client and corresponding sftp client
type conn struct {
sshClient sshClient
sshClient *ssh.Client
sftpClient *sftp.Client
err chan error
}
// Wait for connection to close
func (c *conn) wait() {
c.err <- c.sshClient.Wait()
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
@@ -538,7 +489,7 @@ func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
for {
select {
case <-t.C:
c.sshClient.SendKeepAlive()
c.sendKeepAlive()
case <-done:
return
}
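// Hypothetical caller of the helper above: keepalives are sent every
// interval until the returned channel is closed.
//
//	done := c.sendKeepAlives(time.Minute)
//	defer close(done)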
@@ -590,11 +541,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
c = &conn{
err: make(chan error, 1),
}
if len(f.opt.SSH) == 0 {
c.sshClient, err = f.newSSHClientInternal(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
} else {
c.sshClient, err = f.newSSHClientExternal()
}
c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil {
return nil, fmt.Errorf("couldn't connect SSH: %w", err)
}
@@ -608,7 +555,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
}
// Set any environment variables on the ssh.Session
func (f *Fs) setEnv(s sshSession) error {
func (f *Fs) setEnv(s *ssh.Session) error {
for _, env := range f.opt.SetEnv {
equal := strings.IndexRune(env, '=')
if equal < 0 {
@@ -625,8 +572,8 @@ func (f *Fs) setEnv(s sshSession) error {
// Creates a new SFTP client on conn, using the specified subsystem
// or sftp server, and zero or more option functions
func (f *Fs) newSftpClient(client sshClient, opts ...sftp.ClientOption) (*sftp.Client, error) {
s, err := client.NewSession()
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
s, err := conn.NewSession()
if err != nil {
return nil, err
}
@@ -699,9 +646,6 @@ func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
// Getwd request
func (f *Fs) putSftpConnection(pc **conn, err error) {
c := *pc
if !c.sshClient.CanReuse() {
return
}
*pc = nil
if err != nil {
// work out if this is an expected error
@@ -780,10 +724,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if len(opt.SSH) != 0 && (opt.User != "" || opt.Host != "" || opt.Port != "") {
fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
}
if opt.User == "" {
opt.User = currentUser
}
@@ -799,10 +739,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
}
if len(opt.HostKeyAlgorithms) != 0 {
sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms)
}
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
if err != nil {
@@ -846,32 +782,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
// If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key
// and `${opt.KeyFile}.pub` contains the public key.
//
// If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key.
// This is how it works with openssh; the `IdentityFile` in openssh config points to the public key.
// It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone
// will try all the keys they find in the ssh-agent until they find one that works. But just like
// `IdentityFile` is used in openssh config to limit the search to one specific key, so does
// `opt.KeyFile` in rclone config limit the search to that specific key.
//
// However, previous versions of rclone would always expect to find the public key in
// `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility
// we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with
// an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`.
pubBytes, err := os.ReadFile(keyFile + ".pub")
if err != nil {
if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent {
pubBytes, err = os.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
} else {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse public key file: %w", err)
@@ -893,8 +807,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
// Load key file as a private key, if specified. This is only needed when not using an ssh agent.
if (keyFile != "" && !opt.KeyUseAgent) || opt.KeyPem != "" {
// Load key file if specified
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = os.ReadFile(keyFile)
@@ -1038,7 +952,6 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
PartialUploads: true,
}).Fill(ctx, f)
// Make a connection and pool it to return errors early
c, err := f.getSftpConnection(ctx)
@@ -1056,8 +969,8 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Failed to get shell session for shell type detection command: %v", err)
} else {
var stdout, stderr bytes.Buffer
session.SetStdout(&stdout)
session.SetStderr(&stderr)
session.Stdout = &stdout
session.Stderr = &stderr
shellCmd := "echo ${ShellId}%ComSpec%"
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
err = session.Run(shellCmd)
@@ -1110,7 +1023,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
}
}
f.putSftpConnection(&c, err)
if root != "" && !strings.HasSuffix(root, "/") {
if root != "" {
// Check to see if the root is actually an existing file,
// and if so change the filesystem root to its parent directory.
oldAbsRoot := f.absRoot
@@ -1213,6 +1126,13 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := path.Join(f.absRoot, dir)
ok, err := f.dirExists(ctx, root)
if err != nil {
return nil, fmt.Errorf("List failed: %w", err)
}
if !ok {
return nil, fs.ErrorDirNotFound
}
sftpDir := root
if sftpDir == "" {
sftpDir = "."
@@ -1224,9 +1144,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
infos, err := c.sftpClient.ReadDir(sftpDir)
f.putSftpConnection(&c, err)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
for _, info := range infos {
@@ -1370,17 +1287,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, fmt.Errorf("Move: %w", err)
}
srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
if _, ok := c.sftpClient.HasExtension("posix-rename@openssh.com"); ok {
err = c.sftpClient.PosixRename(srcPath, dstPath)
} else {
// If haven't got PosixRename then remove source first before renaming
err = c.sftpClient.Remove(dstPath)
if err != nil && !errors.Is(err, iofs.ErrNotExist) {
fs.Errorf(f, "Move: Failed to remove existing file %q: %v", dstPath, err)
}
err = c.sftpClient.Rename(srcPath, dstPath)
}
err = c.sftpClient.Rename(
srcObj.path(),
path.Join(f.absRoot, remote),
)
f.putSftpConnection(&c, err)
if err != nil {
return nil, fmt.Errorf("Move Rename failed: %w", err)
@@ -1467,8 +1377,8 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}()
var stdout, stderr bytes.Buffer
session.SetStdout(&stdout)
session.SetStderr(&stderr)
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
@@ -1775,9 +1685,6 @@ func (f *Fs) remotePath(remote string) string {
func (f *Fs) remoteShellPath(remote string) string {
if f.opt.PathOverride != "" {
shellPath := path.Join(f.opt.PathOverride, remote)
if f.opt.PathOverride[0] == '@' {
shellPath = path.Join(strings.TrimPrefix(f.opt.PathOverride, "@"), f.absRoot, remote)
}
fs.Debugf(f, "Shell path redirected to %q with option path_override", shellPath)
return shellPath
}
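// Worked example of the '@' form above, with made-up values: given
// --sftp-path-override "@/volume1", f.absRoot "/homes/USER" and remote
// "dir/file.txt", the shell path becomes
// "/volume1/homes/USER/dir/file.txt". Without the '@' prefix the override
// replaces the remote root entirely instead of being prepended to it.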
@@ -2035,10 +1942,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return fmt.Errorf("Update: %w", err)
}
// Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
o.fs.putSftpConnection(&c, err)
if err != nil {
o.fs.putSftpConnection(&c, err)
return fmt.Errorf("Update Create failed: %w", err)
}
// remove the file if upload failed
@@ -2058,18 +1964,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
if err != nil {
o.fs.putSftpConnection(&c, err)
remove()
return fmt.Errorf("Update ReadFrom failed: %w", err)
}
err = file.Close()
if err != nil {
o.fs.putSftpConnection(&c, err)
remove()
return fmt.Errorf("Update Close failed: %w", err)
}
// Release connection only when upload has finished so we don't upload multiple files on the same connection
o.fs.putSftpConnection(&c, err)
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
err = o.SetModTime(ctx, src.ModTime(ctx))


@@ -30,13 +30,3 @@ func TestIntegration2(t *testing.T) {
NilObject: (*sftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSFTPRcloneSSH:",
NilObject: (*sftp.Object)(nil),
})
}


@@ -1,73 +0,0 @@
//go:build !plan9
// +build !plan9
package sftp
import "io"
// Interfaces for ssh client and session implemented in ssh_internal.go and ssh_external.go
// An interface for an ssh client to abstract over internal ssh library and external binary
type sshClient interface {
// Wait blocks until the connection has shut down, and returns the
// error causing the shutdown.
Wait() error
// SendKeepAlive sends a keepalive message to keep the connection open
SendKeepAlive()
// Close the connection
Close() error
// NewSession opens a new sshSession for this sshClient. (A
// session is a remote execution of a program.)
NewSession() (sshSession, error)
// CanReuse indicates if this client can be reused
CanReuse() bool
}
// An interface for an ssh session to abstract over internal ssh library and external binary
type sshSession interface {
// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
Setenv(name, value string) error
// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
Start(cmd string) error
// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
StdinPipe() (io.WriteCloser, error)
// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
StdoutPipe() (io.Reader, error)
// RequestSubsystem requests the association of a subsystem
// with the session on the remote host. A subsystem is a
// predefined command that runs in the background when the ssh
// session is initiated
RequestSubsystem(subsystem string) error
// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
Run(cmd string) error
// Close the session
Close() error
// Set the stdout
SetStdout(io.Writer)
// Set the stderr
SetStderr(io.Writer)
}


@@ -1,223 +0,0 @@
//go:build !plan9
// +build !plan9
package sftp
import (
"context"
"errors"
"fmt"
"io"
"os/exec"
"strings"
"github.com/rclone/rclone/fs"
)
// Implement the sshClient interface for external ssh programs
type sshClientExternal struct {
f *Fs
session *sshSessionExternal
}
func (f *Fs) newSSHClientExternal() (sshClient, error) {
return &sshClientExternal{f: f}, nil
}
// Wait for connection to close
func (s *sshClientExternal) Wait() error {
if s.session == nil {
return nil
}
return s.session.Wait()
}
// Send a keepalive over the ssh connection
func (s *sshClientExternal) SendKeepAlive() {
// Up to the user to configure -o ServerAliveInterval=20 on their ssh connections
}
// Close the connection
func (s *sshClientExternal) Close() error {
if s.session == nil {
return nil
}
return s.session.Close()
}
// NewSession makes a new external SSH connection
func (s *sshClientExternal) NewSession() (sshSession, error) {
session := s.f.newSshSessionExternal()
if s.session == nil {
fs.Debugf(s.f, "ssh external: creating additional session")
}
return session, nil
}
// CanReuse indicates if this client can be reused
func (s *sshClientExternal) CanReuse() bool {
if s.session == nil {
return true
}
exited := s.session.exited()
canReuse := !exited && s.session.runningSFTP
// fs.Debugf(s.f, "ssh external: CanReuse %v, exited=%v runningSFTP=%v", canReuse, exited, s.session.runningSFTP)
return canReuse
}
// Check interfaces
var _ sshClient = &sshClientExternal{}
// implement the sshSession interface for external ssh binary
type sshSessionExternal struct {
f *Fs
cmd *exec.Cmd
cancel func()
startCalled bool
runningSFTP bool
}
func (f *Fs) newSshSessionExternal() *sshSessionExternal {
s := &sshSessionExternal{
f: f,
}
// Make a cancellation function for this to call in Close()
ctx, cancel := context.WithCancel(context.Background())
s.cancel = cancel
// Connect to a remote host and request the sftp subsystem via
// the 'ssh' command. This assumes that passwordless login is
// correctly configured.
ssh := append([]string(nil), s.f.opt.SSH...)
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
// Allow the command a short time only to shut down
// FIXME enable when we get rid of go1.19
// s.cmd.WaitDelay = time.Second
return s
}
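// Once go1.19 support is dropped, the FIXME above can presumably become
// the following (exec.Cmd.WaitDelay was added in go1.20):
//
//	s.cmd.WaitDelay = time.Second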
// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
func (s *sshSessionExternal) Setenv(name, value string) error {
return errors.New("ssh external: can't set environment variables")
}
const requestSubsystem = "***Subsystem***:"
// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
func (s *sshSessionExternal) Start(cmd string) error {
if s.startCalled {
return errors.New("internal error: ssh external: command already running")
}
s.startCalled = true
// Adjust the args
if strings.HasPrefix(cmd, requestSubsystem) {
s.cmd.Args = append(s.cmd.Args, "-s", cmd[len(requestSubsystem):])
s.runningSFTP = true
} else {
s.cmd.Args = append(s.cmd.Args, cmd)
s.runningSFTP = false
}
fs.Debugf(s.f, "ssh external: running: %v", fs.SpaceSepList(s.cmd.Args))
// start the process
err := s.cmd.Start()
if err != nil {
return fmt.Errorf("ssh external: start process: %w", err)
}
return nil
}
// RequestSubsystem requests the association of a subsystem
// with the session on the remote host. A subsystem is a
// predefined command that runs in the background when the ssh
// session is initiated
func (s *sshSessionExternal) RequestSubsystem(subsystem string) error {
return s.Start(requestSubsystem + subsystem)
}
// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
func (s *sshSessionExternal) StdinPipe() (io.WriteCloser, error) {
rd, err := s.cmd.StdinPipe()
if err != nil {
return nil, fmt.Errorf("ssh external: stdin pipe: %w", err)
}
return rd, nil
}
// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *sshSessionExternal) StdoutPipe() (io.Reader, error) {
wr, err := s.cmd.StdoutPipe()
if err != nil {
return nil, fmt.Errorf("ssh external: stdout pipe: %w", err)
}
return wr, nil
}
// Return whether the command has finished or not
func (s *sshSessionExternal) exited() bool {
return s.cmd.ProcessState != nil
}
// Wait for the command to exit
func (s *sshSessionExternal) Wait() error {
if s.exited() {
return nil
}
err := s.cmd.Wait()
if err == nil {
fs.Debugf(s.f, "ssh external: command exited OK")
} else {
fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
}
return err
}
// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
func (s *sshSessionExternal) Run(cmd string) error {
err := s.Start(cmd)
if err != nil {
return err
}
return s.Wait()
}
// Close the external ssh
func (s *sshSessionExternal) Close() error {
fs.Debugf(s.f, "ssh external: close")
// Cancel the context which kills the process
s.cancel()
// Wait for it to finish
_ = s.Wait()
return nil
}
// Set the stdout
func (s *sshSessionExternal) SetStdout(wr io.Writer) {
s.cmd.Stdout = wr
}
// Set the stderr
func (s *sshSessionExternal) SetStderr(wr io.Writer) {
s.cmd.Stderr = wr
}
// Check interfaces
var _ sshSession = &sshSessionExternal{}


@@ -1,101 +0,0 @@
//go:build !plan9
// +build !plan9
package sftp
import (
"context"
"io"
"net"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/proxy"
"golang.org/x/crypto/ssh"
)
// Internal ssh connections with "golang.org/x/crypto/ssh"
type sshClientInternal struct {
srv *ssh.Client
}
// newSSHClientInternal starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (sshClient, error) {
baseDialer := fshttp.NewDialer(ctx)
var (
conn net.Conn
err error
)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, addr)
}
if err != nil {
return nil, err
}
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
if err != nil {
return nil, err
}
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
srv := ssh.NewClient(c, chans, reqs)
return sshClientInternal{srv}, nil
}
// Wait for connection to close
func (s sshClientInternal) Wait() error {
return s.srv.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (s sshClientInternal) SendKeepAlive() {
_, _, err := s.srv.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Close the connection
func (s sshClientInternal) Close() error {
return s.srv.Close()
}
// CanReuse indicates if this client can be reused
func (s sshClientInternal) CanReuse() bool {
return true
}
// Check interfaces
var _ sshClient = sshClientInternal{}
// Thin wrapper for *ssh.Session to implement sshSession interface
type sshSessionInternal struct {
*ssh.Session
}
// Set the stdout
func (s sshSessionInternal) SetStdout(wr io.Writer) {
s.Session.Stdout = wr
}
// Set the stderr
func (s sshSessionInternal) SetStderr(wr io.Writer) {
s.Session.Stderr = wr
}
// NewSession makes an sshSession from an sshClient
func (s sshClientInternal) NewSession() (sshSession, error) {
session, err := s.srv.NewSession()
if err != nil {
return nil, err
}
return sshSessionInternal{Session: session}, nil
}
// Check interfaces
var _ sshSession = sshSessionInternal{}


@@ -155,7 +155,7 @@ func init() {
CheckAuth: checkAuth,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload.",
Default: defaultUploadCutoff,
@@ -182,7 +182,6 @@ standard values here or any folder ID (long hex number ID).`,
Value: "top",
Help: "Access the home, favorites, and shared folders as well as the connectors.",
}},
Sensitive: true,
}, {
Name: "chunk_size",
Default: defaultChunkSize,
@@ -217,7 +216,7 @@ be set manually to something like: https://XXX.sharefile.com
encoder.EncodeLeftSpace |
encoder.EncodeLeftPeriod |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -776,13 +775,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
}
// FIXMEPutStream uploads to the remote path with the modTime given of indeterminate size
//
// PutStream no longer appears to work - the streamed uploads need the
// size specified at the start otherwise we get this error:
//
// upload failed: file size does not match (-2)
func (f *Fs) FIXMEPutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
@@ -1459,12 +1453,12 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
// _ fs.PutStreamer = (*Fs)(nil)
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)


@@ -45,8 +45,7 @@ func init() {
Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
Keep default if Sia daemon runs on localhost.`,
Default: "http://127.0.0.1:9980",
Sensitive: true,
Default: "http://127.0.0.1:9980",
}, {
Name: "api_password",
Help: `Sia Daemon API Password.


@@ -34,10 +34,9 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
},
}
@@ -106,9 +105,9 @@ func (f *Fs) getSessions() int32 {
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
bgCtx := context.Background()
ctx = context.Background()
c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
}
@@ -119,7 +118,7 @@ func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err erro
_ = c.smbSession.Logoff()
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
}
c.smbShare = c.smbShare.WithContext(bgCtx)
c.smbShare = c.smbShare.WithContext(ctx)
}
return c, nil
}


@@ -41,15 +41,13 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
Required: true,
Sensitive: true,
Name: "host",
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SMB username.",
Default: currentUser,
Sensitive: true,
Name: "user",
Help: "SMB username.",
Default: currentUser,
}, {
Name: "port",
Help: "SMB port number.",
@@ -59,22 +57,9 @@ func init() {
Help: "SMB password.",
IsPassword: true,
}, {
Name: "domain",
Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP",
Sensitive: true,
}, {
Name: "spn",
Help: `Service principal name.
Rclone presents this name to the server. Some servers use this as further
authentication, and it often needs to be set for clusters. For example:
cifs/remotehost:1020
Leave blank if not sure.
`,
Sensitive: true,
Name: "domain",
Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP",
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -124,7 +109,6 @@ type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Domain string `config:"domain"`
SPN string `config:"spn"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
@@ -451,8 +435,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
share, dir := f.split("/")
if share == "" {
// Just return empty info rather than an error if called on the root
return &fs.Usage{}, nil
return nil, fs.ErrorListBucketRequired
}
dir = f.toSambaPath(dir)
@@ -475,45 +458,6 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return usage, nil
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
var err error
o := &Object{
fs: f,
remote: remote,
}
share, filename := o.split()
if share == "" || filename == "" {
return nil, fs.ErrorIsDir
}
err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}
filename = o.fs.toSambaPath(filename)
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return nil, fmt.Errorf("failed to open: %w", err)
}
return fl, nil
}
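// Hypothetical use of OpenWriterAt (names and sizes made up;
// fs.WriterAtCloser is io.WriterAt plus io.Closer). This is the interface
// rclone's multi-thread copy writes chunks through at different offsets:
//
//	w, err := f.OpenWriterAt(ctx, "dir/file.bin", size)
//	if err != nil {
//		return err
//	}
//	_, err = w.WriteAt(chunk, offset)
//	...
//	err = w.Close()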
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {


@@ -98,10 +98,9 @@ func init() {
},
}},
{
Name: "access_grant",
Help: "Access grant.",
Provider: "existing",
Sensitive: true,
Name: "access_grant",
Help: "Access grant.",
Provider: "existing",
},
{
Name: "satellite_address",
@@ -121,16 +120,14 @@ func init() {
},
},
{
Name: "api_key",
Help: "API key.",
Provider: newProvider,
Sensitive: true,
Name: "api_key",
Help: "API key.",
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Provider: newProvider,
Sensitive: true,
Name: "passphrase",
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Provider: newProvider,
},
},
})
@@ -531,11 +528,7 @@ func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err e
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
return f.put(ctx, in, src, src.Remote(), options...)
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", remote, options, src.Size())
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
// Reject options we don't support.
for _, option := range options {
@@ -546,7 +539,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
}
bucketName, bucketPath := f.absolute(remote)
bucketName, bucketPath := f.absolute(src.Remote())
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
@@ -556,7 +549,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
if err != nil {
aerr := upload.Abort()
if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
fs.Errorf(f, "cp input ./%s %+v: %+v", remote, options, aerr)
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
}()
@@ -581,7 +574,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", remote, options, err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
return nil, err
}
@@ -596,19 +589,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
} else if errors.Is(err, uplink.ErrTooManyRequests) {
// Storj has a rate limit of 1 per second of uploading to the same file.
// This produces ErrTooManyRequests here, so we wait 1 second and retry.
//
// See: https://github.com/storj/uplink/issues/149
fs.Debugf(f, "uploading too fast - sleeping for 1 second: %v", err)
time.Sleep(time.Second)
err = fserrors.RetryError(err)
}
return nil, err
}
return newObjectFromUplink(f, remote, upload.Info()), nil
return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
}
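// Note on the retry plumbing above: wrapping the error with
// fserrors.RetryError marks it as retryable, so the layers above (e.g.
// operations.Copy) re-run the whole Put up to --low-level-retries times
// instead of failing the transfer outright.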
// PutStream uploads to the remote path with the modTime given of indeterminate


@@ -176,9 +176,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "cp input ./%s %+v", o.Remote(), options)
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
oNew, err := o.fs.put(ctx, in, src, o.Remote(), options...)
oNew, err := o.fs.Put(ctx, in, src, options...)
if err == nil {
*o = *(oNew.(*Object))


@@ -132,50 +132,42 @@ func init() {
}
return nil, fmt.Errorf("unknown state %q", config.State)
}, Options: []fs.Option{{
Name: "app_id",
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
Sensitive: true,
Name: "app_id",
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
}, {
Name: "access_key_id",
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
Sensitive: true,
Name: "access_key_id",
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
}, {
Name: "private_access_key",
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
Sensitive: true,
Name: "private_access_key",
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
}, {
Name: "hard_delete",
Help: "Permanently delete files if true\notherwise put them in the deleted files.",
Default: false,
}, {
Name: "refresh_token",
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
Sensitive: true,
Name: "refresh_token",
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: "authorization",
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
Sensitive: true,
Name: "authorization",
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: "authorization_expiry",
Help: "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: "user",
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
Sensitive: true,
Name: "user",
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: "root_id",
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
Sensitive: true,
Name: "root_id",
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: "deleted_id",
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
Sensitive: true,
Name: "deleted_id",
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,


@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -100,7 +101,7 @@ but other operations such as Remove and Copy will fail.
func init() {
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)",
Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: append([]fs.Option{{
Name: "env_auth",
@@ -116,13 +117,11 @@ func init() {
},
},
}, {
Name: "user",
Help: "User name to log in (OS_USERNAME).",
Sensitive: true,
Name: "user",
Help: "User name to log in (OS_USERNAME).",
}, {
Name: "key",
Help: "API key or password (OS_PASSWORD).",
Sensitive: true,
Name: "key",
Help: "API key or password (OS_PASSWORD).",
}, {
Name: "auth",
Help: "Authentication URL for server (OS_AUTH_URL).",
@@ -144,30 +143,22 @@ func init() {
}, {
Value: "https://auth.cloud.ovh.net/v3",
Help: "OVH",
}, {
Value: "https://authenticate.ain.net",
Help: "Blomp Cloud Storage",
}},
}, {
Name: "user_id",
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
Sensitive: true,
Name: "user_id",
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
}, {
Name: "domain",
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
Sensitive: true,
Name: "domain",
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
}, {
Name: "tenant",
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
Sensitive: true,
Name: "tenant",
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
}, {
Name: "tenant_id",
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
Sensitive: true,
Name: "tenant_id",
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
}, {
Name: "tenant_domain",
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
Sensitive: true,
Name: "tenant_domain",
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
}, {
Name: "region",
Help: "Region name - optional (OS_REGION_NAME).",
@@ -175,21 +166,17 @@ func init() {
Name: "storage_url",
Help: "Storage URL - optional (OS_STORAGE_URL).",
}, {
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
Sensitive: true,
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
}, {
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
Sensitive: true,
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
}, {
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
Sensitive: true,
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
}, {
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
Sensitive: true,
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
}, {
Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
@@ -1341,6 +1328,23 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
return nil
}
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(ctx); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
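// Example of the manifest format parsed above. For a dynamic large object
// the header typically looks like
//
//	X-Object-Manifest: mycontainer_segments/path/to/object/
//
// which splits at the first '/' into segmentsContainer
// "mycontainer_segments" and prefix "path/to/object/".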
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
@@ -1572,10 +1576,6 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
if err == swift.ObjectNotFound {
fs.Errorf(o, "Dangling object - ignoring: %v", err)
err = nil
}
return shouldRetry(ctx, err)
})
if err != nil {


@@ -49,7 +49,8 @@ func (e Errors) Error() string {
if len(e) == 0 {
buf.WriteString("no error")
} else if len(e) == 1 {
}
if len(e) == 1 {
buf.WriteString("1 error: ")
} else {
fmt.Fprintf(&buf, "%d errors: ", len(e))
@@ -60,17 +61,8 @@ func (e Errors) Error() string {
buf.WriteString("; ")
}
if err != nil {
buf.WriteString(err.Error())
} else {
buf.WriteString("nil error")
}
buf.WriteString(err.Error())
}
return buf.String()
}
// Unwrap returns the wrapped errors
func (e Errors) Unwrap() []error {
return e
}
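// Unwrap returning []error follows the go1.20 multi-error convention, so
// the standard library can match the wrapped errors directly (the go1.20
// test file below asserts exactly this):
//
//	es := Errors{err1, err2}
//	errors.Is(es, err1) // true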


@@ -1,94 +0,0 @@
//go:build go1.20
// +build go1.20
package union
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
var (
err1 = errors.New("Error 1")
err2 = errors.New("Error 2")
err3 = errors.New("Error 3")
)
func TestErrorsMap(t *testing.T) {
es := Errors{
nil,
err1,
err2,
}
want := Errors{
err2,
}
got := es.Map(func(e error) error {
if e == err1 {
return nil
}
return e
})
assert.Equal(t, want, got)
}
func TestErrorsFilterNil(t *testing.T) {
es := Errors{
nil,
err1,
nil,
err2,
nil,
}
want := Errors{
err1,
err2,
}
got := es.FilterNil()
assert.Equal(t, want, got)
}
func TestErrorsErr(t *testing.T) {
// Check not all nil case
es := Errors{
nil,
err1,
nil,
err2,
nil,
}
want := Errors{
err1,
err2,
}
got := es.Err()
// Check all nil case
assert.Equal(t, want, got)
es = Errors{
nil,
nil,
nil,
}
assert.Nil(t, es.Err())
}
func TestErrorsError(t *testing.T) {
assert.Equal(t, "no error", Errors{}.Error())
assert.Equal(t, "1 error: Error 1", Errors{err1}.Error())
assert.Equal(t, "1 error: nil error", Errors{nil}.Error())
assert.Equal(t, "2 errors: Error 1; Error 2", Errors{err1, err2}.Error())
}
func TestErrorsUnwrap(t *testing.T) {
es := Errors{
err1,
err2,
}
assert.Equal(t, []error{err1, err2}, es.Unwrap())
assert.True(t, errors.Is(es, err1))
assert.True(t, errors.Is(es, err2))
assert.False(t, errors.Is(es, err3))
}


@@ -3,6 +3,7 @@ package policy
import (
"context"
"fmt"
"math/rand"
"path"
"strings"
"time"
@@ -108,7 +109,9 @@ func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
if err != nil {
return nil
}
return fs.NewDir("", time.Time{})
// random modtime for root
randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
return fs.NewDir("", randomNow)
}
found := false
for _, e := range entries {

Some files were not shown because too many files have changed in this diff.