mirror of https://github.com/rclone/rclone.git synced 2026-02-28 18:33:35 +00:00

Compare commits


2 Commits

Author SHA1 Message Date
albertony
0093e23e42 mount: changed handling of volume name (Windows and OSX)
Fixes an issue on Windows where mounting the local filesystem in network mode failed
when not using the --volname option. The reason was that the volume name in network mode
is a network share path in the basic UNC format, and characters that are invalid
in regular file and directory names are also invalid in such a path. The default
volume name would typically include a '?', which is invalid, derived from the UNC path
of the local filesystem, e.g. "\\server\\? C  Temp".

The fix is to use an encoder, when mounting in network mode, to encode invalid
characters such as '?' with their Unicode equivalents, similar to how rclone encodes
filesystem paths in normal operations (a minimal sketch of the idea follows the commit
details below). It also performs some automatic cleanup of path separators but in
general tries to be conservative about restrictions, relying instead on --volname
being set to something realistic.

The existing strategy of replacing the two characters ':' and '/' with spaces, regardless
of mounting mode variant, was removed. For network mode the new approach handles these in
a better way. The existing method also did not apply at all when using the implicit
network mode, where volume names are taken from the mount path instead of the volname
option ("rclone mount remote:path/to/files \\cloud\remote"). For non-network mode the
replacements were not needed.

Default volume names, when not specified by the user, will be different with this change.

See: #6234
2023-03-03 20:59:45 +01:00
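
A minimal sketch of the encoding idea described in the commit message above. The helper name, the exact replacement table, and the separator cleanup here are illustrative assumptions, not rclone's actual encoder API; rclone's real encoder lives in lib/encoder and maps invalid characters to fullwidth Unicode lookalikes in a similar style:

package main

import (
	"fmt"
	"strings"
)

// Map characters that are invalid in a Windows share name (the last
// component of a UNC path such as \\server\share) to fullwidth Unicode
// lookalikes. This table is illustrative, not rclone's exact mapping.
var volnameReplacer = strings.NewReplacer(
	"?", "？", // U+FF1F fullwidth question mark
	":", "：", // U+FF1A fullwidth colon
	"*", "＊", // U+FF0A fullwidth asterisk
	"<", "＜", // U+FF1C fullwidth less-than sign
	">", "＞", // U+FF1E fullwidth greater-than sign
	"|", "｜", // U+FF5C fullwidth vertical line
	`"`, "＂", // U+FF02 fullwidth quotation mark
)

// encodeVolumeName (hypothetical helper) makes a volume name safe to use
// as the share name when mounting in network mode.
func encodeVolumeName(name string) string {
	// Path separators cannot appear in a share name at all, so clean
	// them up rather than encoding them.
	name = strings.ReplaceAll(name, "/", " ")
	name = strings.ReplaceAll(name, `\`, " ")
	return volnameReplacer.Replace(strings.TrimSpace(name))
}

func main() {
	// A default volume name derived from a local path may contain '?'.
	fmt.Println(encodeVolumeName(`? C Temp`)) // prints: ？ C Temp
}
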
albertony
11443e4491 cmount: use network mode by default on windows 2023-03-03 20:59:45 +01:00
197 changed files with 1749 additions and 9919 deletions

View File

@@ -1,10 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "daily"
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "daily"

View File

@@ -8,9 +8,9 @@ name: build
 on:
   push:
     branches:
-      - '**'
+      - '*'
     tags:
-      - '**'
+      - '*'
   pull_request:
   workflow_dispatch:
     inputs:
@@ -104,7 +104,7 @@ jobs:
           fetch-depth: 0
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v3
         with:
           go-version: ${{ matrix.go }}
           check-latest: true
@@ -217,7 +217,7 @@ jobs:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
           # working-directory: '$(modulePath)'
         # Deploy binaries if enabled in config && not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
   lint:
     if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
@@ -237,7 +237,7 @@ jobs:
       # Run govulncheck on the latest go version, the one we build binaries with
       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v3
         with:
           go-version: '1.20'
           check-latest: true
@@ -262,7 +262,7 @@ jobs:
       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v3
         with:
           go-version: '1.20'
@@ -352,4 +352,4 @@ jobs:
         env:
           RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
         # Upload artifacts if not a PR && not a fork
-        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
+        if: github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -1,61 +0,0 @@
-name: Docker beta build
-on:
-  push:
-    branches:
-      - master
-jobs:
-  build:
-    if: github.repository == 'rclone/rclone'
-    runs-on: ubuntu-latest
-    name: Build image job
-    steps:
-      - name: Checkout master
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Extract metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v4
-        with:
-          images: ghcr.io/${{ github.repository }}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          # This is the user that triggered the Workflow. In this case, it will
-          # either be the user whom created the Release or manually triggered
-          # the workflow_dispatch.
-          username: ${{ github.actor }}
-          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
-          # GitHub Actions at the start of a workflow run to identify the job.
-          # This is used to authenticate against GitHub Container Registry.
-          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
-          # for more detailed information.
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and publish image
-        uses: docker/build-push-action@v4
-        with:
-          file: Dockerfile
-          context: .
-          push: true # push the image to ghcr
-          tags: |
-            ghcr.io/rclone/rclone:beta
-            rclone/rclone:beta
-          labels: ${{ steps.meta.outputs.labels }}
-          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-          provenance: false
-          # Eventually cache will need to be cleared if builds more frequent than once a week
-          # https://github.com/docker/build-push-action/issues/252

View File

@@ -0,0 +1,26 @@
+name: Docker beta build
+on:
+  push:
+    branches:
+      - master
+jobs:
+  build:
+    if: github.repository == 'rclone/rclone'
+    runs-on: ubuntu-latest
+    name: Build image job
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Build and publish image
+        uses: ilteoood/docker_buildx@1.1.0
+        with:
+          tag: beta
+          imageName: rclone/rclone
+          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          publish: true
+          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -1,14 +0,0 @@
-name: Publish to Winget
-on:
-  release:
-    types: [released]
-jobs:
-  publish:
-    runs-on: windows-latest # Action can only run on Windows
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@v2
-        with:
-          identifier: Rclone.Rclone
-          installers-regex: '-windows-\w+\.zip$'
-          token: ${{ secrets.WINGET_TOKEN }}

View File

@@ -2,17 +2,15 @@
 linters:
   enable:
+    - deadcode
     - errcheck
     - goimports
     - revive
     - ineffassign
+    - structcheck
+    - varcheck
     - govet
     - unconvert
-    - staticcheck
-    - gosimple
-    - stylecheck
-    - unused
-    - misspell
     #- prealloc
     #- maligned
   disable-all: true
@@ -27,30 +25,6 @@ issues:
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
-  exclude-rules:
-    - linters:
-        - staticcheck
-      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m
-linters-settings:
-  revive:
-    rules:
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
-      - name: empty-block
-        disabled: true
-      - name: redefines-builtin-id
-        disabled: true
-      - name: superfluous-else
-        disabled: true
-  stylecheck:
-    # Only enable the checks performed by the staticcheck stand-alone tool,
-    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
-    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

View File

@@ -11,7 +11,7 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest
-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
+RUN apk --no-cache add ca-certificates fuse tzdata && \
     echo "user_allow_other" >> /etc/fuse.conf
 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

View File

@@ -16,7 +16,6 @@ Current active maintainers of rclone are:
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
 | Caleb Case | @calebcase | storj backend |
-| wiserain | @wiserain | pikpak backend |
 **This is a work in progress Draft**

825	MANUAL.html generated - file diff suppressed because it is too large
1061	MANUAL.md generated - file diff suppressed because it is too large
1138	MANUAL.txt generated - file diff suppressed because it is too large

View File

@@ -37,7 +37,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
   * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
-  * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -67,7 +66,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
   * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-  * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)

View File

@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
 ## Making a release
   * git checkout master # see below for stable branch
-  * git pull # IMPORTANT
+  * git pull
   * git status - make sure everything is checked in
   * Check GitHub actions build for master is Green
   * make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
   * git status - to check for new man pages - git add them
   * git commit -a -v -m "Version v1.XX.0"
   * make retag
-  * git push origin # without --follow-tags so it doesn't push the tag if it fails
   * git push --follow-tags origin
   * # Wait for the GitHub builds to complete then...
   * make fetch_binaries

View File

@@ -1 +1 @@
-v1.63.0
+v1.62.0

View File

@@ -36,7 +36,6 @@ import (
 	_ "github.com/rclone/rclone/backend/opendrive"
 	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/pikpak"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"

View File

@@ -690,7 +690,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 		cred, err = azidentity.NewDefaultAzureCredential(&options)
 		if err != nil {
-			return nil, fmt.Errorf("create azure environment credential failed: %w", err)
+			return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
 		}
 	case opt.UseEmulator:
 		if opt.Account == "" {
@@ -953,7 +953,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
 }
 // Returns whether file is a directory marker or not
-func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
+func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
 	// Directory markers are 0 length
 	if size == 0 {
 		endsWithSlash := strings.HasSuffix(remote, "/")
@@ -964,7 +964,7 @@ func isDirectoryMarker(size int64, metadata map[string]*string, remote string) b
 		// defacto standard for marking blobs as directories.
 		// Note also that the metadata hasn't been normalised to lower case yet
 		for k, v := range metadata {
-			if v != nil && strings.EqualFold(k, "hdi_isfolder") && *v == "true" {
+			if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
 				return true
 			}
 		}
@@ -1471,8 +1471,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	srcBlobSVC := srcObj.getBlobSVC()
 	srcURL := srcBlobSVC.URL()
+	tier := blob.AccessTier(f.opt.AccessTier)
 	options := blob.StartCopyFromURLOptions{
-		Tier: parseTier(f.opt.AccessTier),
+		Tier: &tier,
 	}
 	var startCopy blob.StartCopyFromURLResponse
 	err = f.pacer.Call(func() (bool, error) {
@@ -1551,15 +1552,12 @@
 	return o.size
 }
-// Set o.metadata from metadata
-func (o *Object) setMetadata(metadata map[string]*string) {
+func (o *Object) setMetadata(metadata map[string]string) {
 	if len(metadata) > 0 {
 		// Lower case the metadata
 		o.meta = make(map[string]string, len(metadata))
 		for k, v := range metadata {
-			if v != nil {
-				o.meta[strings.ToLower(k)] = *v
-			}
+			o.meta[strings.ToLower(k)] = v
 		}
 		// Set o.modTime from metadata if it exists and
 		// UseServerModTime isn't in use.
@@ -1575,16 +1573,20 @@ func (o *Object) setMetadata(metadata map[string]*string) {
 	}
 }
-// Get metadata from o.meta
-func (o *Object) getMetadata() (metadata map[string]*string) {
-	if len(o.meta) == 0 {
-		return nil
+// Duplicte of setMetadata but taking pointers to strings
+func (o *Object) setMetadataP(metadata map[string]*string) {
+	if len(metadata) > 0 {
+		// Convert the format of the metadata
+		newMeta := make(map[string]string, len(metadata))
+		for k, v := range metadata {
+			if v != nil {
+				newMeta[k] = *v
+			}
+		}
+		o.setMetadata(newMeta)
+	} else {
+		o.meta = nil
 	}
-	metadata = make(map[string]*string, len(o.meta))
-	for k, v := range o.meta {
-		metadata[k] = &v
-	}
-	return metadata
 }
@@ -1716,7 +1718,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
 	} else {
 		o.accessTier = *info.Properties.AccessTier
 	}
-	o.setMetadata(metadata)
+	o.setMetadataP(metadata)
 	return nil
 }
@@ -1727,6 +1729,12 @@
 	return o.fs.getBlobSVC(container, directory)
 }
+// getBlockBlobSVC creates a block blob client
+func (o *Object) getBlockBlobSVC() *blockblob.Client {
+	container, directory := o.split()
+	return o.fs.getBlockBlobSVC(container, directory)
+}
 // clearMetaData clears enough metadata so readMetaData will re-read it
 func (o *Object) clearMetaData() {
 	o.modTime = time.Time{}
@@ -1792,7 +1800,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	blb := o.getBlobSVC()
 	opt := blob.SetMetadataOptions{}
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
+		_, err := blb.SetMetadata(ctx, o.meta, &opt)
 		return o.fs.shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1863,6 +1871,48 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return downloadResponse.Body, nil
 }
+// poolWrapper wraps a pool.Pool as an azblob.TransferManager
+type poolWrapper struct {
+	pool     *pool.Pool
+	bufToken chan struct{}
+	runToken chan struct{}
+}
+// newPoolWrapper creates an azblob.TransferManager that will use a
+// pool.Pool with maximum concurrency as specified.
+func (f *Fs) newPoolWrapper(concurrency int) *poolWrapper {
+	return &poolWrapper{
+		pool:     f.pool,
+		bufToken: make(chan struct{}, concurrency),
+		runToken: make(chan struct{}, concurrency),
+	}
+}
+// Get implements TransferManager.Get().
+func (pw *poolWrapper) Get() []byte {
+	pw.bufToken <- struct{}{}
+	return pw.pool.Get()
+}
+// Put implements TransferManager.Put().
+func (pw *poolWrapper) Put(b []byte) {
+	pw.pool.Put(b)
+	<-pw.bufToken
+}
+// Run implements TransferManager.Run().
+func (pw *poolWrapper) Run(f func()) {
+	pw.runToken <- struct{}{}
+	go func() {
+		f()
+		<-pw.runToken
+	}()
+}
+// Close implements TransferManager.Close().
+func (pw *poolWrapper) Close() {
+}
 // Converts a string into a pointer to a string
 func pString(s string) *string {
 	return &s
@@ -2044,9 +2094,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 		return err
 	}
+	tier := blob.AccessTier(o.fs.opt.AccessTier)
 	options := blockblob.CommitBlockListOptions{
-		Metadata:    o.getMetadata(),
-		Tier:        parseTier(o.fs.opt.AccessTier),
+		Metadata:    o.meta,
+		Tier:        &tier,
 		HTTPHeaders: httpHeaders,
 	}
@@ -2090,9 +2141,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
 	b := bytes.NewReader(buf[:n])
 	rs := &readSeekCloser{Reader: b, Seeker: b}
+	tier := blob.AccessTier(o.fs.opt.AccessTier)
 	options := blockblob.UploadOptions{
-		Metadata:    o.getMetadata(),
-		Tier:        parseTier(o.fs.opt.AccessTier),
+		Metadata:    o.meta,
+		Tier:        &tier,
 		HTTPHeaders: httpHeaders,
 	}
@@ -2262,14 +2314,6 @@ func (o *Object) GetTier() string {
 	return string(o.accessTier)
 }
-func parseTier(tier string) *blob.AccessTier {
-	if tier == "" {
-		return nil
-	}
-	msTier := blob.AccessTier(tier)
-	return &msTier
-}
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = &Fs{}

View File

@@ -27,7 +27,6 @@ import (
 	"sync/atomic"
 	"time"
-	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
 	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jws"
 )
 const (
@@ -76,11 +76,6 @@ var (
 	}
 )
-type boxCustomClaims struct {
-	jwt.RegisteredClaims
-	BoxSubType string `json:"box_sub_type,omitempty"`
-}
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -183,7 +178,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
 	client := fshttp.NewClient(ctx)
-	err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
+	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
@@ -199,29 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
 	return boxConfig, nil
 }
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
 	val, err := jwtutil.RandomHex(20)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
 	}
-	claims = &boxCustomClaims{
-		RegisteredClaims: jwt.RegisteredClaims{
-			ID:        val,
-			Issuer:    boxConfig.BoxAppSettings.ClientID,
-			Subject:   boxConfig.EnterpriseID,
-			Audience:  jwt.ClaimStrings{tokenURL},
-			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
+	claims = &jws.ClaimSet{
+		Iss: boxConfig.BoxAppSettings.ClientID,
+		Sub: boxConfig.EnterpriseID,
+		Aud: tokenURL,
+		Exp: time.Now().Add(time.Second * 45).Unix(),
+		PrivateClaims: map[string]interface{}{
+			"box_sub_type": boxSubType,
+			"aud":          tokenURL,
+			"jti":          val,
 		},
-		BoxSubType: boxSubType,
 	}
 	return claims, nil
 }
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
-	signingHeaders := map[string]interface{}{
-		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
+	signingHeaders := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 	return signingHeaders
 }

View File

@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	}
 }
-// StopBackgroundRunners will signal all the runners to stop their work
+// StopBackgroundRunners will signall all the runners to stop their work
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
 	f.cleanupChan <- false

View File

@@ -1098,6 +1098,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
 	return l, err
 }
+func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = in.Close()
+	}()
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = out.Close()
+	}()
+	_, err = io.Copy(out, in)
+	return err
+}
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error

View File

@@ -1,4 +1,4 @@
-// Package combine implements a backend to combine multiple remotes in a directory tree
+// Package combine implents a backend to combine multiple remotes in a directory tree
 package combine
 /*
@@ -351,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
 	return g.Wait()
 }
-// join the elements together but unlike path.Join return empty string
+// join the elements together but unline path.Join return empty string
 func join(elem ...string) string {
 	result := path.Join(elem...)
 	if result == "." {

View File

@@ -21,7 +21,6 @@ import (
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/version"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
@@ -179,7 +178,6 @@ type Cipher struct {
 	buffers        sync.Pool // encrypt/decrypt buffers
 	cryptoRand     io.Reader // read crypto random numbers from here
 	dirNameEncrypt bool
-	passBadBlocks  bool // if set passed bad blocks as zeroed blocks
 }
 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
@@ -191,7 +189,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 		dirNameEncrypt: dirNameEncrypt,
 	}
 	c.buffers.New = func() interface{} {
-		return new([blockSize]byte)
+		return make([]byte, blockSize)
 	}
 	err := c.Key(password, salt)
 	if err != nil {
@@ -200,16 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 	return c, nil
 }
-// Call to set bad block pass through
-func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
-	c.passBadBlocks = passBadBlocks
-}
 // Key creates all the internal keys from the password passed in using
 // scrypt.
 //
 // If salt is "" we use a fixed salt just to make attackers lives
-// slightly harder than using no salt.
+// slighty harder than using no salt.
 //
 // Note that empty password makes all 0x00 keys which is used in the
 // tests.
@@ -237,12 +230,15 @@
 }
 // getBlock gets a block from the pool of size blockSize
-func (c *Cipher) getBlock() *[blockSize]byte {
-	return c.buffers.Get().(*[blockSize]byte)
+func (c *Cipher) getBlock() []byte {
+	return c.buffers.Get().([]byte)
 }
 // putBlock returns a block to the pool of size blockSize
-func (c *Cipher) putBlock(buf *[blockSize]byte) {
+func (c *Cipher) putBlock(buf []byte) {
+	if len(buf) != blockSize {
+		panic("bad blocksize returned to pool")
+	}
 	c.buffers.Put(buf)
 }
@@ -613,7 +609,7 @@
 // fromReader fills the nonce from an io.Reader - normally the OSes
 // crypto random number generator
 func (n *nonce) fromReader(in io.Reader) error {
-	read, err := readers.ReadFill(in, (*n)[:])
+	read, err := io.ReadFull(in, (*n)[:])
 	if read != fileNonceSize {
 		return fmt.Errorf("short read of nonce: %w", err)
 	}
@@ -668,8 +664,8 @@ type encrypter struct {
 	in       io.Reader
 	c        *Cipher
 	nonce    nonce
-	buf      *[blockSize]byte
-	readBuf  *[blockSize]byte
+	buf      []byte
+	readBuf  []byte
 	bufIndex int
 	bufSize  int
 	err      error
@@ -694,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
 		}
 	}
 	// Copy magic into buffer
-	copy((*fh.buf)[:], fileMagicBytes)
+	copy(fh.buf, fileMagicBytes)
 	// Copy nonce into buffer
-	copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
+	copy(fh.buf[fileMagicSize:], fh.nonce[:])
 	return fh, nil
 }
@@ -711,20 +707,22 @@
 	if fh.bufIndex >= fh.bufSize {
 		// Read data
 		// FIXME should overlap the reads with a go-routine and 2 buffers?
-		readBuf := (*fh.readBuf)[:blockDataSize]
-		n, err = readers.ReadFill(fh.in, readBuf)
+		readBuf := fh.readBuf[:blockDataSize]
+		n, err = io.ReadFull(fh.in, readBuf)
 		if n == 0 {
+			// err can't be nil since:
+			// n == len(buf) if and only if err == nil.
 			return fh.finish(err)
 		}
 		// possibly err != nil here, but we will process the
-		// data and the next call to ReadFill will return 0, err
+		// data and the next call to ReadFull will return 0, err
 		// Encrypt the block using the nonce
-		secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+		secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 		fh.bufIndex = 0
 		fh.bufSize = blockHeaderSize + n
 		fh.nonce.increment()
 	}
-	n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
+	n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
 	fh.bufIndex += n
 	return n, nil
 }
@@ -765,8 +763,8 @@ type decrypter struct {
 	nonce        nonce
 	initialNonce nonce
 	c            *Cipher
-	buf          *[blockSize]byte
-	readBuf      *[blockSize]byte
+	buf          []byte
+	readBuf      []byte
 	bufIndex     int
 	bufSize      int
 	err          error
@@ -784,9 +782,9 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 		limit: -1,
 	}
 	// Read file header (magic + nonce)
-	readBuf := (*fh.readBuf)[:fileHeaderSize]
-	n, err := readers.ReadFill(fh.rc, readBuf)
-	if n < fileHeaderSize && err == io.EOF {
+	readBuf := fh.readBuf[:fileHeaderSize]
+	_, err := io.ReadFull(fh.rc, readBuf)
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
 		// This read from 0..fileHeaderSize-1 bytes
 		return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
 	} else if err != nil {
@@ -847,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
 func (fh *decrypter) fillBuffer() (err error) {
 	// FIXME should overlap the reads with a go-routine and 2 buffers?
 	readBuf := fh.readBuf
-	n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
+	n, err := io.ReadFull(fh.rc, readBuf)
 	if n == 0 {
+		// err can't be nil since:
+		// n == len(buf) if and only if err == nil.
 		return err
 	}
 	// possibly err != nil here, but we will process the data and
@@ -856,25 +856,18 @@
 	// Check header + 1 byte exists
 	if n <= blockHeaderSize {
-		if err != nil && err != io.EOF {
+		if err != nil {
 			return err // return pending error as it is likely more accurate
 		}
 		return ErrorEncryptedFileBadHeader
 	}
 	// Decrypt the block using the nonce
-	_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 	if !ok {
-		if err != nil && err != io.EOF {
+		if err != nil {
 			return err // return pending error as it is likely more accurate
 		}
-		if !fh.c.passBadBlocks {
-			return ErrorEncryptedBadBlock
-		}
-		fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
-		// Zero out the bad block and continue
-		for i := range (*fh.buf)[:n] {
-			(*fh.buf)[i] = 0
-		}
+		return ErrorEncryptedBadBlock
 	}
 	fh.bufIndex = 0
 	fh.bufSize = n - blockHeaderSize
@@ -900,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 	if fh.limit >= 0 && fh.limit < int64(toCopy) {
 		toCopy = int(fh.limit)
 	}
-	n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
+	n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
 	fh.bufIndex += n
 	if fh.limit >= 0 {
 		fh.limit -= int64(n)
@@ -911,8 +904,9 @@
 	return n, nil
 }
-// calculateUnderlying converts an (offset, limit) in an encrypted file
-// into an (underlyingOffset, underlyingLimit) for the underlying file.
+// calculateUnderlying converts an (offset, limit) in a crypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying
+// file.
 //
 // It also returns number of bytes to discard after reading the first
 // block and number of blocks this is from the start so the nonce can

View File

@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
 		{"off", NameEncryptionOff, ""},
 		{"standard", NameEncryptionStandard, ""},
 		{"obfuscate", NameEncryptionObfuscated, ""},
-		{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
+		{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
 	} {
 		actual, actualErr := NewNameEncryptionMode(test.in)
 		assert.Equal(t, actual, test.expected)
 		if test.expectedErr == "" {
 			assert.NoError(t, actualErr)
 		} else {
-			assert.EqualError(t, actualErr, test.expectedErr)
+			assert.Error(t, actualErr, test.expectedErr)
 		}
 	}
 }
@@ -726,7 +726,7 @@ func TestNonceFromReader(t *testing.T) {
 	assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
 	buf = bytes.NewBufferString("123456789abcdefghijklmn")
 	err = x.fromReader(buf)
-	assert.EqualError(t, err, "short read of nonce: EOF")
+	assert.Error(t, err, "short read of nonce")
 }
 func TestNonceFromBuf(t *testing.T) {
@@ -1050,7 +1050,7 @@ func TestRandomSource(t *testing.T) {
 	_, _ = source.Read(buf)
 	sink = newRandomSource(1e8)
 	_, err = io.Copy(sink, source)
-	assert.EqualError(t, err, "Error in stream at 1")
+	assert.Error(t, err, "Error in stream")
 }
 type zeroes struct{}
@@ -1167,13 +1167,13 @@ func TestNewEncrypter(t *testing.T) {
 	fh, err := c.newEncrypter(z, nil)
 	assert.NoError(t, err)
 	assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
-	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
+	assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
 	// Test error path
 	c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
 	fh, err = c.newEncrypter(z, nil)
 	assert.Nil(t, fh)
-	assert.EqualError(t, err, "short read of nonce: EOF")
+	assert.Error(t, err, "short read of nonce")
 }
 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1224,7 +1224,7 @@ func TestNewDecrypter(t *testing.T) {
 		cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
 		fh, err = c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
+		assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
 		assert.Equal(t, 1, cd.closed)
 	}
@@ -1232,7 +1232,7 @@
 	cd = newCloseDetector(er)
 	fh, err = c.newDecrypter(cd)
 	assert.Nil(t, fh)
-	assert.EqualError(t, err, "potato")
+	assert.Error(t, err, "potato")
 	assert.Equal(t, 1, cd.closed)
 	// bad magic
@@ -1243,7 +1243,7 @@
 		cd := newCloseDetector(bytes.NewBuffer(file0copy))
 		fh, err := c.newDecrypter(cd)
 		assert.Nil(t, fh)
-		assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
+		assert.Error(t, err, ErrorEncryptedBadMagic.Error())
 		file0copy[i] ^= 0x1
 		assert.Equal(t, 1, cd.closed)
 	}
@@ -1495,10 +1495,8 @@ func TestDecrypterRead(t *testing.T) {
 		case i == fileHeaderSize:
 			// This would normally produce an error *except* on the first block
 			expectedErr = nil
-		case i <= fileHeaderSize+blockHeaderSize:
-			expectedErr = ErrorEncryptedFileBadHeader
 		default:
-			expectedErr = ErrorEncryptedBadBlock
+			expectedErr = io.ErrUnexpectedEOF
 		}
 		if expectedErr != nil {
 			assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1516,7 +1514,7 @@
 	fh, err := c.newDecrypter(cd)
 	assert.NoError(t, err)
 	_, err = io.ReadAll(fh)
-	assert.EqualError(t, err, "potato")
+	assert.Error(t, err, "potato")
 	assert.Equal(t, 0, cd.closed)
 	// Test corrupting the input
@@ -1527,26 +1525,15 @@
 		file16copy[i] ^= 0xFF
 		fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
 		if i < fileMagicSize {
-			assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
+			assert.Error(t, err, ErrorEncryptedBadMagic.Error())
 			assert.Nil(t, fh)
 		} else {
 			assert.NoError(t, err)
 			_, err = io.ReadAll(fh)
-			assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
+			assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
 		}
 		file16copy[i] ^= 0xFF
 	}
-	// Test that we can corrupt a byte and read zeroes if
-	// passBadBlocks is set
-	copy(file16copy, file16)
-	file16copy[len(file16copy)-1] ^= 0xFF
-	c.passBadBlocks = true
-	fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
-	assert.NoError(t, err)
-	buf, err := io.ReadAll(fh)
-	assert.NoError(t, err)
-	assert.Equal(t, make([]byte, 16), buf)
 }
@@ -1567,7 +1554,7 @@ func TestDecrypterClose(t *testing.T) {
 	// double close
 	err = fh.Close()
-	assert.EqualError(t, err, ErrorFileClosed.Error())
+	assert.Error(t, err, ErrorFileClosed.Error())
 	assert.Equal(t, 1, cd.closed)
 	// try again reading the file this time
@@ -1594,6 +1581,8 @@ func TestPutGetBlock(t *testing.T) {
 	block := c.getBlock()
 	c.putBlock(block)
 	c.putBlock(block)
+	assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
 }
 func TestKey(t *testing.T) {
func TestKey(t *testing.T) { func TestKey(t *testing.T) {

View File

@@ -119,15 +119,6 @@ names, or for debugging purposes.`,
 				Help: "Encrypt file data.",
 			},
 		},
-	}, {
-		Name: "pass_bad_blocks",
-		Help: `If set this will pass bad blocks through as all 0.
-This should not be set in normal operation, it should only be set if
-trying to recover a crypted file with errors and it is desired to
-recover as much of the file as possible.`,
-		Default:  false,
-		Advanced: true,
 	}, {
 		Name: "filename_encoding",
 		Help: `How to encode the encrypted filename to text string.
@@ -147,7 +138,7 @@ length and if it's case sensitive.`,
 			},
 			{
 				Value: "base32768",
-				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
+				Help:  "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
 			},
 		},
 		Advanced: true,
@@ -183,7 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to make cipher: %w", err)
 	}
-	cipher.setPassBadBlocks(opt.PassBadBlocks)
 	return cipher, nil
 }
@@ -272,7 +262,6 @@ type Options struct {
 	Password2               string `config:"password2"`
 	ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
 	ShowMapping             bool   `config:"show_mapping"`
-	PassBadBlocks           bool   `config:"pass_bad_blocks"`
 	FilenameEncoding        string `config:"filename_encoding"`
 }
@@ -465,7 +454,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
+		return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 	}
 	fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }

View File

@@ -202,7 +202,7 @@ func init() {
 			m.Set("root_folder_id", "appDataFolder")
 		}
-		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
+		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
 			return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
 				OAuth2Config: driveConfig,
 			})
@@ -598,18 +598,6 @@ resource key is no needed.
 			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
 			// Don't encode / as it's a valid name character in drive.
 			Default: encoder.EncodeInvalidUtf8,
-		}, {
-			Name:     "env_auth",
-			Help:     "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
-			Default:  false,
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "false",
-				Help:  "Enter credentials in the next step.",
-			}, {
-				Value: "true",
-				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
-			}},
 		}}...),
 	})
@@ -666,7 +654,6 @@ type Options struct {
 	SkipDanglingShortcuts     bool                 `config:"skip_dangling_shortcuts"`
 	ResourceKey               string               `config:"resource_key"`
 	Enc                       encoder.MultiEncoder `config:"encoding"`
-	EnvAuth                   bool                 `config:"env_auth"`
 }
 // Fs represents a remote drive server
@@ -774,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 	} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 		fs.Errorf(f, "Received download limit error: %v", err)
 		return false, fserrors.FatalError(err)
-	} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
+	} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
 		fs.Errorf(f, "Received upload limit error: %v", err)
 		return false, fserrors.FatalError(err)
 	} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1135,12 +1122,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 		if err != nil {
 			return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
 		}
-	} else if opt.EnvAuth {
-		scopes := driveScopes(opt.Scope)
-		oAuthClient, err = google.DefaultClient(ctx, scopes...)
-		if err != nil {
-			return nil, fmt.Errorf("failed to create client from environment: %w", err)
-		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 		if err != nil {
@@ -2899,7 +2880,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 		if f.rootFolderID == "appDataFolder" {
 			changesCall.Spaces("appDataFolder")
 		}
-		changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
 		changeList, err = changesCall.Context(ctx).Do()
 		return f.shouldRetry(ctx, err)
 	})

View File

@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
 	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
 	assert.False(t, quotaExceededRetry)
 	assert.Equal(t, quotaExceededError, expectedQuotaError)
-	sqEItem := googleapi.ErrorItem{
-		Reason: "storageQuotaExceeded",
-	}
-	generic403.Errors[0] = sqEItem
-	expectedStorageQuotaError := fserrors.FatalError(&generic403)
-	storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
-	assert.False(t, storageQuotaExceededRetry)
-	assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
 }
 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
func (f *Fs) InternalTestDocumentImport(t *testing.T) { func (f *Fs) InternalTestDocumentImport(t *testing.T) {

View File

@@ -13,6 +13,7 @@ import (
 	"sync"
 	"time"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/fserrors"
@@ -139,6 +140,49 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
 	return complete, nil
 }
+// finishBatchJobStatus waits for the batch to complete returning completed entries
+func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
+	if launchBatchStatus.AsyncJobId == "" {
+		return nil, errors.New("wait for batch completion: empty job ID")
+	}
+	var batchStatus *files.UploadSessionFinishBatchJobStatus
+	sleepTime := 100 * time.Millisecond
+	const maxSleepTime = 1 * time.Second
+	startTime := time.Now()
+	try := 1
+	for {
+		remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
+		if remaining < 0 {
+			break
+		}
+		err = b.f.pacer.Call(func() (bool, error) {
+			batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
+				AsyncJobId: launchBatchStatus.AsyncJobId,
+			})
+			return shouldRetry(ctx, err)
+		})
+		if err != nil {
+			fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
+		} else {
+			if batchStatus.Tag == "complete" {
+				fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
+				return batchStatus.Complete, nil
+			}
+			fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
+		}
+		time.Sleep(sleepTime)
+		sleepTime *= 2
+		if sleepTime > maxSleepTime {
+			sleepTime = maxSleepTime
+		}
+		try++
+	}
+	if err == nil {
+		err = errors.New("batch didn't complete")
+	}
+	return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
+}
 // commit a batch
 func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
 	// If commit fails then signal clients if sync

View File

@@ -536,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		default:
 			return nil, err
 		}
-		// if the mount failed we have to abort here
+		// if the moint failed we have to abort here
 	}
 	// if the mount succeeded it's now a normal folder in the users root namespace
 	// we disable shared folder mode and proceed normally

View File

@@ -473,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, fmt.Errorf("didn't get an upload node: %w", err)
+		return nil, fmt.Errorf("didnt got an upload node: %w", err)
 	}
 	// fs.Debugf(f, "Got Upload node")

View File

@@ -333,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
 	if size > int64(300e9) {
-		return nil, errors.New("File too big, can't upload")
+		return nil, errors.New("File too big, cant upload")
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
 	}

View File

@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/jlaffaye/ftp" "github.com/rclone/ftp"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -315,33 +315,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be // shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience // retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) { func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
if isRetriableFtpError(err) { switch errX := err.(type) {
return true, err case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
} }
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
@@ -478,7 +463,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil *pc = nil
if err != nil { if err != nil {
// If not a regular FTP error code then check the connection // If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil { var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp() nopErr := c.NoOp()
if nopErr != nil { if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr) fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -627,7 +613,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 // translateErrorFile turns FTP errors into rclone errors if possible for a file
 func translateErrorFile(err error) error {
-	if errX := textprotoError(err); errX != nil {
+	switch errX := err.(type) {
+	case *textproto.Error:
 		switch errX.Code {
 		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
 			err = fs.ErrorObjectNotFound
@@ -638,7 +625,8 @@ func translateErrorFile(err error) error {
 // translateErrorDir turns FTP errors into rclone errors if possible for a directory
 func translateErrorDir(err error) error {
-	if errX := textprotoError(err); errX != nil {
+	switch errX := err.(type) {
+	case *textproto.Error:
 		switch errX.Code {
 		case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
 			err = fs.ErrorDirNotFound
@@ -929,7 +917,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	}
 	err = c.MakeDir(f.dirFromStandardPath(abspath))
 	f.putFtpConnection(&c, err)
-	if errX := textprotoError(err); errX != nil {
+	switch errX := err.(type) {
+	case *textproto.Error:
 		switch errX.Code {
 		case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
 			err = nil
@@ -1170,7 +1159,8 @@ func (f *ftpReadCloser) Close() error {
 	// mask the error if it was caused by a premature close
 	// NB StatusAboutToSend is to work around a bug in pureftpd
 	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
-	if errX := textprotoError(err); errX != nil {
+	switch errX := err.(type) {
+	case *textproto.Error:
 		switch errX.Code {
 		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
 			err = nil
@@ -1196,26 +1186,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	var (
-		fd *ftp.Response
-		c  *ftp.ServerConn
-	)
-	err = o.fs.pacer.Call(func() (bool, error) {
-		c, err = o.fs.getFtpConnection(ctx)
-		if err != nil {
-			return false, err // getFtpConnection has retries already
-		}
-		fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
-		if err != nil {
-			o.fs.putFtpConnection(&c, err)
-		}
-		return shouldRetry(ctx, err)
-	})
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("open: %w", err)
 	}
+	fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
+	if err != nil {
+		o.fs.putFtpConnection(&c, err)
+		return nil, fmt.Errorf("open: %w", err)
+	}
 	rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
 	return rc, nil
 }
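The shape of the Open change above: one variant acquires the connection and issues RetrFrom inside a single pacer retry loop, so a transient transfer error is retried on a fresh connection. A toy stand-in for that retry loop (illustrative names only, not rclone's fs.Pacer API):

package main

import (
	"errors"
	"fmt"
)

// call is a toy stand-in for pacer.Call: it retries f while f reports the
// error as retriable, up to a fixed number of attempts.
func call(f func() (retry bool, err error)) error {
	var err error
	for try := 0; try < 3; try++ {
		var retry bool
		retry, err = f()
		if !retry {
			return err
		}
	}
	return fmt.Errorf("retries exhausted: %w", err)
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts == 1 {
			// e.g. a 421 from the server: retry with a fresh connection
			return true, errors.New("transient FTP error")
		}
		return false, nil // success on the second attempt
	})
	fmt.Println(attempts, err) // 2 <nil>
}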
@@ -1248,10 +1227,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
 	// Ignore error 250 here - send by some servers
-	if errX := textprotoError(err); errX != nil {
-		switch errX.Code {
-		case ftp.StatusRequestedFileActionOK:
-			err = nil
+	if err != nil {
+		switch errX := err.(type) {
+		case *textproto.Error:
+			switch errX.Code {
+			case ftp.StatusRequestedFileActionOK:
+				err = nil
+			}
 		}
 	}
 	if err != nil {

View File

@@ -82,8 +82,7 @@ func init() {
 		saFile, _ := m.Get("service_account_file")
 		saCreds, _ := m.Get("service_account_credentials")
 		anonymous, _ := m.Get("anonymous")
-		envAuth, _ := m.Get("env_auth")
-		if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
+		if saFile != "" || saCreds != "" || anonymous == "true" {
 			return nil, nil
 		}
 		return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -93,9 +92,6 @@ func init() {
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "project_number",
 			Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
-		}, {
-			Name: "user_project",
-			Help: "User project.\n\nOptional - needed only for requester pays.",
 		}, {
 			Name: "service_account_file",
 			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -334,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
 			Default: (encoder.Base |
 				encoder.EncodeCrLf |
 				encoder.EncodeInvalidUtf8),
-		}, {
-			Name:    "env_auth",
-			Help:    "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
-			Default: false,
-			Examples: []fs.OptionExample{{
-				Value: "false",
-				Help:  "Enter credentials in the next step.",
-			}, {
-				Value: "true",
-				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
-			}},
 		}}...),
 	})
 }
@@ -352,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
 // Options defines the configuration for this backend
 type Options struct {
 	ProjectNumber             string               `config:"project_number"`
-	UserProject               string               `config:"user_project"`
 	ServiceAccountFile        string               `config:"service_account_file"`
 	ServiceAccountCredentials string               `config:"service_account_credentials"`
 	Anonymous                 bool                 `config:"anonymous"`
@@ -365,7 +349,6 @@ type Options struct {
 	Decompress                bool                 `config:"decompress"`
 	Endpoint                  string               `config:"endpoint"`
 	Enc                       encoder.MultiEncoder `config:"encoding"`
-	EnvAuth                   bool                 `config:"env_auth"`
 }

 // Fs represents a remote storage server
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -517,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
 		}
-	} else if opt.EnvAuth {
-		oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
-		if err != nil {
-			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
-		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
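For context, the env_auth branch shown above leans on Application Default Credentials. A rough standalone sketch of that call, assuming golang.org/x/oauth2/google and the google.golang.org/api/storage/v1 scope constant:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// google.DefaultClient resolves Application Default Credentials:
	// GOOGLE_APPLICATION_CREDENTIALS, well-known gcloud credential files,
	// then the instance metadata server on GCE.
	client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatalf("failed to configure Google Cloud Storage: %v", err)
	}
	fmt.Printf("authenticated HTTP client ready: %T\n", client)
}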
@@ -563,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// Check to see if the object exists
 		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
-			get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
-			if f.opt.UserProject != "" {
-				get = get.UserProject(f.opt.UserProject)
-			}
-			_, err = get.Do()
+			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
 			return shouldRetry(ctx, err)
 		})
 		if err == nil {
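Every user_project hunk in this file has the same shape, condensed once below as a sketch (assuming the google.golang.org/api/storage/v1 client; getObject is an illustrative name, not rclone's): the generated *Call types are builders, so the optional userProject parameter used for requester-pays buckets is attached conditionally before Do() fires the request.

package gcssketch

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// getObject attaches the optional userProject query parameter before the
// call fires; with requester pays enabled, the named project is billed
// for the request instead of the bucket owner.
func getObject(ctx context.Context, svc *storage.Service, bucket, object, userProject string) (*storage.Object, error) {
	get := svc.Objects.Get(bucket, object).Context(ctx)
	if userProject != "" {
		get = get.UserProject(userProject)
	}
	return get.Do()
}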
@@ -627,9 +601,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		directory += "/"
 	}
 	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
-	if f.opt.UserProject != "" {
-		list = list.UserProject(f.opt.UserProject)
-	}
 	if !recurse {
 		list = list.Delimiter("/")
 	}
@@ -736,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 		return nil, errors.New("can't list buckets without project number")
 	}
 	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
-	if f.opt.UserProject != "" {
-		listBuckets = listBuckets.UserProject(f.opt.UserProject)
-	}
 	for {
 		var buckets *storage.Buckets
 		err = f.pacer.Call(func() (bool, error) {
@@ -868,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 	// List something from the bucket to see if it exists.  Doing it like this enables the use of a
 	// service account that only has the "Storage Object Admin" role.  See #2193 for details.
 	err = f.pacer.Call(func() (bool, error) {
-		list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
-		if f.opt.UserProject != "" {
-			list = list.UserProject(f.opt.UserProject)
-		}
-		_, err = list.Do()
+		_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 	if err == nil {
@@ -907,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 		if !f.opt.BucketPolicyOnly {
 			insertBucket.PredefinedAcl(f.opt.BucketACL)
 		}
-		insertBucket = insertBucket.Context(ctx)
-		if f.opt.UserProject != "" {
-			insertBucket = insertBucket.UserProject(f.opt.UserProject)
-		}
-		_, err = insertBucket.Do()
+		_, err = insertBucket.Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 }, nil)
@@ -936,11 +896,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
 	}
 	return f.cache.Remove(bucket, func() error {
 		return f.pacer.Call(func() (bool, error) {
-			deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
-			if f.opt.UserProject != "" {
-				deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
-			}
-			err = deleteBucket.Do()
+			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
 			return shouldRetry(ctx, err)
 		})
 	})
@@ -986,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var rewriteResponse *storage.RewriteResponse
 	for {
 		err = f.pacer.Call(func() (bool, error) {
-			rewriteRequest = rewriteRequest.Context(ctx)
-			if f.opt.UserProject != "" {
-				rewriteRequest.UserProject(f.opt.UserProject)
-			}
-			rewriteResponse, err = rewriteRequest.Do()
+			rewriteResponse, err = rewriteRequest.Context(ctx).Do()
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
@@ -1101,11 +1053,7 @@ func (o *Object) setMetaData(info *storage.Object) {
 func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
 	bucket, bucketPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
-		get := o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx)
-		if o.fs.opt.UserProject != "" {
-			get = get.UserProject(o.fs.opt.UserProject)
-		}
-		object, err = get.Do()
+		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1177,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
 		if !o.fs.opt.BucketPolicyOnly {
 			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
 		}
-		copyObject = copyObject.Context(ctx)
-		if o.fs.opt.UserProject != "" {
-			copyObject = copyObject.UserProject(o.fs.opt.UserProject)
-		}
-		newObject, err = copyObject.Do()
+		newObject, err = copyObject.Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1198,9 +1142,6 @@ func (o *Object) Storable() bool {
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	if o.fs.opt.UserProject != "" {
-		o.url = o.url + "&userProject=" + o.fs.opt.UserProject
-	}
 	req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
 	if err != nil {
 		return nil, err
@@ -1293,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		if !o.fs.opt.BucketPolicyOnly {
 			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
 		}
-		insertObject = insertObject.Context(ctx)
-		if o.fs.opt.UserProject != "" {
-			insertObject = insertObject.UserProject(o.fs.opt.UserProject)
-		}
-		newObject, err = insertObject.Do()
+		newObject, err = insertObject.Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1312,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 func (o *Object) Remove(ctx context.Context) (err error) {
 	bucket, bucketPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
-		deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
-		if o.fs.opt.UserProject != "" {
-			deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
-		}
-		err = deleteBucket.Do()
+		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
 		return shouldRetry(ctx, err)
 	})
 	return err

View File

@@ -42,9 +42,9 @@ for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
 		Help: `Kerberos data transfer protection: authentication|integrity|privacy.

 Specifies whether or not authentication, data signature integrity
-checks, and wire encryption are required when communicating with
-the datanodes. Possible values are 'authentication', 'integrity'
-and 'privacy'. Used only with KERBEROS enabled.`,
+checks, and wire encryption is required when communicating the the
+datanodes. Possible values are 'authentication', 'integrity' and
+'privacy'. Used only with KERBEROS enabled.`,
 		Examples: []fs.OptionExample{{
 			Value: "privacy",
 			Help:  "Ensure authentication, integrity and encryption enabled.",

View File

@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
 	return &result, nil
 }

+// copyDirectory copies the directory at the source-path to the destination-path and
+// returns the resulting api-object if successful.
+//
+// The operation will only be successful
+// if the parent-directory of the destination-path exists.
+func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
+	return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
+}
+
 // moveDirectory moves the directory at the source-path to the destination-path and
 // returns the resulting api-object if successful.
 //

View File

@@ -2,7 +2,7 @@
 package hidrive

 // FIXME HiDrive only supports file or folder names of 255 characters or less.
-// Operations that create files or folders with longer names will throw an HTTP error:
+// Operations that create files oder folder with longer names will throw a HTTP error:
 // - 422 Unprocessable Entity
 // A more graceful way for rclone to handle this may be desirable.
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, fmt.Errorf("could not access root-prefix: %w", err)
 	}
 	if item.Type != api.HiDriveObjectTypeDirectory {
-		return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
+		return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
 	}

View File

@@ -1838,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err == nil {
 		// if the object exists delete it
 		err = o.remove(ctx, true)
-		if err != nil && err != fs.ErrorObjectNotFound {
-			// if delete failed then report that, unless it was because the file did not exist after all
+		if err != nil {
 			return fmt.Errorf("failed to remove old object: %w", err)
 		}
-	} else if err != fs.ErrorObjectNotFound {
-		// if the object does not exist we can just continue but if the error is something different we should report that
+	}
+	// if the object does not exist we can just continue but if the error is something different we should report that
+	if err != fs.ErrorObjectNotFound {
 		return err
 	}
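The variant above that checks for fs.ErrorObjectNotFound makes the delete-before-upload step idempotent: a failed removal only matters if the object actually existed. The same pattern in isolation, with errObjectNotFound standing in for fs.ErrorObjectNotFound:

package main

import (
	"errors"
	"fmt"
)

// errObjectNotFound stands in for fs.ErrorObjectNotFound.
var errObjectNotFound = errors.New("object not found")

// removeOld treats "not found" as success: deleting something that is
// already gone is not a failure for a delete-before-upload step.
func removeOld(remove func() error) error {
	if err := remove(); err != nil && !errors.Is(err, errObjectNotFound) {
		return fmt.Errorf("failed to remove old object: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(removeOld(func() error { return errObjectNotFound })) // <nil>
	fmt.Println(removeOld(func() error { return errors.New("boom") }))
}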
@@ -1930,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		o.md5 = result.Md5
 		o.modTime = time.Unix(result.Modified/1000, 0)
 	} else {
-		// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
+		// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
 		return o.readMetaData(ctx, true)
 	}
@@ -1951,17 +1951,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
 		opts.Parameters.Set("dl", "true")
 	}

-	err := o.fs.pacer.Call(func() (bool, error) {
+	return o.fs.pacer.Call(func() (bool, error) {
 		resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
 		return shouldRetry(ctx, resp, err)
 	})
-	if apiErr, ok := err.(*api.Error); ok {
-		// attempting to hard delete will fail if path does not exist, but standard delete will succeed
-		if apiErr.StatusCode == http.StatusNotFound {
-			return fs.ErrorObjectNotFound
-		}
-	}
-	return err
 }

 // Remove an object
// Remove an object // Remove an object

View File

@@ -266,10 +266,7 @@ type Object struct {
 // ------------------------------------------------------------

-var (
-	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-	errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
-)
+var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")

 // NewFs constructs an Fs from the path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -313,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err == nil {
 		f.dev = readDevice(fi, f.opt.OneFileSystem)
 	}
-	// Check to see if this is a .rclonelink if not found
-	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
-	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
-	}
 	if err == nil && f.isRegular(fi.Mode()) {
-		// Handle the odd case, that a symlink was specified by name without the link suffix
-		if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-			return nil, errLinksNeedsSuffix
-		}
 		// It is a file, so use the parent as the root
 		f.root = filepath.Dir(f.root)
 		// return an error with an fs which points to the parent
@@ -536,10 +524,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
 			localPath := filepath.Join(fsDirPath, name)
 			fi, err = os.Stat(localPath)
-			// Quietly skip errors on excluded files and directories
-			if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
-				continue
-			}
 			if os.IsNotExist(err) || isCircularSymlinkError(err) {
 				// Skip bad symlinks and circular symlinks
 				err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -552,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			}
 			mode = fi.Mode()
 		}
+		// Don't include non directory if not included
+		// we leave directory filtering to the layer above
+		if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
+			continue
+		}
 		if fi.IsDir() {
 			// Ignore directories which are symlinks.  These are junction points under windows which
 			// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -564,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
 				newRemote += linkSuffix
 			}
-			// Don't include non directory if not included
-			// we leave directory filtering to the layer above
-			if useFilter && !filter.IncludeRemote(newRemote) {
-				continue
-			}
 			fso, err := f.newObjectWithInfo(newRemote, fi)
 			if err != nil {
 				return nil, err
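The two List hunks above are about ordering: the filter must be consulted only after the remote name is final, i.e. after the .rclonelink suffix has been appended, or rules written against the suffixed name can never match. A reduced sketch, with includeRemote standing in for filter.IncludeRemote:

package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink"

// includeRemote is a toy filter admitting only suffixed symlink names,
// standing in for filter.IncludeRemote with a "+ *.rclonelink" rule.
func includeRemote(remote string) bool {
	return strings.HasSuffix(remote, linkSuffix)
}

func main() {
	name, isSymlink := "included.file.link", true

	fmt.Println(includeRemote(name)) // false: checked before the suffix exists

	remote := name
	if isSymlink {
		remote += linkSuffix // what --links appends for translated symlinks
	}
	fmt.Println(includeRemote(remote)) // true: checked after the name is final
}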

View File

@@ -14,7 +14,6 @@ import (
 	"time"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
@@ -146,20 +145,6 @@ func TestSymlink(t *testing.T) {
 	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
 	require.Equal(t, fs.ErrorObjectNotFound, err)

-	// Check that NewFs works with the suffixed version and --links
-	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
-		"links": "true",
-	})
-	require.Equal(t, fs.ErrorIsFile, err)
-	require.Equal(t, dir, f2.(*Fs).root)
-
-	// Check that NewFs doesn't see the non suffixed version with --links
-	f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
-		"links": "true",
-	})
-	require.Equal(t, errLinksNeedsSuffix, err)
-	require.Nil(t, f2)
-
 	// Check reading the object
 	in, err := o.Open(ctx)
 	require.NoError(t, err)
@@ -410,107 +395,3 @@ func TestFilter(t *testing.T) {
 	sort.Sort(entries)
 	require.Equal(t, "[included]", fmt.Sprint(entries))
 }
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global error - one for the dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}

View File

@@ -91,7 +91,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
 		// The types of t.Sec and t.Nsec vary from int32 to int64 on
 		// different Linux architectures so we need to cast them to
 		// int64 here and hence need to quiet the linter about
-		// unnecessary casts.
+		// unecessary casts.
 		//
 		// nolint: unconvert
 		m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))

View File

@@ -90,7 +90,7 @@ permanently delete objects instead.`,
 MEGA uses plain text HTTP connections by default.
 Some ISPs throttle HTTP connections, this causes transfers to become very slow.
 Enabling this will force MEGA to use HTTPS for all transfers.
-HTTPS is normally not necessary since all data is already encrypted anyway.
+HTTPS is normally not necesary since all data is already encrypted anyway.
 Enabling it will increase CPU usage and add network overhead.`,
 			Default:  false,
 			Advanced: true,

View File

@@ -819,8 +819,6 @@ func (f *Fs) getAuth(req *http.Request) error {
 	// Set Authorization header
 	dataHeader := generateDataHeader(f)
 	path := req.URL.RequestURI()
-	//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
-	//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
 	actionHeader := req.Header["X-Akamai-ACS-Action"][0]
 	fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
 	req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
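The lint directives removed above exist because of Go's header canonicalization: indexing http.Header as a map only finds keys stored under the exact same spelling, while Set and Get canonicalize. A small standard-library-only demonstration of what staticcheck's SA1008 warns about:

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	h := http.Header{}
	// Set canonicalizes the key, so the value is stored under
	// "X-Akamai-Acs-Action", not the documented "X-Akamai-ACS-Action".
	h.Set("X-Akamai-ACS-Action", "version=1&action=stat")

	fmt.Println(textproto.CanonicalMIMEHeaderKey("X-Akamai-ACS-Action")) // X-Akamai-Acs-Action
	fmt.Println(h["X-Akamai-ACS-Action"] == nil)                         // true: raw map lookup misses
	fmt.Println(h.Get("X-Akamai-ACS-Action"))                            // found: Get canonicalizes too
}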

View File

@@ -265,7 +265,7 @@ At the time of writing this only works with OneDrive personal paid accounts.
 			Help: `Specify the hash in use for the backend.

 This specifies the hash type in use. If set to "auto" it will use the
-default hash which is QuickXorHash.
+default hash which is is QuickXorHash.

 Before rclone 1.62 an SHA1 hash was used by default for Onedrive
 Personal. For 1.62 and later the default is to use a QuickXorHash for
@@ -1724,10 +1724,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	token := make(chan struct{}, f.ci.Checkers)
 	var wg sync.WaitGroup
 	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
-		if err != nil {
-			fs.Errorf(f, "Failed to list %q: %v", path, err)
-			return nil
-		}
 		err = entries.ForObjectError(func(obj fs.Object) error {
 			o, ok := obj.(*Object)
 			if !ok {
View File

@@ -61,7 +61,7 @@ func New() hash.Hash {
 func (q *quickXorHash) Write(p []byte) (n int, err error) {
 	var i int
 	// fill last remain
-	lastRemain := q.size % dataSize
+	lastRemain := int(q.size) % dataSize
 	if lastRemain != 0 {
 		i += xorBytes(q.data[lastRemain:], p)
 	}
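The one-character difference above matters on 32-bit platforms: q.size counts every byte written so far, and converting it to int before the modulo truncates once more than 2 GiB has been hashed. A standalone illustration, where dataSize is an arbitrary non-power-of-two stand-in for the package constant:

package main

import "fmt"

// dataSize stands in for the package constant; any non-power-of-two
// value shows the truncation effect.
const dataSize = 220

func main() {
	var size int64 = 3 << 30 // pretend 3 GiB have been hashed so far

	fmt.Println(size % dataSize) // 212: correct, arithmetic stays in int64

	// On GOARCH=386 or arm, int is 32 bits, so int(size) wraps negative
	// (simulated here with int32) and the remainder becomes unusable as
	// a slice index:
	fmt.Println(int64(int32(size)) % dataSize) // -144
}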

View File

@@ -289,7 +289,7 @@ Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/using
 		}},
 	}, {
 		Name: "sse_kms_key_id",
-		Help: `if using your own master key in vault, this header specifies the
+		Help: `if using using your own master key in vault, this header specifies the
 OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
 the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
 Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,

View File

@@ -589,7 +589,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
 				if operations.SkipDestructive(ctx, what, "remove pending upload") {
 					continue
 				}
-				_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
+				ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
+				if ignoreErr != nil {
+					// fs.Debugf(f, "ignoring error %s", ignoreErr)
+				}
+			} else {
+				// fs.Debugf(f, "ignoring %s", what)
 			}
 		} else {
 			fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")

View File

@@ -1,535 +0,0 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"reflect"
"strconv"
"time"
)
const (
// "2022-09-17T14:31:06.056+08:00"
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
if string(data) == "null" || string(data) == `""` {
return nil
}
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// Types of things in Item
const (
KindOfFolder = "drive#folder"
KindOfFile = "drive#file"
KindOfFileList = "drive#fileList"
KindOfResumable = "drive#resumable"
KindOfForm = "drive#form"
ThumbnailSizeS = "SIZE_SMALL"
ThumbnailSizeM = "SIZE_MEDIUM"
ThumbnailSizeL = "SIZE_LARGE"
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
PhaseTypeError = "PHASE_TYPE_ERROR"
PhaseTypePending = "PHASE_TYPE_PENDING"
UploadTypeForm = "UPLOAD_TYPE_FORM"
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
ListLimit = 100
)
// ------------------------------------------------------------
// Error details api error from pikpak
type Error struct {
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
Code int `json:"error_code"`
URL string `json:"error_url,omitempty"`
Message string `json:"error_description,omitempty"`
// can have either of `error_details` or `details`
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
Details []*ErrorDetails `json:"details,omitempty"`
}
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct {
} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ------------------------------------------------------------
// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
Kind map[string]string `json:"kind,omitempty"` // "eq"
Starred map[string]bool `json:"starred,omitempty"` // "eq"
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}
// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
if value == "" {
// UNSET for empty values
return
}
r := reflect.ValueOf(f)
fd := reflect.Indirect(r).FieldByName(field)
if v, err := strconv.ParseBool(value); err == nil {
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
} else {
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
}
}
// ------------------------------------------------------------
// Common Elements
// Link contains a download URL for opening files
type Link struct {
URL string `json:"url"`
Token string `json:"token"`
Expire Time `json:"expire"`
Type string `json:"type,omitempty"`
}
// Valid reports whether l is non-nil, has an URL, and is not expired.
func (l *Link) Valid() bool {
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL
type URL struct {
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
URL string `json:"url,omitempty"`
}
// ------------------------------------------------------------
// Base Elements
// FileList contains a list of File elements
type FileList struct {
Kind string `json:"kind,omitempty"` // drive#fileList
Files []*File `json:"files,omitempty"`
NextPageToken string `json:"next_page_token"`
Version string `json:"version,omitempty"`
VersionOutdated bool `json:"version_outdated,omitempty"`
}
// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range-requests
// for a single file, i.e. supports higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"`
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
// FileLinks includes links to file at backend
type FileLinks struct {
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}
// FileAudit contains audit information for the file
type FileAudit struct {
Status string `json:"status,omitempty"` // "STATUS_OK"
Message string `json:"message,omitempty"`
Title string `json:"title,omitempty"`
}
// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
MediaID string `json:"media_id,omitempty"`
MediaName string `json:"media_name,omitempty"`
Video struct {
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Duration int64 `json:"duration,omitempty"`
BitRate int `json:"bit_rate,omitempty"`
FrameRate int `json:"frame_rate,omitempty"`
VideoCodec string `json:"video_codec,omitempty"`
AudioCodec string `json:"audio_codec,omitempty"`
VideoType string `json:"video_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"`
}
// FileParams includes parameters for instant open
type FileParams struct {
Duration int64 `json:"duration,omitempty,string"` // in seconds
Height int `json:"height,omitempty,string"`
Platform string `json:"platform,omitempty"` // "Upload"
PlatformIcon string `json:"platform_icon,omitempty"`
URL string `json:"url,omitempty"`
Width int `json:"width,omitempty,string"`
}
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct {
} `json:"params,omitempty"` // TODO
CategoryIds []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct {
} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
// TaskList contains a list of Task elements
type TaskList struct {
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
NextPageToken string `json:"next_page_token"`
ExpiresIn int `json:"expires_in,omitempty"`
}
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task
type TaskParams struct {
Age string `json:"age,omitempty"`
PredictSpeed string `json:"predict_speed,omitempty"`
PredictType string `json:"predict_type,omitempty"`
URL string `json:"url,omitempty"`
}
// Form contains parameters for upload by multipart/form-data
type Form struct {
Headers struct{} `json:"headers"`
Kind string `json:"kind"` // "drive#form"
Method string `json:"method"` // "POST"
MultiParts struct {
OSSAccessKeyID string `json:"OSSAccessKeyId"`
Signature string `json:"Signature"`
Callback string `json:"callback"`
Key string `json:"key"`
Policy string `json:"policy"`
XUserData string `json:"x:user_data"`
} `json:"multi_parts"`
URL string `json:"url"`
}
// Resumable contains parameters for upload by resumable
type Resumable struct {
Kind string `json:"kind,omitempty"` // "drive#resumable"
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
Params *ResumableParams `json:"params,omitempty"`
}
// ResumableParams specifies resumable parameters
type ResumableParams struct {
AccessKeyID string `json:"access_key_id,omitempty"`
AccessKeySecret string `json:"access_key_secret,omitempty"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Expiration Time `json:"expiration,omitempty"`
Key string `json:"key,omitempty"`
SecurityToken string `json:"security_token,omitempty"`
}
// FileInArchive is a basic element in archive
type FileInArchive struct {
Index int `json:"index,omitempty"`
Filename string `json:"filename,omitempty"`
Filesize string `json:"filesize,omitempty"`
MimeType string `json:"mime_type,omitempty"`
Gcid string `json:"gcid,omitempty"`
Kind string `json:"kind,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Path string `json:"path,omitempty"`
}
// ------------------------------------------------------------
// NewFile is a response to RequestNewFile
type NewFile struct {
File *File `json:"file,omitempty"`
Form *Form `json:"form,omitempty"`
Resumable *Resumable `json:"resumable,omitempty"`
Task *Task `json:"task,omitempty"` // null in this case
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// NewTask is a response to RequestNewTask
type NewTask struct {
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
File *File `json:"file,omitempty"` // null in this case
Task *Task `json:"task,omitempty"`
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}
// About informs drive status
type About struct {
Kind string `json:"kind,omitempty"` // "drive#about"
Quota *Quota `json:"quota,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Quotas struct {
} `json:"quotas,omitempty"` // maybe []*Quota?
}
// Quota informs drive quota
type Quota struct {
Kind string `json:"kind,omitempty"` // "drive#quota"
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
Usage int64 `json:"usage,omitempty,string"` // bytes in use
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}
// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
ShareID string `json:"share_id,omitempty"`
ShareURL string `json:"share_url,omitempty"`
PassCode string `json:"pass_code,omitempty"`
ShareText string `json:"share_text,omitempty"`
}
// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
Sub string `json:"sub,omitempty"` // userid for internal use
Name string `json:"name,omitempty"` // Username
Picture string `json:"picture,omitempty"` // URL to Avatar image
Email string `json:"email,omitempty"` // redacted email address
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
PhoneNumber string `json:"phone_number,omitempty"`
Password string `json:"password,omitempty"` // "SET" if configured
Status string `json:"status,omitempty"` // "ACTIVE"
CreatedAt Time `json:"created_at,omitempty"`
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}
// UserProvider details third-party authentication
type UserProvider struct {
ID string `json:"id,omitempty"` // e.g. "google.com"
ProviderUserID string `json:"provider_user_id,omitempty"`
Name string `json:"name,omitempty"` // username
}
// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
Result string `json:"result,omitempty"` // "ACCEPTED"
Message string `json:"message,omitempty"`
RedirectURI string `json:"redirect_uri,omitempty"`
Data struct {
Expire Time `json:"expire,omitempty"`
Status string `json:"status,omitempty"` // "invalid" or "ok"
Type string `json:"type,omitempty"` // "novip" or "platinum"
UserID string `json:"user_id,omitempty"` // same as User.Sub
} `json:"data,omitempty"`
}
// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"`
TaskID string `json:"task_id,omitempty"` // same as File.Id
FilesNum int `json:"files_num,omitempty"` // number of files in archive
RedirectLink string `json:"redirect_link,omitempty"`
}
// ------------------------------------------------------------
// RequestShare is to request for file share
type RequestShare struct {
FileIds []string `json:"file_ids,omitempty"`
ShareTo string `json:"share_to,omitempty"` // "publiclink",
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}
// RequestBatch is to request for batch actions
type RequestBatch struct {
Ids []string `json:"ids,omitempty"`
To map[string]string `json:"to,omitempty"`
}
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
// always required
Kind string `json:"kind"` // "drive#folder" or "drive#file"
Name string `json:"name"`
ParentID string `json:"parent_id"`
FolderType string `json:"folder_type"`
// only when uploading a new file
Hash string `json:"hash,omitempty"` // sha1sum
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
Size int64 `json:"size,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
Kind string `json:"kind,omitempty"` // "drive#file"
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}
// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Password string `json:"password,omitempty"` // ""
FileID string `json:"file_id,omitempty"`
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------
// NOT implemented YET
// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
Path string `json:"path,omitempty"` // "" by default
Password string `json:"password,omitempty"` // "" by default
FileID string `json:"file_id,omitempty"`
}
// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"` // ""
TaskID string `json:"task_id,omitempty"` // ""
CurrentPath string `json:"current_path,omitempty"` // ""
Title string `json:"title,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Files []*FileInArchive `json:"files,omitempty"`
}
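Since the deleted api package above centres on a custom JSON time wrapper, a compilable round-trip sketch of the same idea follows (reimplemented independently, not the original code; this sketch uses a value receiver for MarshalJSON so json.Marshal picks it up on non-pointer values):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// timeFormat wraps RFC3339 in literal quotes so Format/Parse emit and
// consume a valid JSON string directly.
const timeFormat = `"` + time.RFC3339 + `"`

// Time mirrors the wrapper above closely enough to show the round trip,
// including the "null"/empty-string escape hatch on decode.
type Time time.Time

func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(time.Time(t).Format(timeFormat)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
	if string(data) == "null" || string(data) == `""` {
		return nil // leave the zero value in place
	}
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	var parsed Time
	_ = json.Unmarshal([]byte(`"2022-09-17T14:31:06+08:00"`), &parsed)
	out, _ := json.Marshal(parsed)
	fmt.Println(string(out)) // "2022-09-17T14:31:06+08:00"
}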

View File

@@ -1,253 +0,0 @@
package pikpak
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/lib/rest"
)
// Globals
const (
cachePrefix = "rclone-pikpak-sha1sum-"
)
// requestDecompress requests decompress of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
req := &api.RequestDecompress{
Gcid: file.Hash,
Password: password,
FileID: file.ID,
Files: []*api.FileInArchive{},
DefaultParent: true,
}
opts := rest.Opts{
Method: "POST",
Path: "/decompress/v1/decompress",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://user.mypikpak.com/v1/user/me",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get userinfo: %w", err)
}
return
}
// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get vip info: %w", err)
}
return
}
// requestBatchAction requests batch actions to API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files:" + action,
NoResponse: true, // Only returns `{"task_id":""}`
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("batch action %q failed: %w", action, err)
}
return nil
}
// requestNewTask requests a new api.NewTask and returns api.Task
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var newTask api.NewTask
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return newTask.Task, nil
}
// requestNewFile requests a new api.NewFile and returns api.File
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getFile gets api.File from API for the ID passed
// and returns rich information containing additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && info.Phase != api.PhaseTypeComplete {
// could be pending right after file is created/uploaded.
return true, errors.New("not PHASE_TYPE_COMPLETE")
}
return f.shouldRetry(ctx, resp, err)
})
return
}
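Worth noting: the `return true, errors.New("not PHASE_TYPE_COMPLETE")` above is what turns this GET into a poll. A minimal sketch of the pacer contract as used here (illustrative only; `doRequest` and `isRetryable` are made-up stand-ins):
err = f.pacer.Call(func() (bool, error) {
	err := doRequest()    // made-up stand-in for the HTTP round trip
	if isRetryable(err) { // made-up predicate (HTTP 429/5xx, "not complete yet", ...)
		return true, err // true: pacer backs off, then invokes the callback again
	}
	return false, err // false: stop retrying; err is what pacer.Call returns
})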
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/about",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/share",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// Read the sha1 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
// we need an SHA1
hash := sha1.New()
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
teeReader := io.TeeReader(in, hash)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done downloading
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disc and calculate the SHA1 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
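A minimal usage sketch of readSHA1 (not part of this file; the `threshold` value is an assumed config setting):
// Illustrative only: a caller hashing the source before upload.
sha1sum, out, cleanup, err := readSHA1(in, size, threshold)
defer cleanup() // always release the temp file, error or not
if err != nil {
	return err
}
// sha1sum goes into the upload request; the actual bytes are re-read from out.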

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test PikPak filesystem interface
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*pikpak.Object)(nil),
})
}

View File

@@ -1,4 +1,4 @@
- // Package s3 provides an interface to Amazon S3 object storage
+ // Package s3 provides an interface to Amazon S3 oject storage
package s3
//go:generate go run gen_setfrom.go -o setfrom.go
@@ -66,7 +66,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
- Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
+ Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -109,9 +109,6 @@ func init() {
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
- }, {
- Value: "GCS",
- Help: "Google Cloud Storage",
}, {
Value: "HuaweiOBS",
Help: "Huawei Object Storage Service",
@@ -937,14 +934,6 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com", Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint", Help: "EU Endpoint",
}}, }},
- }, {
- Name: "endpoint",
- Help: "Endpoint for Google Cloud Storage.",
- Provider: "GCS",
- Examples: []fs.OptionExample{{
- Value: "https://storage.googleapis.com",
- Help: "Google Cloud Storage endpoint",
- }},
}, {
Name: "endpoint",
Help: "Endpoint for Storj Gateway.",
@@ -1109,7 +1098,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
- Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
+ Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -2269,24 +2258,6 @@ will decompress the object on the fly.
If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
- `, "|", "`"),
- Default: fs.Tristate{},
- Advanced: true,
- }, {
- Name: "use_accept_encoding_gzip",
- Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.
- By default, rclone will append |Accept-Encoding: gzip| to the request to download
- compressed objects whenever possible.
- However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
- the signature of the request.
- A symptom of this would be receiving errors like
- SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
- In this case, you might want to try disabling this option.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
@@ -2428,7 +2399,6 @@ type Options struct {
VersionAt fs.Time `config:"version_at"`
Decompress bool `config:"decompress"`
MightGzip fs.Tristate `config:"might_gzip"`
- UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
NoSystemMetadata bool `config:"no_system_metadata"`
}
@@ -2821,12 +2791,11 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
// These should be differences from AWS S3
func setQuirks(opt *Options) {
var (
listObjectsV2 = true
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
- useAcceptEncodingGzip = true
- mightGzip = true // assume all providers might gzip until proven otherwise
+ mightGzip = true // assume all providers might gzip until proven otherwise
)
switch opt.Provider {
case "AWS":
@@ -2911,10 +2880,6 @@ func setQuirks(opt *Options) {
case "Qiniu": case "Qiniu":
useMultipartEtag = false useMultipartEtag = false
urlEncodeListings = false urlEncodeListings = false
case "GCS":
// Google break request Signature by mutating accept-encoding HTTP header
// https://github.com/rclone/rclone/issues/6670
useAcceptEncodingGzip = false
case "Other": case "Other":
listObjectsV2 = false listObjectsV2 = false
virtualHostStyle = false virtualHostStyle = false
@@ -2959,12 +2924,6 @@ func setQuirks(opt *Options) {
opt.MightGzip.Valid = true
opt.MightGzip.Value = mightGzip
}
- // set UseAcceptEncodingGzip if not manually set
- if !opt.UseAcceptEncodingGzip.Valid {
- opt.UseAcceptEncodingGzip.Valid = true
- opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
- }
}
// setRoot changes the root of the Fs
@@ -2998,7 +2957,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("s3: upload cutoff: %w", err) return nil, fmt.Errorf("s3: upload cutoff: %w", err)
} }
if opt.Versions && opt.VersionAt.IsSet() { if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time") return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
} }
if opt.BucketACL == "" { if opt.BucketACL == "" {
opt.BucketACL = opt.ACL opt.BucketACL = opt.ACL
@@ -3119,7 +3078,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
err = f.list(ctx, listOpt{
bucket: bucket,
directory: bucketPath,
- prefix: f.rootDirectory,
recurse: true,
withVersions: f.opt.Versions,
findFile: true,
@@ -3525,10 +3483,10 @@ type listOpt struct {
// list lists the objects into the function supplied with the opt
// supplied.
func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
- if opt.prefix != "" {
- opt.prefix += "/"
- }
if !opt.findFile {
+ if opt.prefix != "" {
+ opt.prefix += "/"
+ }
if opt.directory != "" {
opt.directory += "/"
}
@@ -5004,9 +4962,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Override the automatic decompression in the transport to
// download compressed files as-is
- if o.fs.opt.UseAcceptEncodingGzip.Value {
- httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
- }
+ httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
for _, option := range options {
switch option.(type) {
@@ -5122,9 +5078,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
}
uid := cout.UploadId
- uploadCtx, cancel := context.WithCancel(ctx)
defer atexit.OnError(&err, func() {
- cancel()
if o.fs.opt.LeavePartsOnError {
return
}
@@ -5144,7 +5098,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
})()
var (
- g, gCtx = errgroup.WithContext(uploadCtx)
+ g, gCtx = errgroup.WithContext(ctx)
finished = false
partsMu sync.Mutex // to protect parts
parts []*s3.CompletedPart
@@ -5226,7 +5180,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
if err != nil {
if partNum <= int64(concurrency) {
- return f.shouldRetry(gCtx, err)
+ return f.shouldRetry(ctx, err)
}
// retry all chunks once have done the first batch
return true, err
@@ -5258,7 +5212,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
var resp *s3.CompleteMultipartUploadOutput
err = f.pacer.Call(func() (bool, error) {
- resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
+ resp, err = f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: req.Bucket,
Key: req.Key,
MultipartUpload: &s3.CompletedMultipartUpload{
@@ -5267,7 +5221,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
RequestPayer: req.RequestPayer,
UploadId: uid,
})
- return f.shouldRetry(uploadCtx, err)
+ return f.shouldRetry(ctx, err)
})
if err != nil {
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)

View File

@@ -6,15 +6,12 @@ import (
"context" "context"
"crypto/md5" "crypto/md5"
"fmt" "fmt"
"path"
"strings"
"testing" "testing"
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
@@ -253,8 +250,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
time.Sleep(2 * time.Second)
// Create an object
- const dirName = "versions"
- const fileName = dirName + "/" + "test-versions.txt"
+ const fileName = "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
@@ -284,12 +280,11 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
}()
// Read the contents
- entries, err := f.List(ctx, dirName)
+ entries, err := f.List(ctx, "")
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
- t.Log(entry)
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
@@ -314,18 +309,6 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
- // Check we can make a NewFs from that object with a version suffix
- t.Run("NewFs", func(t *testing.T) {
- newPath := path.Join(fs.ConfigString(f), fileNameVersion)
- // Make sure --s3-versions is set in the config of the new remote
- confPath := strings.Replace(newPath, ":", ",versions:", 1)
- fNew, err := cache.Get(ctx, confPath)
- // This should return pointing to a file
- assert.Equal(t, fs.ErrorIsFile, err)
- // With the directory the directory above
- assert.Equal(t, dirName, path.Base(fs.ConfigString(fNew)))
- })
})
t.Run("VersionAt", func(t *testing.T) {

View File

@@ -953,9 +953,11 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
return nil
}
+ // === API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
+ // === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6,
+ // === the others can probably be removed after the API v2.1 is documented
func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) {
- // API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
- // getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6.
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory
if libraryID == "" {
@@ -999,3 +1001,95 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
}
return result, nil
}
func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) {
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File
if srcLibraryID == "" || dstLibraryID == "" {
return nil, errors.New("libraryID and/or file path argument(s) missing")
}
srcPath = path.Join("/", srcPath)
dstPath = path.Join("/", dstPath)
// Older API does not seem to accept JSON input here either
postParameters := url.Values{
"operation": {"copy"},
"dst_repo": {dstLibraryID},
"dst_dir": {f.opt.Enc.FromStandardPath(dstPath)},
}
opts := rest.Opts{
Method: "POST",
Path: APIv20 + srcLibraryID + "/file/",
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}},
ContentType: "application/x-www-form-urlencoded",
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
}
result := &api.FileInfo{}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if resp.StatusCode == 401 || resp.StatusCode == 403 {
return nil, fs.ErrorPermissionDenied
}
}
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
}
err = rest.DecodeJSON(resp, &result)
if err != nil {
return nil, err
}
return f.decodeFileInfo(result), nil
}
func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error {
// API Documentation
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File
if libraryID == "" || newname == "" {
return errors.New("libraryID and/or file path argument(s) missing")
}
filePath = path.Join("/", filePath)
// No luck with JSON input with the older api2
postParameters := url.Values{
"operation": {"rename"},
"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
"newname": {f.opt.Enc.FromStandardName(newname)},
}
opts := rest.Opts{
Method: "POST",
Path: APIv20 + libraryID + "/file/",
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}},
ContentType: "application/x-www-form-urlencoded",
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
NoRedirect: true,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil {
if resp.StatusCode == 301 {
// This is the normal response from the server
return nil
}
if resp.StatusCode == 401 || resp.StatusCode == 403 {
return fs.ErrorPermissionDenied
}
if resp.StatusCode == 404 {
return fs.ErrorObjectNotFound
}
}
return fmt.Errorf("failed to rename file: %w", err)
}
return nil
}
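Both APIv2 helpers above send classic form-encoded bodies rather than JSON. For illustration, the rename parameters built above serialize like this (the filename is made up):
form := url.Values{
	"operation": {"rename"},
	"reloaddir": {"true"},
	"newname":   {"new name.txt"}, // made-up example name
}
fmt.Println(form.Encode())
// Encode() sorts keys alphabetically and escapes spaces as '+':
// newname=new+name.txt&operation=rename&reloaddir=true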

View File

@@ -10,7 +10,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
iofs "io/fs"
"os" "os"
"path" "path"
"regexp" "regexp"
@@ -324,7 +323,7 @@ Pass multiple variables space separated, eg
VAR1=value VAR2=value
- and pass variables with spaces in quotes, eg
+ and pass variables with spaces in in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
@@ -368,20 +367,6 @@ At least one must match with server configuration. This can be checked for examp
Example:
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
- `,
- Advanced: true,
- }, {
- Name: "host_key_algorithms",
- Default: fs.SpaceSepList{},
- Help: `Space separated list of host key algorithms, ordered by preference.
- At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms.
- Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
- Example:
- ssh-ed25519 ssh-rsa ssh-dss
`,
Advanced: true,
}},
@@ -422,7 +407,6 @@ type Options struct {
Ciphers fs.SpaceSepList `config:"ciphers"`
KeyExchange fs.SpaceSepList `config:"key_exchange"`
MACs fs.SpaceSepList `config:"macs"`
- HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
}
// Fs stores the interface to the remote SFTP files
@@ -755,10 +739,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
}
- if len(opt.HostKeyAlgorithms) != 0 {
- sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms)
- }
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
if err != nil {
@@ -802,32 +782,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err) return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
} }
if keyFile != "" { if keyFile != "" {
// If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key
// and `${opt.KeyFile}.pub` contains the public key.
//
// If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key.
// This is how it works with openssh; the `IdentityFile` in openssh config points to the public key.
// It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone
// will try all the keys they find in the ssh-agent until they find one that works. But just like
// `IdentityFile` is used in openssh config to limit the search to one specific key, so does
// `opt.KeyFile` in rclone config limit the search to that specific key.
//
// However, previous versions of rclone would always expect to find the public key in
// `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility
// we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with
// an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`.
pubBytes, err := os.ReadFile(keyFile + ".pub") pubBytes, err := os.ReadFile(keyFile + ".pub")
if err != nil { if err != nil {
if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent { return nil, fmt.Errorf("failed to read public key file: %w", err)
pubBytes, err = os.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
} else {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
} }
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes) pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse public key file: %w", err) return nil, fmt.Errorf("failed to parse public key file: %w", err)
@@ -849,8 +807,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
- // Load key file as a private key, if specified. This is only needed when not using an ssh agent.
- if (keyFile != "" && !opt.KeyUseAgent) || opt.KeyPem != "" {
+ // Load key file if specified
+ if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = os.ReadFile(keyFile)

View File

@@ -34,10 +34,9 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
d := &smb2.Dialer{ d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{ Initiator: &smb2.NTLMInitiator{
User: f.opt.User, User: f.opt.User,
Password: pass, Password: pass,
Domain: f.opt.Domain, Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
}, },
} }
@@ -106,9 +105,9 @@ func (f *Fs) getSessions() int32 {
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
- bgCtx := context.Background()
- c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
+ ctx = context.Background()
+ c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
}
@@ -119,7 +118,7 @@ func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err erro
_ = c.smbSession.Logoff()
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
}
- c.smbShare = c.smbShare.WithContext(bgCtx)
+ c.smbShare = c.smbShare.WithContext(ctx)
}
return c, nil
}
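The switch to a background context here matters because pooled connections outlive the request that dialled them. A minimal sketch of the failure mode this avoids (illustrative only, reusing the dial helper shown above):
// Sketch: if the per-request ctx were used instead, cancelling request A
// would also tear down a pooled connection later handed out to request B.
func newPooledConn(f *Fs) (*conn, error) {
	bgCtx := context.Background() // deliberately not the caller's ctx
	return f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
}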

View File

@@ -60,17 +60,6 @@ func init() {
Name: "domain", Name: "domain",
Help: "Domain name for NTLM authentication.", Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP", Default: "WORKGROUP",
}, {
Name: "spn",
Help: `Service principal name.
Rclone presents this name to the server. Some servers use this as further
authentication, and it often needs to be set for clusters. For example:
cifs/remotehost:1020
Leave blank if not sure.
`,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
@@ -120,7 +109,6 @@ type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Domain string `config:"domain"`
- SPN string `config:"spn"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`

View File

@@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"net/url"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -1327,6 +1328,23 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
return nil
}
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(ctx); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
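For illustration, the X-Object-Manifest header split above has the form "container/prefix"; a worked example with a made-up manifest value:
dirManifest := "segments-container/path/to/large-object/"
delimiter := strings.Index(dirManifest, "/") // 18
fmt.Println(dirManifest[:delimiter])   // "segments-container"
fmt.Println(dirManifest[delimiter+1:]) // "path/to/large-object/"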
// urlEncode encodes a string so that it is a valid URL // urlEncode encodes a string so that it is a valid URL
// //
// We don't use any of Go's standard methods as we need `/` not // We don't use any of Go's standard methods as we need `/` not

View File

@@ -756,6 +756,14 @@ func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.createPolicy.Create(ctx, f.upstreams, path)
}
func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.createPolicy.CreateEntries(entries...)
}
func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
return f.searchPolicy.Search(ctx, f.upstreams, path)
}
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}

View File

@@ -214,7 +214,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
client := fshttp.NewClient(ctx)
f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
- f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`)
+ f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
if err != nil {

View File

@@ -75,7 +75,6 @@ type Prop struct {
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
- MESha1Hex *string `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
}
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200" // Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
@@ -103,27 +102,22 @@ func (p *Prop) StatusOK() bool {
// Hashes returns a map of all checksums - may be nil
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
- if len(p.Checksums) > 0 {
- hashes = make(map[hash.Type]string)
- for _, checksums := range p.Checksums {
- checksums = strings.ToLower(checksums)
- for _, checksum := range strings.Split(checksums, " ") {
- switch {
- case strings.HasPrefix(checksum, "sha1:"):
- hashes[hash.SHA1] = checksum[5:]
- case strings.HasPrefix(checksum, "md5:"):
- hashes[hash.MD5] = checksum[4:]
- }
- }
- }
- return hashes
- } else if p.MESha1Hex != nil {
- hashes = make(map[hash.Type]string)
- hashes[hash.SHA1] = *p.MESha1Hex
- return hashes
- } else {
+ if len(p.Checksums) == 0 {
return nil
}
+ hashes = make(map[hash.Type]string)
+ for _, checksums := range p.Checksums {
+ checksums = strings.ToLower(checksums)
+ for _, checksum := range strings.Split(checksums, " ") {
+ switch {
+ case strings.HasPrefix(checksum, "sha1:"):
+ hashes[hash.SHA1] = checksum[5:]
+ case strings.HasPrefix(checksum, "md5:"):
+ hashes[hash.MD5] = checksum[4:]
+ }
+ }
+ }
+ return hashes
}
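For illustration, the checksums property parsed above carries space-separated `type:value` tokens; a worked example with shortened, made-up digests:
p := Prop{Checksums: []string{"SHA1:2aae6c35 MD5:9e107d9d"}}
hashes := p.Hashes()
// hashes[hash.SHA1] == "2aae6c35"
// hashes[hash.MD5] == "9e107d9d"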
// PropValue is a tagged name and value // PropValue is a tagged name and value

View File

@@ -1,215 +0,0 @@
package webdav
/*
chunked update for Nextcloud
see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
*/
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
// Not found. Can be returned by NextCloud when merging chunks of an upload.
if resp != nil && resp.StatusCode == 404 {
return true, err
}
// 423 LOCKED
if resp != nil && resp.StatusCode == 423 {
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
}
return f.shouldRetry(ctx, resp, err)
}
// set the chunk size for testing
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
return
}
func (f *Fs) getChunksUploadURL() string {
return strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
}
func (o *Object) getChunksUploadDir() (string, error) {
hasher := md5.New()
_, err := hasher.Write([]byte(o.filePath()))
if err != nil {
return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
}
uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
return uploadDir, nil
}
func (f *Fs) verifyChunkConfig() error {
if f.opt.ChunkSize != 0 && !validateNextCloudChunkedURL.MatchString(f.endpointURL) {
return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
}
return nil
}
func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
}
func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
var uploadDir string
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
uploadDir, err = o.createChunksUploadDirectory(ctx)
if err != nil {
return err
}
partObj := &Object{
fs: o.fs,
}
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
if err != nil {
return err
}
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
err = o.mergeChunks(ctx, uploadDir, options, src)
if err != nil {
return err
}
return nil
}
func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
chunkSize := int64(partObj.fs.opt.ChunkSize)
// TODO: upload chunks in parallel for faster transfer speeds
for offset := int64(0); offset < size; offset += chunkSize {
if err := ctx.Err(); err != nil {
return err
}
contentLength := chunkSize
// Last chunk may be smaller
if size-offset < contentLength {
contentLength = size - offset
}
endOffset := offset + contentLength - 1
partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
// Enable low-level HTTP 2 retries.
// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error
buf := make([]byte, chunkSize)
in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)
getBody := func() (io.ReadCloser, error) {
// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
if _, err := in.Seek(0, io.SeekStart); err != nil { // the rewind must succeed before handing back the reader
return nil, err
}
return io.NopCloser(in), nil
}
err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
if err != nil {
return fmt.Errorf("uploading chunk failed: %w", err)
}
}
return nil
}
func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
uploadDir, err := o.getChunksUploadDir()
if err != nil {
return uploadDir, err
}
err = o.purgeUploadedChunks(ctx, uploadDir)
if err != nil {
return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
}
opts := rest.Opts{
Method: "MKCOL",
Path: uploadDir + "/",
NoResponse: true,
RootURL: o.fs.chunksUploadURL,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return "", fmt.Errorf("making upload directory failed: %w", err)
}
return uploadDir, err
}
func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
var resp *http.Response
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
opts := rest.Opts{
Method: "MOVE",
Path: path.Join(uploadDir, ".file"),
NoResponse: true,
Options: options,
RootURL: o.fs.chunksUploadURL,
}
destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
if err != nil {
return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
}
opts.ExtraHeaders = o.extraHeaders(ctx, src)
opts.ExtraHeaders["Destination"] = destinationURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
}
return err
}
func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
opts := rest.Opts{
Method: "DELETE",
Path: uploadDir + "/",
NoResponse: true,
RootURL: o.fs.chunksUploadURL,
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
// directory doesn't exist, no need to purge
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
return o.fs.shouldRetry(ctx, resp, err)
})
return err
}
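For illustration, the `%015d-%015d` chunk naming used in uploadChunks above, with an assumed 25 MiB file and the default 10 MiB chunk size:
size, chunkSize := int64(25<<20), int64(10<<20)
for offset := int64(0); offset < size; offset += chunkSize {
	contentLength := chunkSize
	if size-offset < contentLength { // last chunk may be smaller
		contentLength = size - offset
	}
	fmt.Printf("%015d-%015d\n", offset, offset+contentLength-1)
}
// Output:
// 000000000000000-000000010485759
// 000000010485760-000000020971519
// 000000020971520-000000026214399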

View File

@@ -19,7 +19,6 @@ import (
"net/url" "net/url"
"os/exec" "os/exec"
"path" "path"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -43,7 +42,7 @@ import (
)
const (
- minSleep = fs.Duration(10 * time.Millisecond)
+ minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDepth = "1" // depth for PROPFIND
@@ -77,9 +76,6 @@ func init() {
Name: "vendor", Name: "vendor",
Help: "Name of the WebDAV site/service/software you are using.", Help: "Name of the WebDAV site/service/software you are using.",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "fastmail",
Help: "Fastmail Files",
}, {
Value: "nextcloud", Value: "nextcloud",
Help: "Nextcloud", Help: "Nextcloud",
}, { }, {
@@ -128,22 +124,6 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
`,
Default: fs.CommaSepList{},
Advanced: true,
- }, {
- Name: "pacer_min_sleep",
- Help: "Minimum time to sleep between API calls.",
- Default: minSleep,
- Advanced: true,
- }, {
- Name: "nextcloud_chunk_size",
- Help: `Nextcloud upload chunk size.
- We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances.
- See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
- Set to 0 to disable chunked uploading.
- `,
- Advanced: true,
- Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
}},
})
}
@@ -158,8 +138,6 @@ type Options struct {
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
- PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
- ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
}
// Fs represents a remote webdav
@@ -177,12 +155,9 @@ type Fs struct {
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
checkBeforePurge bool // enables extra check that directory to purge really exists
- hasOCMD5 bool // set if can use owncloud style checksums for MD5
- hasOCSHA1 bool // set if can use owncloud style checksums for SHA1
- hasMESHA1 bool // set if can use fastmail style checksums for SHA1
+ hasMD5 bool // set if can use owncloud style checksums for MD5
+ hasSHA1 bool // set if can use owncloud style checksums for SHA1
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
- chunksUploadURL string // upload URL for nextcloud chunked
- canChunk bool // set if nextcloud and nextcloud_chunk_size is set
}
// Object describes a webdav object
@@ -303,7 +278,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
- if f.hasOCMD5 || f.hasOCSHA1 {
+ if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -436,7 +411,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
endpoint: u,
endpointURL: u.String(),
- pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+ pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}
@@ -568,32 +543,19 @@ func (f *Fs) fetchAndSetBearerToken() error {
return nil
}
- var validateNextCloudChunkedURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)
// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
switch vendor {
- case "fastmail":
- f.canStream = true
- f.precision = time.Second
- f.useOCMtime = true
- f.hasMESHA1 = true
case "owncloud":
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
- f.hasOCMD5 = true
- f.hasOCSHA1 = true
+ f.hasMD5 = true
+ f.hasSHA1 = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
- f.hasOCSHA1 = true
- f.canChunk = true
- if err := f.verifyChunkConfig(); err != nil {
- return err
- }
- f.chunksUploadURL = f.getChunksUploadURL()
- fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
+ f.hasSHA1 = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -705,7 +667,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth, "Depth": depth,
}, },
} }
if f.hasOCMD5 || f.hasOCSHA1 { if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps) opts.Body = bytes.NewBuffer(owncloudProps)
} }
var result api.Multistatus var result api.Multistatus
@@ -1034,7 +996,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
- return nil, fmt.Errorf("copy mkParentDir failed: %w", err)
+ return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
}
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
if err != nil {
@@ -1059,11 +1021,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
- return nil, fmt.Errorf("copy call failed: %w", err)
+ return nil, fmt.Errorf("Copy call failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
- return nil, fmt.Errorf("copy NewObject failed: %w", err)
+ return nil, fmt.Errorf("Copy NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -1164,10 +1126,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
hashes := hash.Set(hash.None)
- if f.hasOCMD5 {
+ if f.hasMD5 {
hashes.Add(hash.MD5)
}
- if f.hasOCSHA1 || f.hasMESHA1 {
+ if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
@@ -1235,10 +1197,10 @@ func (o *Object) Remote() string {
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
- if t == hash.MD5 && o.fs.hasOCMD5 {
+ if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
- if t == hash.SHA1 && (o.fs.hasOCSHA1 || o.fs.hasMESHA1) {
+ if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
}
return "", hash.ErrUnsupported
@@ -1260,12 +1222,12 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
- if o.fs.hasOCMD5 || o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
+ if o.fs.hasMD5 || o.fs.hasSHA1 {
hashes := info.Hashes()
- if o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
+ if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
- if o.fs.hasOCMD5 {
+ if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
}
@@ -1342,72 +1304,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update mkParentDir failed: %w", err) return fmt.Errorf("Update mkParentDir failed: %w", err)
} }
if o.shouldUseChunkedUpload(src) { size := src.Size()
fs.Debugf(src, "Update will use the chunked upload strategy") var resp *http.Response
err = o.updateChunked(ctx, in, src, options...) opts := rest.Opts{
if err != nil { Method: "PUT",
return err Path: o.filePath(),
} Body: in,
} else { NoResponse: true,
fs.Debugf(src, "Update will use the normal upload strategy (no chunks)") ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
contentType := fs.MimeType(ctx, src) ContentType: fs.MimeType(ctx, src),
filePath := o.filePath() Options: options,
extraHeaders := o.extraHeaders(ctx, src)
// TODO: define getBody() to enable low-level HTTP/2 retries
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
if err != nil {
return err
}
} }
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
// read metadata from remote opts.ExtraHeaders = map[string]string{}
o.hasMetaData = false
return o.readMetaData(ctx)
}
func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
extraHeaders := map[string]string{}
if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
if o.fs.useOCMtime { if o.fs.useOCMtime {
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix()) opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
} }
// Set one upload checksum // Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5 // Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one // Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasOCSHA1 { if o.fs.hasSHA1 {
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" { if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
extraHeaders["OC-Checksum"] = "SHA1:" + sha1 opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} }
} }
if o.fs.hasOCMD5 && extraHeaders["OC-Checksum"] == "" { if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" { if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
extraHeaders["OC-Checksum"] = "MD5:" + md5 opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
} }
} }
} }
return extraHeaders
}
// Standard update in one request (no chunks)
func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func() (io.ReadCloser, error), filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
var resp *http.Response
if extraHeaders == nil {
extraHeaders = map[string]string{}
}
opts := rest.Opts{
Method: "PUT",
Path: filePath,
GetBody: getBody,
Body: body,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: contentType,
Options: options,
ExtraHeaders: extraHeaders,
RootURL: rootURL,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err) return o.fs.shouldRetry(ctx, resp, err)
@@ -1423,8 +1349,9 @@ func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func(
_ = o.Remove(ctx)
return err
}
- return nil
+ // read metadata from remote
+ o.hasMetaData = false
+ return o.readMetaData(ctx)
}
// Remove an object // Remove an object

View File

@@ -1,10 +1,10 @@
// Test Webdav filesystem interface
- package webdav
+ package webdav_test
import (
"testing"
- "github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/backend/webdav"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -13,10 +13,7 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavNextcloud:",
- NilObject: (*Object)(nil),
- ChunkedUpload: fstests.ChunkedUploadConfig{
- MinChunkSize: 1 * fs.Mebi,
- },
+ NilObject: (*webdav.Object)(nil),
})
}
@@ -27,10 +24,7 @@ func TestIntegration2(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavOwncloud:",
- NilObject: (*Object)(nil),
- ChunkedUpload: fstests.ChunkedUploadConfig{
- Skip: true,
- },
+ NilObject: (*webdav.Object)(nil),
})
}
@@ -41,10 +35,7 @@ func TestIntegration3(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavRclone:",
- NilObject: (*Object)(nil),
- ChunkedUpload: fstests.ChunkedUploadConfig{
- Skip: true,
- },
+ NilObject: (*webdav.Object)(nil),
})
}
@@ -55,10 +46,6 @@ func TestIntegration4(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavNTLM:",
- NilObject: (*Object)(nil),
+ NilObject: (*webdav.Object)(nil),
})
}
- func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
- return f.setUploadChunkSize(cs)
- }

View File

@@ -331,6 +331,15 @@ func parsePath(path string) (root string) {
return
}
func (f *Fs) splitPath(remote string) (directory, leaf string) {
directory, leaf = dircache.SplitPath(remote)
if f.root != "" {
// Adds the root folder to the path to get a full path
directory = path.Join(f.root, directory)
}
return
}
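For illustration (assumed values): with f.root set to "base", the helper above combines dircache.SplitPath with path.Join like this:
dir, leaf := dircache.SplitPath("a/b/c.txt")
// dir == "a/b", leaf == "c.txt"
dir = path.Join("base", dir) // assuming f.root == "base"
// dir == "base/a/b"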
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)

View File

@@ -225,7 +225,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
"StringFileInfo": M{ "StringFileInfo": M{
"CompanyName": "https://rclone.org", "CompanyName": "https://rclone.org",
"ProductName": "Rclone", "ProductName": "Rclone",
"FileDescription": "Rclone", "FileDescription": "Rsync for cloud storage",
"InternalName": "rclone", "InternalName": "rclone",
"OriginalFilename": "rclone.exe", "OriginalFilename": "rclone.exe",
"LegalCopyright": "The Rclone Authors", "LegalCopyright": "The Rclone Authors",

View File

@@ -64,7 +64,6 @@ docs = [
"sia.md", "sia.md",
"swift.md", "swift.md",
"pcloud.md", "pcloud.md",
"pikpak.md",
"premiumizeme.md", "premiumizeme.md",
"putio.md", "putio.md",
"seafile.md", "seafile.md",

View File

@@ -26,8 +26,7 @@ echo "Making release ${VERSION} anchor ${ANCHOR} to repo ${REPO}"
gh release create "${VERSION}" \
--repo ${REPO} \
--title "rclone ${VERSION}" \
- --notes-file "/tmp/${VERSION}-release-notes" \
- --draft=true
+ --notes-file "/tmp/${VERSION}-release-notes"
for build in build/*; do
case $build in
@@ -41,10 +40,6 @@ for build in build/*; do
"${build}" "${build}"
done done
gh release edit "${VERSION}" \
--repo ${REPO} \
--draft=false
gh release view "${VERSION}" \ gh release view "${VERSION}" \
--repo ${REPO} --repo ${REPO}

View File

@@ -128,6 +128,7 @@ var commandDefinition = &cobra.Command{
ctx := context.Background() ctx := context.Background()
opt := Opt opt := Opt
opt.applyContext(ctx) opt.applyContext(ctx)
if tzLocal { if tzLocal {
TZ = time.Local TZ = time.Local
} }

View File

@@ -34,7 +34,7 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if maxDelete < 0 || maxDelete > 100 { if maxDelete < 0 || maxDelete > 100 {
return nil, rc.NewErrParamInvalid(errors.New("maxDelete must be a percentage between 0 and 100")) return nil, rc.NewErrParamInvalid(errors.New("maxDelete must be a percentage between 0 and 100"))
} }
opt.MaxDelete = int(maxDelete) ci.MaxDelete = maxDelete
} else if rc.NotErrParamNotFound(err) { } else if rc.NotErrParamNotFound(err) {
return nil, err return nil, err
} }

View File

@@ -3,8 +3,8 @@ test check-access-filters
# NOTE: Include Other tests may result in listing diffs due to rclone processing order change. False fail. # NOTE: Include Other tests may result in listing diffs due to rclone processing order change. False fail.
# #
# Tests are done in two phases: # Tests are done in two phases:
# - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicitly included directories # - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicity included directories
# - INCLUDE OTHER tests check that RCLONE_TEST files are found in all directories not explicitly excluded # - INCLUDE OTHER tests check that RCLONE_TEST files are found in all directories not explicity excluded
# #
# Each phase checks that: # Each phase checks that:
# - missing RCLONE_TEST files in don't care directories don't cause failures # - missing RCLONE_TEST files in don't care directories don't cause failures

View File

@@ -72,9 +72,6 @@ you what happened to it. These are reminiscent of diff files.
- |+ path| means path was missing on the destination, so only in the source - |+ path| means path was missing on the destination, so only in the source
- |* path| means path was present in source and destination but different. - |* path| means path was present in source and destination but different.
- |! path| means there was an error reading or hashing the source or dest. - |! path| means there was an error reading or hashing the source or dest.
The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
option for more information.
`, "|", "`") `, "|", "`")
// GetCheckOpt gets the options corresponding to the check flags // GetCheckOpt gets the options corresponding to the check flags
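
For illustration, a hypothetical run writing the combined report to stdout might look like this (file names invented; `--combined -` is assumed here as the flag that emits all markers to stdout):

    $ rclone check source: dest: --combined -
    + only-in-source.txt
    * differs-between-sides.bin
    ! unreadable.dat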
@@ -145,7 +142,7 @@ match. It doesn't alter the source or destination.
For the [crypt](/crypt/) remote there is a dedicated command, For the [crypt](/crypt/) remote there is a dedicated command,
[cryptcheck](/commands/rclone_cryptcheck/), that is able to check [cryptcheck](/commands/rclone_cryptcheck/), that is able to check
the checksums of the encrypted files. the checksums of the crypted files.
If you supply the |--size-only| flag, it will only compare the sizes not If you supply the |--size-only| flag, it will only compare the sizes not
the hashes as well. Use this for a quick check. the hashes as well. Use this for a quick check.

View File

@@ -160,11 +160,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
fsys := NewFS(VFS) fsys := NewFS(VFS)
host := fuse.NewFileSystemHost(fsys) host := fuse.NewFileSystemHost(fsys)
host.SetCapReaddirPlus(true) // only works on Windows host.SetCapReaddirPlus(true) // only works on Windows
if opt.CaseInsensitive.Valid { host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
host.SetCapCaseInsensitive(opt.CaseInsensitive.Value)
} else {
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
}
// Create options // Create options
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt) options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)

View File

@@ -28,7 +28,7 @@ func init() {
// returns an error, and an error channel for the serve process to // returns an error, and an error channel for the serve process to
// report an error when fusermount is called. // report an error when fusermount is called.
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) { func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
return nil, nil, errors.New("rclone mount is not supported on MacOS when rclone is installed via Homebrew. " + return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
"Please install the rclone binaries available at https://rclone.org/downloads/ " + "Please install the binaries available at https://rclone." +
"instead if you want to use the rclone mount command") "org/downloads/ instead if you want to use the mount command")
} }

View File

@@ -26,5 +26,6 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil { if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
return "", err return "", err
} }
opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
return mountPath, nil return mountPath, nil
} }

View File

@@ -9,9 +9,11 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"strings"
"github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/file"
) )
@@ -19,10 +21,13 @@ var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`) var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`) var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`) var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
var isAnyPathSeparatorRegex = regexp.MustCompile(`[/\\]+`) // Matches any path separators, slash or backslash, or sequences of them
// isNetworkSharePath returns true if the given string is a valid network share path, // isNetworkSharePath returns true if the given string is a network share path,
// in the basic UNC format "\\Server\Share\Path", where the first two path components // in the basic UNC format "\\Server\Share\Path". The first two path components
// are required ("\\Server\Share", which represents the volume). // are required ("\\Server\Share"), and represents the volume. The rest of the
// string can be anything, i.e. can be a nested path ("\\Server\Share\Path\Path\Path").
// Actual validity of the path, e.g. if it contains invalid characters, is not considered.
// Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is // Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
// not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\". // not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
// Note: There is a UNCPath function in lib/file, but it refers to any extended-length // Note: There is a UNCPath function in lib/file, but it refers to any extended-length
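
A quick standalone check of the UNC-detection regex above against some sample paths (paths invented for illustration):

    package main

    import (
        "fmt"
        "regexp"
    )

    // The network share path regex from the diff above.
    var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)

    func main() {
        for _, p := range []string{
            `\\server\share`,       // basic UNC volume: matches
            `\\server\share\a\b`,   // nested path: matches
            `\\?\UNC\server\share`, // extended-length prefix: no match
            `C:\mount\dir`,         // local directory path: no match
        } {
            fmt.Printf("%-28q %v\n", p, isNetworkSharePathRegex.MatchString(p))
        }
    }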
@@ -111,7 +116,7 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
// Drive letter string can be used as is, since we have already checked it does not exist, // Drive letter string can be used as is, since we have already checked it does not exist,
// but directory path needs more checks. // but directory path needs more checks.
if opt.NetworkMode { if opt.NetworkMode {
fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint") fs.Debugf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
opt.NetworkMode = false opt.NetworkMode = false
} }
var err error var err error
@@ -132,30 +137,47 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
return mountpath, nil return mountpath, nil
} }
// networkSharePathEncoder is an encoder used to make strings valid as (part of) Windows network share UNC paths
const networkSharePathEncoder = (encoder.EncodeZero | // NUL(0x00)
encoder.EncodeCtl | // CTRL(0x01-0x1F)
encoder.EncodeDel | // DEL(0x7F)
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8) // Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
// encodeNetworkSharePath makes a string valid to use as (part of) a Windows network share UNC path.
// Backslash is used as the path separator here, but the library treats forward slashes as
// path separators too, and therefore neither of them is encoded. For convenience, the result
// is normalized to backslashes only. UNC paths always start with two path separators, but
// WinFsp requires the volume prefix as a UNC-like path with only a single leading backslash,
// and multiple separators are not valid in any other part of a network share path, so (unlike
// what filepath.FromSlash would do) runs of separators are replaced with a single one (as
// filepath.Clean would also do, among other things). A trailing path separator would just be
// ignored, but we remove it here as well for convenience.
func encodeNetworkSharePath(volumeName string) string {
return networkSharePathEncoder.Encode(strings.TrimRight(isAnyPathSeparatorRegex.ReplaceAllString(volumeName, `\`), `\`))
}
// handleVolumeName handles the volume name option. // handleVolumeName handles the volume name option.
func handleVolumeName(opt *mountlib.Options, volumeName string) { func handleVolumeName(opt *mountlib.Options) {
// If volumeName parameter is set, then just set that into options replacing any existing value. // Ensure the volume name option is a valid network share UNC path if network mode,
// Else, ensure the volume name option is a valid network share UNC path if network mode,
// and ensure network mode if configured volume name is already UNC path. // and ensure network mode if configured volume name is already UNC path.
if volumeName != "" { if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
opt.VolumeName = volumeName
} else if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
// Use value of given volume name option, but check if it is disk volume name or network volume prefix // Use value of given volume name option, but check if it is disk volume name or network volume prefix
if isNetworkSharePath(opt.VolumeName) { if isNetworkSharePath(opt.VolumeName) {
// Specified volume name is network share UNC path, assume network mode and use it as volume prefix // Specified volume name is network share UNC path, assume network mode and use it as volume prefix
opt.VolumeName = opt.VolumeName[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash opt.VolumeName = encodeNetworkSharePath(opt.VolumeName[1:]) // We know from isNetworkSharePath it has a duplicate path separator prefix, so removes that right away (but encodeNetworkSharePath would remove it also)
if !opt.NetworkMode { if !opt.NetworkMode {
// Specified volume name is network share UNC path, force network mode and use it as volume prefix // Specified volume name is network share UNC path, force network mode and use it as volume prefix
fs.Debugf(nil, "Forcing network mode due to network share (UNC) volume name") fs.Debugf(nil, "Forcing network mode due to network share (UNC) volume name")
opt.NetworkMode = true opt.NetworkMode = true
} }
} else if opt.NetworkMode { } else if opt.NetworkMode {
// Plain volume name treated as share name in network mode, append to hard coded "\\server" prefix to get full volume prefix. // Specified volume name is not a valid network share UNC path, but network mode is enabled, so append to a hard coded server prefix and use it as volume prefix
opt.VolumeName = "\\server\\" + opt.VolumeName opt.VolumeName = `\server\` + strings.TrimLeft(encodeNetworkSharePath(opt.VolumeName), `\`)
} }
} else if opt.NetworkMode { } else if opt.NetworkMode {
// Hard coded default // Use hard coded default
opt.VolumeName = "\\server\\share" opt.VolumeName = `\server\share`
} }
} }
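
To see what the new encoder does to a problematic default volume name, here is a rough standalone reproduction of the definitions above. The expected output is an assumption based on rclone's usual convention of replacing invalid characters with their fullwidth unicode equivalents:

    package main

    import (
        "fmt"
        "regexp"
        "strings"

        "github.com/rclone/rclone/lib/encoder"
    )

    // Reproduced from the diff above so the example runs standalone.
    var isAnyPathSeparatorRegex = regexp.MustCompile(`[/\\]+`)

    const networkSharePathEncoder = (encoder.EncodeZero |
        encoder.EncodeCtl |
        encoder.EncodeDel |
        encoder.EncodeWin |
        encoder.EncodeInvalidUtf8)

    func encodeNetworkSharePath(volumeName string) string {
        return networkSharePathEncoder.Encode(strings.TrimRight(isAnyPathSeparatorRegex.ReplaceAllString(volumeName, `\`), `\`))
    }

    func main() {
        // The '?' from a local remote's UNC path is invalid in a share path;
        // separator runs are collapsed and the trailing one is dropped.
        fmt.Println(encodeNetworkSharePath(`\\server\\? C  Temp/`))
        // Assumed output: \server\？ C  Temp   (with a fullwidth '？')
    }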
@@ -174,22 +196,27 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
} }
// Handle mountpath // Handle mountpath
var volumeName string
if isDefaultPath(mountpath) { if isDefaultPath(mountpath) {
// Mount path indicates defaults, which will automatically pick an unused drive letter. // Mount path indicates defaults, which will automatically pick an unused drive letter.
mountpoint, err = handleDefaultMountpath() if mountpoint, err = handleDefaultMountpath(); err != nil {
return
}
} else if isNetworkSharePath(mountpath) { } else if isNetworkSharePath(mountpath) {
// Mount path is a valid network share path (UNC format, "\\Server\Share" prefix). // Mount path is a valid network share path (UNC format, "\\Server\Share" prefix).
mountpoint, err = handleNetworkShareMountpath(mountpath, opt) if mountpoint, err = handleNetworkShareMountpath(mountpath, opt); err != nil {
// In this case the volume name is taken from the mount path, will replace any existing volume name option. return
volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash }
// In this case the volume name is taken from the mount path, it replaces any existing volume name option.
opt.VolumeName = mountpath
} else { } else {
// Mount path is drive letter or directory path. // Mount path is drive letter or directory path.
mountpoint, err = handleLocalMountpath(f, mountpath, opt) if mountpoint, err = handleLocalMountpath(f, mountpath, opt); err != nil {
return
}
} }
// Handle volume name // Handle volume name
handleVolumeName(opt, volumeName) handleVolumeName(opt)
// Done, return mountpoint to be used, together with updated mount options. // Done, return mountpoint to be used, together with updated mount options.
if opt.NetworkMode { if opt.NetworkMode {

View File

@@ -22,11 +22,11 @@ func init() {
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path", Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of an encrypted remote.`, Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: ` Long: `
rclone cryptcheck checks a remote against a [crypted](/crypt/) remote. rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
This is the equivalent of running rclone [check](/commands/rclone_check/), This is the equivalent of running rclone [check](/commands/rclone_check/),
but able to check the checksums of the encrypted remote. but able to check the checksums of the crypted remote.
For it to work the underlying remote of the cryptedremote must support For it to work the underlying remote of the cryptedremote must support
some kind of checksum. some kind of checksum.
@@ -59,7 +59,7 @@ After it has run it will log the status of the encryptedremote:.
}, },
} }
// cryptCheck checks the integrity of an encrypted remote // cryptCheck checks the integrity of a crypted remote
func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error { func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
// Check to see fcrypt is a crypt // Check to see fcrypt is a crypt
fcrypt, ok := fdst.(*crypt.Fs) fcrypt, ok := fdst.(*crypt.Fs)

View File

@@ -250,7 +250,7 @@ func (d *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (node fusefs.No
defer log.Trace(d, "name=%v, mode=%d, rdev=%d", req.Name, req.Mode, req.Rdev)("node=%v, err=%v", &node, &err) defer log.Trace(d, "name=%v, mode=%d, rdev=%d", req.Name, req.Mode, req.Rdev)("node=%v, err=%v", &node, &err)
if req.Rdev != 0 { if req.Rdev != 0 {
fs.Errorf(d, "Can't create device node %q", req.Name) fs.Errorf(d, "Can't create device node %q", req.Name)
return nil, fuse.Errno(syscall.EIO) return nil, fuse.EIO
} }
var cReq = fuse.CreateRequest{ var cReq = fuse.CreateRequest{
Name: req.Name, Name: req.Name,

View File

@@ -82,11 +82,11 @@ func translateError(err error) error {
case vfs.OK: case vfs.OK:
return nil return nil
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound: case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
return fuse.Errno(syscall.ENOENT) return fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists: case vfs.EEXIST, fs.ErrorDirExists:
return fuse.Errno(syscall.EEXIST) return fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied: case vfs.EPERM, fs.ErrorPermissionDenied:
return fuse.Errno(syscall.EPERM) return fuse.EPERM
case vfs.ECLOSED: case vfs.ECLOSED:
return fuse.Errno(syscall.EBADF) return fuse.Errno(syscall.EBADF)
case vfs.ENOTEMPTY: case vfs.ENOTEMPTY:

View File

@@ -79,6 +79,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil { if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
return nil, nil, err return nil, nil, err
} }
opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
fs.Debugf(f, "Mounting on %q", mountpoint) fs.Debugf(f, "Mounting on %q", mountpoint)
if opt.DebugFUSE { if opt.DebugFUSE {

View File

@@ -151,6 +151,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil { if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
return nil, nil, err return nil, nil, err
} }
opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
fs.Debugf(f, "Mounting on %q", mountpoint) fs.Debugf(f, "Mounting on %q", mountpoint)
fsys := NewFS(VFS, opt) fsys := NewFS(VFS, opt)

View File

@@ -254,44 +254,6 @@ example above.
Note that mapping to a directory path, instead of a drive letter, Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations. does not suffer from the same limitations.
### Mounting on macOS
Mounting on macOS can be done either via [macFUSE](https://osxfuse.github.io/)
(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.
#### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are current as
of FUSE-T version 1.0.14.
##### ModTime update on read
As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
> File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands
This means that viewing files with various tools, notably macOS Finder, will cause rclone
to update the modification time of the file. This may make rclone upload a full new copy
of the file.
##### Unicode Normalization
Rclone includes flags for unicode normalization with macFUSE that should be updated
for FUSE-T. See [this forum post](https://forum.rclone.org/t/some-unicode-forms-break-mount-on-macos-with-fuse-t/36403)
and [FUSE-T issue #16](https://github.com/macos-fuse-t/fuse-t/issues/16). The following
flag should be added to the |rclone mount| command.
-o modules=iconv,from_code=UTF-8,to_code=UTF-8
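
For illustration, a full mount command with this module option might look like the following (remote name and mountpoint are hypothetical):

    rclone mount remote: /Users/me/mnt -o modules=iconv,from_code=UTF-8,to_code=UTF-8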
##### Read Only mounts
When mounting with |--read-only|, attempts to write to files will fail *silently* as
opposed to with a clear warning as in macFUSE.
### Limitations ### Limitations
Without the use of |--vfs-cache-mode| this can only write files Without the use of |--vfs-cache-mode| this can only write files

View File

@@ -48,7 +48,6 @@ type Options struct {
DaemonTimeout time.Duration // OSXFUSE only DaemonTimeout time.Duration // OSXFUSE only
AsyncRead bool AsyncRead bool
NetworkMode bool // Windows only NetworkMode bool // Windows only
CaseInsensitive fs.Tristate
} }
// DefaultOpt is the default values for creating the mount // DefaultOpt is the default values for creating the mount
@@ -58,6 +57,7 @@ var DefaultOpt = Options{
NoAppleDouble: true, // use noappledouble by default NoAppleDouble: true, // use noappledouble by default
NoAppleXattr: false, // do not use noapplexattr by default NoAppleXattr: false, // do not use noapplexattr by default
AsyncRead: true, // do async reads by default AsyncRead: true, // do async reads by default
NetworkMode: true, // use network mode by default (Windows only)
} }
type ( type (
@@ -140,7 +140,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)") flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)") flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path") flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
flags.FVarP(flagSet, &Opt.CaseInsensitive, "mount-case-insensitive", "", "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)")
// Windows and OSX // Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)") flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
// OSX only // OSX only
@@ -241,8 +240,12 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
func (m *MountPoint) Mount() (daemon *os.Process, err error) { func (m *MountPoint) Mount() (daemon *os.Process, err error) {
// Ensure sensible defaults // Ensure sensible defaults
m.SetVolumeName(m.MountOpt.VolumeName) if m.MountOpt.VolumeName == "" {
m.SetDeviceName(m.MountOpt.DeviceName) m.MountOpt.VolumeName = fs.ConfigString(m.Fs)
}
if m.MountOpt.DeviceName == "" {
m.MountOpt.DeviceName = fs.ConfigString(m.Fs)
}
// Start background task if --daemon is specified // Start background task if --daemon is specified
if m.MountOpt.Daemon { if m.MountOpt.Daemon {

View File

@@ -98,7 +98,7 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
} }
if mountOpt.Daemon { if mountOpt.Daemon {
return nil, errors.New("daemon option not supported over the API") return nil, errors.New("Daemon Option not supported over the API")
} }
mountType, err := in.GetString("mountType") mountType, err := in.GetString("mountType")
@@ -111,7 +111,7 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
} }
mountType, mountFn := ResolveMountMethod(mountType) mountType, mountFn := ResolveMountMethod(mountType)
if mountFn == nil { if mountFn == nil {
return nil, errors.New("mount option specified is not registered, or is invalid") return nil, errors.New("Mount Option specified is not registered, or is invalid")
} }
// Get Fs.fs to be mounted from fs parameter in the params // Get Fs.fs to be mounted from fs parameter in the params

View File

@@ -97,29 +97,10 @@ func checkMountEmpty(mountpoint string) error {
return fmt.Errorf(msg+": %w", mountpoint, err) return fmt.Errorf(msg+": %w", mountpoint, err)
} }
// SetVolumeName with sensible default // MakeVolumeNameValidOnUnix takes a volume name and returns a variant that is valid on unix systems.
func (m *MountPoint) SetVolumeName(vol string) { func MakeVolumeNameValidOnUnix(volumeName string) string {
if vol == "" { volumeName = strings.ReplaceAll(volumeName, ":", " ")
vol = fs.ConfigString(m.Fs) volumeName = strings.ReplaceAll(volumeName, "/", " ")
} volumeName = strings.TrimSpace(volumeName)
m.MountOpt.SetVolumeName(vol) return volumeName
}
// SetVolumeName removes special characters from volume name if necessary
func (o *Options) SetVolumeName(vol string) {
vol = strings.ReplaceAll(vol, ":", " ")
vol = strings.ReplaceAll(vol, "/", " ")
vol = strings.TrimSpace(vol)
if runtime.GOOS == "windows" && len(vol) > 32 {
vol = vol[:32]
}
o.VolumeName = vol
}
// SetDeviceName with sensible default
func (m *MountPoint) SetDeviceName(dev string) {
if dev == "" {
dev = fs.ConfigString(m.Fs)
}
m.MountOpt.DeviceName = dev
} }
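
As a small standalone illustration of the unix variant kept above (function body copied verbatim from the diff):

    package main

    import (
        "fmt"
        "strings"
    )

    // Copied from the diff above: replaces characters that are not valid in
    // unix volume names with spaces and trims the result.
    func MakeVolumeNameValidOnUnix(volumeName string) string {
        volumeName = strings.ReplaceAll(volumeName, ":", " ")
        volumeName = strings.ReplaceAll(volumeName, "/", " ")
        volumeName = strings.TrimSpace(volumeName)
        return volumeName
    }

    func main() {
        // A default volume name derived from the remote string:
        fmt.Printf("%q\n", MakeVolumeNameValidOnUnix("remote:path/to/files"))
        // "remote path to files"
    }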

View File

@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/cmd/ncdu/scan" "github.com/rclone/rclone/cmd/ncdu/scan"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rivo/uniseg" "github.com/rivo/uniseg"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -82,7 +81,7 @@ the remote you can also use the [size](/commands/rclone_size/) command.
cmd.CheckArgs(1, 1, command, args) cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args) fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error { cmd.Run(false, false, command, func() error {
return NewUI(fsrc).Run() return NewUI(fsrc).Show()
}) })
}, },
} }
@@ -354,7 +353,7 @@ func (u *UI) hasEmptyDir() bool {
} }
// Draw the current screen // Draw the current screen
func (u *UI) Draw() { func (u *UI) Draw() error {
ctx := context.Background() ctx := context.Background()
w, h := u.s.Size() w, h := u.s.Size()
u.dirListHeight = h - 3 u.dirListHeight = h - 3
@@ -490,6 +489,8 @@ func (u *UI) Draw() {
if u.showBox { if u.showBox {
u.Box() u.Box()
} }
u.s.Show()
return nil
} }
// Move the cursor this many spaces adjusting the viewport as necessary // Move the cursor this many spaces adjusting the viewport as necessary
@@ -899,8 +900,8 @@ func NewUI(f fs.Fs) *UI {
} }
} }
// Run shows the user interface // Show shows the user interface
func (u *UI) Run() error { func (u *UI) Show() error {
var err error var err error
u.s, err = tcell.NewScreen() u.s, err = tcell.NewScreen()
if err != nil { if err != nil {
@@ -910,28 +911,6 @@ func (u *UI) Run() error {
if err != nil { if err != nil {
return fmt.Errorf("screen init: %w", err) return fmt.Errorf("screen init: %w", err)
} }
// Hijack fs.LogPrint so that it doesn't corrupt the screen.
if logPrint := fs.LogPrint; !log.Redirected() {
type log struct {
text string
level fs.LogLevel
}
var logs []log
fs.LogPrint = func(level fs.LogLevel, text string) {
if len(logs) > 100 {
logs = logs[len(logs)-100:]
}
logs = append(logs, log{level: level, text: text})
}
defer func() {
fs.LogPrint = logPrint
for i := range logs {
logPrint(logs[i].level, logs[i].text)
}
}()
}
defer u.s.Fini() defer u.s.Fini()
// scan the disk in the background // scan the disk in the background
@@ -945,6 +924,10 @@ func (u *UI) Run() error {
// Main loop, waiting for events and channels // Main loop, waiting for events and channels
outer: outer:
for { for {
err := u.Draw()
if err != nil {
return fmt.Errorf("draw failed: %w", err)
}
select { select {
case root := <-rootChan: case root := <-rootChan:
u.root = root u.root = root
@@ -955,14 +938,16 @@ outer:
} }
u.listing = false u.listing = false
case <-updated: case <-updated:
// redraw
// TODO: might want to limit updates per second // TODO: might want to limit updates per second
u.sortCurrentDir() u.sortCurrentDir()
case ev := <-events: case ev := <-events:
switch ev := ev.(type) { switch ev := ev.(type) {
case *tcell.EventResize: case *tcell.EventResize:
u.Draw() if u.root != nil {
u.sortCurrentDir() // redraw
}
u.s.Sync() u.s.Sync()
continue // don't draw again
case *tcell.EventKey: case *tcell.EventKey:
var c rune var c rune
if k := ev.Key(); k == tcell.KeyRune { if k := ev.Key(); k == tcell.KeyRune {
@@ -1041,15 +1026,11 @@ outer:
// Refresh the screen. Not obvious what key to map // Refresh the screen. Not obvious what key to map
// this onto, but ^L is a common choice. // this onto, but ^L is a common choice.
case key(tcell.KeyCtrlL): case key(tcell.KeyCtrlL):
u.Draw()
u.s.Sync() u.s.Sync()
continue // don't draw again
} }
} }
} }
// listen to key presses, etc.
u.Draw()
u.s.Show()
} }
return nil return nil
} }

View File

@@ -75,13 +75,14 @@ func startProgress() func() {
// state for the progress printing // state for the progress printing
var ( var (
nlines = 0 // number of lines in the previous stats block nlines = 0 // number of lines in the previous stats block
progressMu sync.Mutex
) )
// printProgress prints the progress with an optional log // printProgress prints the progress with an optional log
func printProgress(logMessage string) { func printProgress(logMessage string) {
operations.StdoutMutex.Lock() progressMu.Lock()
defer operations.StdoutMutex.Unlock() defer progressMu.Unlock()
var buf bytes.Buffer var buf bytes.Buffer
w, _ := terminal.GetSize() w, _ := terminal.GetSize()

View File

@@ -52,11 +52,10 @@ and actually stream it, even if remote backend doesn't support streaming.
size of the stream is different in length to the ` + "`--size`" + ` passed in size of the stream is different in length to the ` + "`--size`" + ` passed in
then the transfer will likely fail. then the transfer will likely fail.
Note that the upload cannot be retried because the data is not stored. Note that the upload can also not be retried because the data is
If the backend supports multipart uploading then individual chunks can not kept around until the upload succeeds. If you need to transfer
be retried. If you need to transfer a lot of data, you may be better a lot of data, you're better off caching locally and then
off caching it locally and then ` + "`rclone move`" + ` it to the ` + "`rclone move`" + ` it to the destination.`,
destination which can use retries.`,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.38", "versionIntroduced": "v1.38",
}, },
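
As a hypothetical illustration of the `--size` caveat mentioned above, the declared size has to match the stream exactly:

    # 10 MiB from a pipe with a matching size hint (10 * 1024 * 1024 = 10485760 bytes);
    # if the stream length differed from --size the transfer would likely fail.
    dd if=/dev/zero bs=1M count=10 | rclone rcat --size 10485760 remote:path/zeros.bin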

View File

@@ -32,7 +32,7 @@ for GET requests on the URL passed in. It will also open the URL in
the browser when rclone is run. the browser when rclone is run.
See the [rc documentation](/rc/) for more info on the rc flags. See the [rc documentation](/rc/) for more info on the rc flags.
` + libhttp.Help(rcflags.FlagPrefix) + libhttp.TemplateHelp(rcflags.FlagPrefix) + libhttp.AuthHelp(rcflags.FlagPrefix), ` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.45", "versionIntroduced": "v1.45",
}, },

View File

@@ -469,7 +469,7 @@ func (d *Driver) PutFile(path string, data io.Reader, appendData bool) (n int64,
} }
defer closeIO(path, of) defer closeIO(path, of)
_, err = of.Seek(0, io.SeekEnd) _, err = of.Seek(0, os.SEEK_END)
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@@ -44,15 +44,11 @@ var DefaultOpt = Options{
// Opt is options set by command line flags // Opt is options set by command line flags
var Opt = DefaultOpt var Opt = DefaultOpt
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() { func init() {
flagSet := Command.Flags() flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth) libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP) libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, flagPrefix, &Opt.Template) libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
vfsflags.AddFlags(flagSet) vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet) proxyflags.AddFlags(flagSet)
} }
@@ -72,7 +68,7 @@ The server will log errors. Use ` + "`-v`" + ` to see access logs.
` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to ` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to
control the stats printing. control the stats printing.
` + libhttp.Help(flagPrefix) + libhttp.TemplateHelp(flagPrefix) + libhttp.AuthHelp(flagPrefix) + vfs.Help + proxy.Help, ` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.39", "versionIntroduced": "v1.39",
}, },

View File

@@ -13,8 +13,6 @@ import (
"strings" "strings"
"time" "time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware" "github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd"
@@ -49,14 +47,10 @@ var DefaultOpt = Options{
// Opt is options set by command line flags // Opt is options set by command line flags
var Opt = DefaultOpt var Opt = DefaultOpt
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() { func init() {
flagSet := Command.Flags() flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth) libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP) libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout") flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout")
flags.BoolVarP(flagSet, &Opt.AppendOnly, "append-only", "", false, "Disallow deletion of repository data") flags.BoolVarP(flagSet, &Opt.AppendOnly, "append-only", "", false, "Disallow deletion of repository data")
flags.BoolVarP(flagSet, &Opt.PrivateRepos, "private-repos", "", false, "Users can only access their private repo") flags.BoolVarP(flagSet, &Opt.PrivateRepos, "private-repos", "", false, "Users can only access their private repo")
@@ -148,7 +142,7 @@ these **must** end with /. Eg
The` + "`--private-repos`" + ` flag can be used to limit users to repositories starting The` + "`--private-repos`" + ` flag can be used to limit users to repositories starting
with a path of ` + "`/<username>/`" + `. with a path of ` + "`/<username>/`" + `.
` + libhttp.Help(flagPrefix) + libhttp.AuthHelp(flagPrefix), ` + libhttp.Help + libhttp.AuthHelp,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.40", "versionIntroduced": "v1.40",
}, },
@@ -179,17 +173,7 @@ with a path of ` + "`/<username>/`" + `.
return nil return nil
} }
fs.Logf(s.f, "Serving restic REST API on %s", s.URLs()) fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
if err := sysdnotify.Ready(); err != nil {
fs.Logf(s.f, "failed to notify ready to systemd: %v", err)
}
s.Wait() s.Wait()
if err := sysdnotify.Stopping(); err != nil {
fs.Logf(s.f, "failed to notify stopping to systemd: %v", err)
}
return nil return nil
}) })
}, },

View File

@@ -108,7 +108,7 @@ which can lead to "corrupted on transfer" errors. This is the case because
the client chooses indiscriminately which server to send commands to while the client chooses indiscriminately which server to send commands to while
the servers all have different views of the state of the filing system. the servers all have different views of the state of the filing system.
The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from beeing
used. Omitting "restrict" and using ` + "`--sftp-path-override`" + ` to enable used. Omitting "restrict" and using ` + "`--sftp-path-override`" + ` to enable
checksumming is possible but less secure and you could use the SFTP server checksumming is possible but less secure and you could use the SFTP server
provided by OpenSSH in this case. provided by OpenSSH in this case.

View File

@@ -48,14 +48,10 @@ var DefaultOpt = Options{
// Opt is options set by command line flags // Opt is options set by command line flags
var Opt = DefaultOpt var Opt = DefaultOpt
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() { func init() {
flagSet := Command.Flags() flagSet := Command.Flags()
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth) libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP) libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template) libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
vfsflags.AddFlags(flagSet) vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet) proxyflags.AddFlags(flagSet)
@@ -107,7 +103,7 @@ Create a new DWORD BasicAuthLevel with value 2.
https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint
` + libhttp.Help(flagPrefix) + libhttp.TemplateHelp(flagPrefix) + libhttp.AuthHelp(flagPrefix) + vfs.Help + proxy.Help, ` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.39", "versionIntroduced": "v1.39",
}, },

View File

@@ -39,7 +39,7 @@ recursion.
Some backends do not always provide file sizes, see for example Some backends do not always provide file sizes, see for example
[Google Photos](/googlephotos/#size) and [Google Photos](/googlephotos/#size) and
[Google Docs](/drive/#limitations-of-google-docs). [Google Drive](/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output files were encountered, and count them in as empty files in the output
of the size command. of the size command.

View File

@@ -16,7 +16,7 @@ var Command = &cobra.Command{
Short: `Run a test command`, Short: `Run a test command`,
Long: `Rclone test is used to run test commands. Long: `Rclone test is used to run test commands.
Select which test command you want with the subcommand, eg Select which test comand you want with the subcommand, eg
rclone test memory remote: rclone test memory remote:

View File

@@ -141,7 +141,7 @@ func Touch(ctx context.Context, f fs.Fs, remote string) error {
file, err := f.NewObject(ctx, remote) file, err := f.NewObject(ctx, remote)
if err != nil { if err != nil {
if errors.Is(err, fs.ErrorObjectNotFound) { if errors.Is(err, fs.ErrorObjectNotFound) {
// Touching non-existent path, possibly creating it as new file // Touching non-existant path, possibly creating it as new file
if remote == "" { if remote == "" {
fs.Logf(f, "Not touching empty directory") fs.Logf(f, "Not touching empty directory")
return nil return nil

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/dirtree" "github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/terminal" "github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -28,7 +27,6 @@ var (
outFileName string outFileName string
noReport bool noReport bool
sort string sort string
enc = encoder.OS
) )
func init() { func init() {
@@ -165,7 +163,7 @@ type FileInfo struct {
// Name is base name of the file // Name is base name of the file
func (to *FileInfo) Name() string { func (to *FileInfo) Name() string {
return enc.FromStandardName(path.Base(to.entry.Remote())) return path.Base(to.entry.Remote())
} }
// Size in bytes for regular files; system-dependent for others // Size in bytes for regular files; system-dependent for others
@@ -199,7 +197,7 @@ func (to *FileInfo) Sys() interface{} {
// String returns the full path // String returns the full path
func (to *FileInfo) String() string { func (to *FileInfo) String() string {
return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote())) return to.entry.Remote()
} }
// Fs maps an fs.Fs into a tree.Fs // Fs maps an fs.Fs into a tree.Fs
@@ -214,7 +212,6 @@ func NewFs(dirs dirtree.DirTree) Fs {
func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) { func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err) defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
filePath = filepath.ToSlash(filePath) filePath = filepath.ToSlash(filePath)
filePath = enc.ToStandardPath(filePath)
filePath = strings.TrimLeft(filePath, "/") filePath = strings.TrimLeft(filePath, "/")
if filePath == "" { if filePath == "" {
return &FileInfo{fs.NewDir("", time.Now())}, nil return &FileInfo{fs.NewDir("", time.Now())}, nil
@@ -230,14 +227,13 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
func (dirs Fs) ReadDir(dir string) (names []string, err error) { func (dirs Fs) ReadDir(dir string) (names []string, err error) {
defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err) defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
dir = filepath.ToSlash(dir) dir = filepath.ToSlash(dir)
dir = enc.ToStandardPath(dir)
dir = strings.TrimLeft(dir, "/") dir = strings.TrimLeft(dir, "/")
entries, ok := dirs[dir] entries, ok := dirs[dir]
if !ok { if !ok {
return nil, fmt.Errorf("couldn't find directory %q", dir) return nil, fmt.Errorf("couldn't find directory %q", dir)
} }
for _, entry := range entries { for _, entry := range entries {
names = append(names, enc.FromStandardName(path.Base(entry.Remote()))) names = append(names, path.Base(entry.Remote()))
} }
return return
} }

View File

@@ -43,7 +43,7 @@ const rcloneTestMain = "RCLONE_TEST_MAIN"
// rcloneExecMain calls rclone with the given environment and arguments. // rcloneExecMain calls rclone with the given environment and arguments.
// The environment variables are in a single string separated by ; // The environment variables are in a single string separated by ;
// The terminal output is returned as a string. // The terminal output is retuned as a string.
func rcloneExecMain(env string, args ...string) (string, error) { func rcloneExecMain(env string, args ...string) (string, error) {
_, found := os.LookupEnv(rcloneTestMain) _, found := os.LookupEnv(rcloneTestMain)
if !found { if !found {
@@ -62,7 +62,7 @@ func rcloneExecMain(env string, args ...string) (string, error) {
// rcloneEnv calls rclone with the given environment and arguments. // rcloneEnv calls rclone with the given environment and arguments.
// The environment variables are in a single string separated by ; // The environment variables are in a single string separated by ;
// The test config file is automatically configured in RCLONE_CONFIG. // The test config file is automatically configured in RCLONE_CONFIG.
// The terminal output is returned as a string. // The terminal output is retuned as a string.
func rcloneEnv(env string, args ...string) (string, error) { func rcloneEnv(env string, args ...string) (string, error) {
envConfig := env envConfig := env
if testConfig != "" { if testConfig != "" {
@@ -76,7 +76,7 @@ func rcloneEnv(env string, args ...string) (string, error) {
// rclone calls rclone with the given arguments, E.g. "version","--help". // rclone calls rclone with the given arguments, E.g. "version","--help".
// The test config file is automatically configured in RCLONE_CONFIG. // The test config file is automatically configured in RCLONE_CONFIG.
// The terminal output is returned as a string. // The terminal output is retuned as a string.
func rclone(args ...string) (string, error) { func rclone(args ...string) (string, error) {
return rcloneEnv("", args...) return rcloneEnv("", args...)
} }

View File

@@ -8,7 +8,7 @@ FROM alpine:latest
COPY --from=binaries /usr/local/bin/rclone /usr/bin/rclone COPY --from=binaries /usr/local/bin/rclone /usr/bin/rclone
RUN mkdir -p /data/config /data/cache /mnt \ RUN mkdir -p /data/config /data/cache /mnt \
&& apk --no-cache add ca-certificates fuse3 tzdata \ && apk --no-cache add ca-certificates fuse tzdata \
&& echo "user_allow_other" >> /etc/fuse.conf \ && echo "user_allow_other" >> /etc/fuse.conf \
&& rclone version && rclone version

View File

@@ -121,7 +121,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}} {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}} {{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}} {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}} {{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}} {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
@@ -133,7 +132,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}} {{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}} {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}} {{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}} {{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}} {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Liara Object Storage" home="https://liara.ir/landing/object-storage" config="/s3/#liara-object-storage" >}} {{< provider name="Liara Object Storage" home="https://liara.ir/landing/object-storage" config="/s3/#liara-object-storage" >}}
@@ -152,7 +151,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}} {{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="PikPak" home="https://mypikpak.com/" config="/pikpak/" >}}
{{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}} {{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
{{< provider name="put.io" home="https://put.io/" config="/putio/" >}} {{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}} {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}

View File

@@ -13,7 +13,7 @@ Authors
Contributors Contributors
------------ ------------
{{< rem `email addresses removed from here need to be added to {{< rem `email addresses removed from here need to be addeed to
bin/.ignore-emails to make sure update-authors.py doesn't immediately bin/.ignore-emails to make sure update-authors.py doesn't immediately
put them back in again.` >}} put them back in again.` >}}
@@ -597,7 +597,6 @@ put them back in again.` >}}
* Christian Galo <36752715+cgalo5758@users.noreply.github.com> * Christian Galo <36752715+cgalo5758@users.noreply.github.com>
* Erik van Velzen <erik@evanv.nl> * Erik van Velzen <erik@evanv.nl>
* Derek Battams <derek@battams.ca> * Derek Battams <derek@battams.ca>
* Paul <devnoname120@gmail.com>
* SimonLiu <simonliu009@users.noreply.github.com> * SimonLiu <simonliu009@users.noreply.github.com>
* Hugo Laloge <hla@lescompanions.com> * Hugo Laloge <hla@lescompanions.com>
* Mr-Kanister <68117355+Mr-Kanister@users.noreply.github.com> * Mr-Kanister <68117355+Mr-Kanister@users.noreply.github.com>
@@ -690,26 +689,3 @@ put them back in again.` >}}
* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com> * Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
* ToBeFree <github@tfrei.de> * ToBeFree <github@tfrei.de>
* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com> * NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>
* Peter Brunner <peter@lugoues.net>
* Ninh Pham <dongian.rapclubkhtn@gmail.com>
* Ryan Caezar Itang <sitiom@proton.me>
* Peter Brunner <peter@psykhe.com>
* Leandro Sacchet <leandro.sacchet@animati.com.br>
* dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* cycneuramus <56681631+cycneuramus@users.noreply.github.com>
* Arnavion <me@arnavion.dev>
* Christopher Merry <christopher.merry@mlb.com>
* Thibault Coupin <thibault.coupin@gmail.com>
* Richard Tweed <RichardoC@users.noreply.github.com>
* Zach Kipp <Zacho2@users.noreply.github.com>
* yuudi <26199752+yuudi@users.noreply.github.com>
* NickIAm <NickIAm@users.noreply.github.com>
* Juang, Yi-Lin <frankyjuang@gmail.com>
* jumbi77 <jumbi77@users.noreply.github.com>
* Aditya Basu <ab.aditya.basu@gmail.com>
* ed <s@ocv.me>
* Drew Parsons <dparsons@emerall.com>
* Joel <joelnb@users.noreply.github.com>
* wiserain <mail275@gmail.com>
* Roel Arents <roel.arents@kadaster.nl>
* Shyim <github@shyim.de>

View File

@@ -596,7 +596,7 @@ quashed by adding `--quiet` to the bisync command line.
# NOTICE: If you make changes to this file you MUST do a --resync run. # NOTICE: If you make changes to this file you MUST do a --resync run.
# Run with --dry-run to see what changes will be made. # Run with --dry-run to see what changes will be made.
# Dropbox won't sync some files so filter them away here. # Dropbox wont sync some files so filter them away here.
# See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing # See https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
- .dropbox.attr - .dropbox.attr
- ~*.tmp - ~*.tmp
@@ -1008,7 +1008,7 @@ Your normal workflow might be as follows:
Delete a single file. Delete a single file.
- `delete-glob <dir> <pattern>` - `delete-glob <dir> <pattern>`
Delete a group of files located one level deep in the given directory Delete a group of files located one level deep in the given directory
with names matching a given glob pattern. with names maching a given glob pattern.
- `touch-glob YYYY-MM-DD <dir> <pattern>` - `touch-glob YYYY-MM-DD <dir> <pattern>`
Change modification time on a group of files. Change modification time on a group of files.
- `touch-copy YYYY-MM-DD <source-file> <dest-dir>` - `touch-copy YYYY-MM-DD <source-file> <dest-dir>`

Some files were not shown because too many files have changed in this diff