Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-vfs-em... → v1.61-stab... (12 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 486e713337 | |
| | 46e96918dc | |
| | 639b61de95 | |
| | b03ee4e9e7 | |
| | 176af2b217 | |
| | 6be0644178 | |
| | 0ce5e57c30 | |
| | bc214291d5 | |
| | d3e09d86e0 | |
| | 5a9706ab61 | |
| | cce4340d48 | |
| | 577693e501 | |
.github/dependabot.yml (vendored, 10 changed lines)
@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
.github/workflows/build.yml (vendored, 50 changed lines)
@@ -8,31 +8,29 @@ name: build
on:
push:
branches:
- '**'
- '*'
tags:
- '**'
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true

jobs:
build:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']

include:
- job_name: linux
os: ubuntu-latest
go: '1.20'
go: '1.19'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +41,14 @@ jobs:

- job_name: linux_386
os: ubuntu-latest
go: '1.20'
go: '1.19'
goarch: 386
gotags: cmount
quicktest: true

- job_name: mac_amd64
os: macos-11
go: '1.20'
go: '1.19'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +57,14 @@ jobs:

- job_name: mac_arm64
os: macos-11
go: '1.20'
go: '1.19'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true

- job_name: windows
os: windows-latest
go: '1.20'
go: '1.19'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@ jobs:

- job_name: other_os
os: ubuntu-latest
go: '1.20'
go: '1.19'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true

- job_name: go1.18
- job_name: go1.17
os: ubuntu-latest
go: '1.18'
go: '1.17'
quicktest: true
racequicktest: true

- job_name: go1.19
- job_name: go1.18
os: ubuntu-latest
go: '1.19'
go: '1.18'
quicktest: true
racequicktest: true

@@ -104,7 +102,7 @@ jobs:
fetch-depth: 0

- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -124,7 +122,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse3 libfuse-dev rpm pkg-config
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'

- name: Install Libraries on macOS
@@ -217,10 +215,10 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
@@ -237,9 +235,9 @@ jobs:

# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19
check-latest: true

- name: Install govulncheck
@@ -249,7 +247,7 @@ jobs:
run: govulncheck ./...

android:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
@@ -262,9 +260,9 @@ jobs:

# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version: '1.20'
go-version: 1.19

- name: Go module cache
uses: actions/cache@v3
@@ -352,4 +350,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
if: github.head_ref == '' && github.repository == 'rclone/rclone'
@@ -1,61 +0,0 @@
name: Docker beta build

on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and publish image
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
.github/workflows/build_publish_docker_image.yml (vendored, new file, 26 changed lines)
@@ -0,0 +1,26 @@
name: Docker beta build

on:
push:
branches:
- master

jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
.github/workflows/winget.yml (vendored, 14 changed lines)
@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]

jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}
@@ -2,17 +2,15 @@

linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -27,30 +25,6 @@ issues:
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

exclude-rules:

- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m

linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
@@ -16,7 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |

**This is a work in progress Draft**
MANUAL.html (generated, 757 changed lines): file diff suppressed because it is too large
MANUAL.txt (generated, 1090 changed lines): file diff suppressed because it is too large
@@ -37,7 +37,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -67,7 +66,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release

* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -75,7 +74,8 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.

* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable

Now
@@ -36,7 +36,6 @@ import (
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
@@ -4,6 +4,32 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
package azureblob
|
||||
|
||||
/* FIXME
|
||||
|
||||
Note these Azure SDK bugs which are affecting the backend
|
||||
|
||||
azblob UploadStream produces panic: send on closed channel if input stream has error #19612
|
||||
https://github.com/Azure/azure-sdk-for-go/issues/19612
|
||||
- FIXED by re-implementing UploadStream
|
||||
|
||||
azblob: when using SharedKey credentials, can't reference some blob names with ? in #19613
|
||||
https://github.com/Azure/azure-sdk-for-go/issues/19613
|
||||
- FIXED by url encoding getBlobSVC and getBlockBlobSVC
|
||||
|
||||
Azure Blob Storage paths are not URL-escaped #19475
|
||||
https://github.com/Azure/azure-sdk-for-go/issues/19475
|
||||
- FIXED by url encoding getBlobSVC and getBlockBlobSVC
|
||||
|
||||
Controlling TransferManager #19579
|
||||
https://github.com/Azure/azure-sdk-for-go/issues/19579
|
||||
- FIXED by re-implementing UploadStream
|
||||
|
||||
azblob: blob.StartCopyFromURL doesn't work with UTF-8 characters in the source blob #19614
|
||||
https://github.com/Azure/azure-sdk-for-go/issues/19614
|
||||
- FIXED by url encoding getBlobSVC and getBlockBlobSVC
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
@@ -690,7 +716,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
|
||||
return nil, fmt.Errorf("create azure enviroment credential failed: %w", err)
|
||||
}
|
||||
case opt.UseEmulator:
|
||||
if opt.Account == "" {
|
||||
@@ -933,12 +959,18 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// getBlobSVC creates a blob client
|
||||
func (f *Fs) getBlobSVC(container, containerPath string) *blob.Client {
|
||||
return f.cntSVC(container).NewBlobClient(containerPath)
|
||||
// FIXME the urlEncode here is a workaround for
|
||||
// https://github.com/Azure/azure-sdk-for-go/issues/19613
|
||||
// https://github.com/Azure/azure-sdk-for-go/issues/19475
|
||||
return f.cntSVC(container).NewBlobClient(urlEncode(containerPath))
|
||||
}
|
||||
|
||||
// getBlockBlobSVC creates a block blob client
|
||||
func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client {
|
||||
return f.cntSVC(container).NewBlockBlobClient(containerPath)
|
||||
// FIXME the urlEncode here is a workaround for
|
||||
// https://github.com/Azure/azure-sdk-for-go/issues/19613
|
||||
// https://github.com/Azure/azure-sdk-for-go/issues/19475
|
||||
return f.cntSVC(container).NewBlockBlobClient(urlEncode(containerPath))
|
||||
}
|
||||
|
||||
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
||||
@@ -953,7 +985,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
|
||||
func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
if size == 0 {
|
||||
endsWithSlash := strings.HasSuffix(remote, "/")
|
||||
@@ -964,7 +996,7 @@ func isDirectoryMarker(size int64, metadata map[string]*string, remote string) b
|
||||
// defacto standard for marking blobs as directories.
|
||||
// Note also that the metadata hasn't been normalised to lower case yet
|
||||
for k, v := range metadata {
|
||||
if v != nil && strings.EqualFold(k, "hdi_isfolder") && *v == "true" {
|
||||
if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1471,8 +1503,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
srcBlobSVC := srcObj.getBlobSVC()
|
||||
srcURL := srcBlobSVC.URL()
|
||||
|
||||
tier := blob.AccessTier(f.opt.AccessTier)
|
||||
options := blob.StartCopyFromURLOptions{
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
Tier: &tier,
|
||||
}
|
||||
var startCopy blob.StartCopyFromURLResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -1551,15 +1584,12 @@ func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Set o.metadata from metadata
|
||||
func (o *Object) setMetadata(metadata map[string]*string) {
|
||||
func (o *Object) setMetadata(metadata map[string]string) {
|
||||
if len(metadata) > 0 {
|
||||
// Lower case the metadata
|
||||
o.meta = make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
if v != nil {
|
||||
o.meta[strings.ToLower(k)] = *v
|
||||
}
|
||||
o.meta[strings.ToLower(k)] = v
|
||||
}
|
||||
// Set o.modTime from metadata if it exists and
|
||||
// UseServerModTime isn't in use.
|
||||
@@ -1575,16 +1605,20 @@ func (o *Object) setMetadata(metadata map[string]*string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Get metadata from o.meta
|
||||
func (o *Object) getMetadata() (metadata map[string]*string) {
|
||||
if len(o.meta) == 0 {
|
||||
return nil
|
||||
// Duplicte of setMetadata but taking pointers to strings
|
||||
func (o *Object) setMetadataP(metadata map[string]*string) {
|
||||
if len(metadata) > 0 {
|
||||
// Convert the format of the metadata
|
||||
newMeta := make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
if v != nil {
|
||||
newMeta[k] = *v
|
||||
}
|
||||
}
|
||||
o.setMetadata(newMeta)
|
||||
} else {
|
||||
o.meta = nil
|
||||
}
|
||||
metadata = make(map[string]*string, len(o.meta))
|
||||
for k, v := range o.meta {
|
||||
metadata[k] = &v
|
||||
}
|
||||
return metadata
|
||||
}
|
||||
|
||||
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
|
||||
@@ -1716,7 +1750,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
|
||||
} else {
|
||||
o.accessTier = *info.Properties.AccessTier
|
||||
}
|
||||
o.setMetadata(metadata)
|
||||
o.setMetadataP(metadata)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1727,6 +1761,12 @@ func (o *Object) getBlobSVC() *blob.Client {
|
||||
return o.fs.getBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// getBlockBlobSVC creates a block blob client
|
||||
func (o *Object) getBlockBlobSVC() *blockblob.Client {
|
||||
container, directory := o.split()
|
||||
return o.fs.getBlockBlobSVC(container, directory)
|
||||
}
|
||||
|
||||
// clearMetaData clears enough metadata so readMetaData will re-read it
|
||||
func (o *Object) clearMetaData() {
|
||||
o.modTime = time.Time{}
|
||||
@@ -1792,7 +1832,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
blb := o.getBlobSVC()
|
||||
opt := blob.SetMetadataOptions{}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
|
||||
_, err := blb.SetMetadata(ctx, o.meta, &opt)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1863,6 +1903,83 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return downloadResponse.Body, nil
|
||||
}
|
||||
|
||||
// dontEncode is the characters that do not need percent-encoding
//
// The characters that do not need percent-encoding are a subset of
// the printable ASCII characters: upper-case letters, lower-case
// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
// be replaced with "%" and the two-digit hex value of the byte.
const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
`0123456789` +
`._-/~!$'()*;=:@`)

// noNeedToEncode is a bitmap of characters which don't need % encoding
var noNeedToEncode [256]bool

func init() {
for _, c := range dontEncode {
noNeedToEncode[c] = true
}
}

// urlEncode encodes in with % encoding
func urlEncode(in string) string {
var out bytes.Buffer
for i := 0; i < len(in); i++ {
c := in[i]
if noNeedToEncode[c] {
_ = out.WriteByte(c)
} else {
_, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
}
}
return out.String()
}
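The function above percent-encodes raw bytes rather than going through net/url, so "/" survives unescaped while "?" and spaces do not. The following is a minimal standalone sketch of the same idea (not part of the diff; the package layout and sample path are invented for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// Allow-list mirroring the dontEncode constant above.
const dontEncode = "abcdefghijklmnopqrstuvwxyz" +
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
	"0123456789" +
	"._-/~!$'()*;=:@"

var noNeedToEncode [256]bool

func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}

// urlEncode percent-encodes every byte not in the allow-list,
// the same approach as the helper added in the diff above.
func urlEncode(in string) string {
	var out strings.Builder
	for i := 0; i < len(in); i++ {
		c := in[i]
		if noNeedToEncode[c] {
			out.WriteByte(c)
		} else {
			fmt.Fprintf(&out, "%%%02X", c)
		}
	}
	return out.String()
}

func main() {
	// Space and "?" are escaped, "/" is kept, which is what the
	// SDK workaround referenced in the FIXME comments needs.
	fmt.Println(urlEncode("dir/file name?.txt")) // dir/file%20name%3F.txt
}
```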
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
type poolWrapper struct {
pool *pool.Pool
bufToken chan struct{}
runToken chan struct{}
}

// newPoolWrapper creates an azblob.TransferManager that will use a
// pool.Pool with maximum concurrency as specified.
func (f *Fs) newPoolWrapper(concurrency int) *poolWrapper {
return &poolWrapper{
pool: f.pool,
bufToken: make(chan struct{}, concurrency),
runToken: make(chan struct{}, concurrency),
}
}

// Get implements TransferManager.Get().
func (pw *poolWrapper) Get() []byte {
pw.bufToken <- struct{}{}
return pw.pool.Get()
}

// Put implements TransferManager.Put().
func (pw *poolWrapper) Put(b []byte) {
pw.pool.Put(b)
<-pw.bufToken
}

// Run implements TransferManager.Run().
func (pw *poolWrapper) Run(f func()) {
pw.runToken <- struct{}{}
go func() {
f()
<-pw.runToken
}()
}

// Close implements TransferManager.Close().
func (pw *poolWrapper) Close() {
}
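The two buffered channels above are what cap concurrency: sending a token blocks once all slots are taken, and receiving one frees a slot again. A rough sketch of the same token pattern on its own (illustrative names, not rclone code):

```go
package main

import (
	"fmt"
	"sync"
)

// limiter bounds the number of concurrently running functions with a
// buffered channel of empty structs, the same idea as poolWrapper.Run.
type limiter struct {
	token chan struct{}
	wg    sync.WaitGroup
}

func newLimiter(concurrency int) *limiter {
	return &limiter{token: make(chan struct{}, concurrency)}
}

// Run starts f in a goroutine, blocking first if the limit is reached.
func (l *limiter) Run(f func()) {
	l.token <- struct{}{} // acquire a slot (blocks when full)
	l.wg.Add(1)
	go func() {
		defer func() {
			<-l.token // release the slot
			l.wg.Done()
		}()
		f()
	}()
}

func main() {
	l := newLimiter(2) // at most 2 jobs in flight
	for i := 0; i < 5; i++ {
		i := i
		l.Run(func() { fmt.Println("job", i) })
	}
	l.wg.Wait()
}
```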
// Converts a string into a pointer to a string
|
||||
func pString(s string) *string {
|
||||
return &s
|
||||
@@ -2022,7 +2139,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
rs := readSeekCloser{wrappedReader, bufferReader}
|
||||
options := blockblob.StageBlockOptions{
|
||||
// Specify the transactional md5 for the body, to be validated by the service.
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
|
||||
TransactionalContentMD5: transactionalMD5,
|
||||
}
|
||||
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
@@ -2044,9 +2161,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
return err
|
||||
}
|
||||
|
||||
tier := blob.AccessTier(o.fs.opt.AccessTier)
|
||||
options := blockblob.CommitBlockListOptions{
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
Metadata: o.meta,
|
||||
Tier: &tier,
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
@@ -2090,9 +2208,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
b := bytes.NewReader(buf[:n])
|
||||
rs := &readSeekCloser{Reader: b, Seeker: b}
|
||||
|
||||
tier := blob.AccessTier(o.fs.opt.AccessTier)
|
||||
options := blockblob.UploadOptions{
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
Metadata: o.meta,
|
||||
Tier: &tier,
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
@@ -2262,14 +2381,6 @@ func (o *Object) GetTier() string {
|
||||
return string(o.accessTier)
|
||||
}
|
||||
|
||||
func parseTier(tier string) *blob.AccessTier {
|
||||
if tier == "" {
|
||||
return nil
|
||||
}
|
||||
msTier := blob.AccessTier(tier)
|
||||
return &msTier
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
|
||||
@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
fs.Errorf(object.Name, "Can't create object %v", err)
|
||||
continue
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
err = f.deleteByID(ctx, object.ID, object.Name)
|
||||
checkErr(err)
|
||||
tr.Done(ctx, err)
|
||||
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
if err != nil {
|
||||
fs.Errorf(object, "Can't create object %+v", err)
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
if oldOnly && last != remote {
|
||||
// Check current version of the file
|
||||
if object.Action == "hide" {
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -22,7 +21,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
@@ -430,47 +428,18 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
|
||||
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
uploadPool *pool.Pool
|
||||
ci = fs.GetConfig(ctx)
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
)
|
||||
// If using large chunk size then make a temporary pool
|
||||
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
|
||||
uploadPool = up.f.pool
|
||||
} else {
|
||||
uploadPool = pool.New(
|
||||
time.Duration(up.f.opt.MemoryPoolFlushTime),
|
||||
int(up.chunkSize),
|
||||
ci.Transfers,
|
||||
up.f.opt.MemoryPoolUseMmap,
|
||||
)
|
||||
defer uploadPool.Flush()
|
||||
}
|
||||
// Get an upload token and a buffer
|
||||
getBuf := func() (buf []byte) {
|
||||
up.f.getBuf(true)
|
||||
if !up.doCopy {
|
||||
buf = uploadPool.Get()
|
||||
}
|
||||
return buf
|
||||
}
|
||||
// Put an upload token and a buffer
|
||||
putBuf := func(buf []byte) {
|
||||
if !up.doCopy {
|
||||
uploadPool.Put(buf)
|
||||
}
|
||||
up.f.putBuf(nil, true)
|
||||
}
|
||||
g.Go(func() error {
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
buf := getBuf()
|
||||
buf := up.f.getBuf(up.doCopy)
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
putBuf(buf)
|
||||
up.f.putBuf(buf, up.doCopy)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -484,14 +453,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
|
||||
buf = buf[:reqSize]
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
putBuf(buf)
|
||||
up.f.putBuf(buf, up.doCopy)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
defer putBuf(buf)
|
||||
defer up.f.putBuf(buf, up.doCopy)
|
||||
if !up.doCopy {
|
||||
err = up.transferChunk(gCtx, part, buf)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"github.com/rclone/rclone/backend/box/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -46,6 +45,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -76,11 +76,6 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
type boxCustomClaims struct {
|
||||
jwt.RegisteredClaims
|
||||
BoxSubType string `json:"box_sub_type,omitempty"`
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -183,7 +178,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
|
||||
signingHeaders := getSigningHeaders(boxConfig)
|
||||
queryParams := getQueryParams(boxConfig)
|
||||
client := fshttp.NewClient(ctx)
|
||||
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client)
|
||||
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -199,29 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
||||
return boxConfig, nil
|
||||
}
|
||||
|
||||
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) {
|
||||
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
|
||||
val, err := jwtutil.RandomHex(20)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
|
||||
}
|
||||
|
||||
claims = &boxCustomClaims{
|
||||
RegisteredClaims: jwt.RegisteredClaims{
|
||||
ID: val,
|
||||
Issuer: boxConfig.BoxAppSettings.ClientID,
|
||||
Subject: boxConfig.EnterpriseID,
|
||||
Audience: jwt.ClaimStrings{tokenURL},
|
||||
ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
|
||||
claims = &jws.ClaimSet{
|
||||
Iss: boxConfig.BoxAppSettings.ClientID,
|
||||
Sub: boxConfig.EnterpriseID,
|
||||
Aud: tokenURL,
|
||||
Exp: time.Now().Add(time.Second * 45).Unix(),
|
||||
PrivateClaims: map[string]interface{}{
|
||||
"box_sub_type": boxSubType,
|
||||
"aud": tokenURL,
|
||||
"jti": val,
|
||||
},
|
||||
BoxSubType: boxSubType,
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
}
|
||||
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
|
||||
signingHeaders := map[string]interface{}{
|
||||
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
|
||||
signingHeaders := &jws.Header{
|
||||
Algorithm: "RS256",
|
||||
Typ: "JWT",
|
||||
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
||||
}
|
||||
|
||||
return signingHeaders
|
||||
}
|
||||
|
||||
|
||||
backend/cache/cache.go (vendored, 4 changed lines)
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
fs.Debugf(dir, "list: remove entry: %v", entryRemote)
|
||||
}
|
||||
entries = nil //nolint:ineffassign
|
||||
entries = nil
|
||||
|
||||
// and then iterate over the ones from source (temp Objects will override source ones)
|
||||
var batchDirectories []*Directory
|
||||
@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// StopBackgroundRunners will signal all the runners to stop their work
|
||||
// StopBackgroundRunners will signall all the runners to stop their work
|
||||
// can be triggered from a terminate signal or from testing between runs
|
||||
func (f *Fs) StopBackgroundRunners() {
|
||||
f.cleanupChan <- false
|
||||
|
||||
backend/cache/cache_internal_test.go (vendored, 21 changed lines)
@@ -1098,6 +1098,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
||||
return l, err
|
||||
}
|
||||
|
||||
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = in.Close()
|
||||
}()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = out.Close()
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||
var err error
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine

/*
@@ -351,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}

// join the elements together but unlike path.Join return empty string
// join the elements together but unline path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
@@ -21,7 +21,6 @@ import (
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"github.com/rfjakob/eme"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
@@ -179,7 +178,6 @@ type Cipher struct {
|
||||
buffers sync.Pool // encrypt/decrypt buffers
|
||||
cryptoRand io.Reader // read crypto random numbers from here
|
||||
dirNameEncrypt bool
|
||||
passBadBlocks bool // if set passed bad blocks as zeroed blocks
|
||||
}
|
||||
|
||||
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
|
||||
@@ -191,7 +189,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
||||
dirNameEncrypt: dirNameEncrypt,
|
||||
}
|
||||
c.buffers.New = func() interface{} {
|
||||
return new([blockSize]byte)
|
||||
return make([]byte, blockSize)
|
||||
}
|
||||
err := c.Key(password, salt)
|
||||
if err != nil {
|
||||
@@ -200,16 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Call to set bad block pass through
|
||||
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
|
||||
c.passBadBlocks = passBadBlocks
|
||||
}
|
||||
|
||||
// Key creates all the internal keys from the password passed in using
|
||||
// scrypt.
|
||||
//
|
||||
// If salt is "" we use a fixed salt just to make attackers lives
|
||||
// slightly harder than using no salt.
|
||||
// slighty harder than using no salt.
|
||||
//
|
||||
// Note that empty password makes all 0x00 keys which is used in the
|
||||
// tests.
|
||||
@@ -237,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
}

// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte {
return c.buffers.Get().(*[blockSize]byte)
func (c *Cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}

// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) {
func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf)
}
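The hunk above switches the cipher's buffer pool from `*[blockSize]byte` array pointers to plain `[]byte` slices, adding a length check when a block is returned. A hedged standalone sketch of that pooling pattern in isolation (the block size here is made up; the real constant lives in cipher.go):

```go
package main

import (
	"fmt"
	"sync"
)

const blockSize = 64 * 1024 // illustrative value only

// blockPool hands out fixed-size []byte buffers, as the stable-branch
// getBlock/putBlock above do with Cipher.buffers.
var blockPool = sync.Pool{
	New: func() interface{} { return make([]byte, blockSize) },
}

func getBlock() []byte { return blockPool.Get().([]byte) }

// putBlock rejects wrongly sized buffers before returning them to the pool,
// mirroring the panic in the diff above.
func putBlock(buf []byte) {
	if len(buf) != blockSize {
		panic("bad blocksize returned to pool")
	}
	blockPool.Put(buf)
}

func main() {
	b := getBlock()
	fmt.Println(len(b)) // 65536
	putBlock(b)
}
```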
@@ -613,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||
// crypto random number generator
|
||||
func (n *nonce) fromReader(in io.Reader) error {
|
||||
read, err := readers.ReadFill(in, (*n)[:])
|
||||
read, err := io.ReadFull(in, (*n)[:])
|
||||
if read != fileNonceSize {
|
||||
return fmt.Errorf("short read of nonce: %w", err)
|
||||
}
|
||||
@@ -668,8 +664,8 @@ type encrypter struct {
|
||||
in io.Reader
|
||||
c *Cipher
|
||||
nonce nonce
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -694,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
|
||||
}
|
||||
}
|
||||
// Copy magic into buffer
|
||||
copy((*fh.buf)[:], fileMagicBytes)
|
||||
copy(fh.buf, fileMagicBytes)
|
||||
// Copy nonce into buffer
|
||||
copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
|
||||
copy(fh.buf[fileMagicSize:], fh.nonce[:])
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
@@ -711,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.bufIndex >= fh.bufSize {
|
||||
// Read data
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := (*fh.readBuf)[:blockDataSize]
|
||||
n, err = readers.ReadFill(fh.in, readBuf)
|
||||
readBuf := fh.readBuf[:blockDataSize]
|
||||
n, err = io.ReadFull(fh.in, readBuf)
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return fh.finish(err)
|
||||
}
|
||||
// possibly err != nil here, but we will process the
|
||||
// data and the next call to ReadFill will return 0, err
|
||||
// data and the next call to ReadFull will return 0, err
|
||||
// Encrypt the block using the nonce
|
||||
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = blockHeaderSize + n
|
||||
fh.nonce.increment()
|
||||
}
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
|
||||
fh.bufIndex += n
|
||||
return n, nil
|
||||
}
|
||||
@@ -765,8 +763,8 @@ type decrypter struct {
|
||||
nonce nonce
|
||||
initialNonce nonce
|
||||
c *Cipher
|
||||
buf *[blockSize]byte
|
||||
readBuf *[blockSize]byte
|
||||
buf []byte
|
||||
readBuf []byte
|
||||
bufIndex int
|
||||
bufSize int
|
||||
err error
|
||||
@@ -784,9 +782,9 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
limit: -1,
|
||||
}
|
||||
// Read file header (magic + nonce)
|
||||
readBuf := (*fh.readBuf)[:fileHeaderSize]
|
||||
n, err := readers.ReadFill(fh.rc, readBuf)
|
||||
if n < fileHeaderSize && err == io.EOF {
|
||||
readBuf := fh.readBuf[:fileHeaderSize]
|
||||
_, err := io.ReadFull(fh.rc, readBuf)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
// This read from 0..fileHeaderSize-1 bytes
|
||||
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
|
||||
} else if err != nil {
|
||||
@@ -847,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
|
||||
func (fh *decrypter) fillBuffer() (err error) {
|
||||
// FIXME should overlap the reads with a go-routine and 2 buffers?
|
||||
readBuf := fh.readBuf
|
||||
n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
|
||||
n, err := io.ReadFull(fh.rc, readBuf)
|
||||
if n == 0 {
|
||||
// err can't be nil since:
|
||||
// n == len(buf) if and only if err == nil.
|
||||
return err
|
||||
}
|
||||
// possibly err != nil here, but we will process the data and
|
||||
@@ -856,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
|
||||
|
||||
// Check header + 1 byte exists
|
||||
if n <= blockHeaderSize {
|
||||
if err != nil && err != io.EOF {
|
||||
if err != nil {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
return ErrorEncryptedFileBadHeader
|
||||
}
|
||||
// Decrypt the block using the nonce
|
||||
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
|
||||
if !ok {
|
||||
if err != nil && err != io.EOF {
|
||||
if err != nil {
|
||||
return err // return pending error as it is likely more accurate
|
||||
}
|
||||
if !fh.c.passBadBlocks {
|
||||
return ErrorEncryptedBadBlock
|
||||
}
|
||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
||||
// Zero out the bad block and continue
|
||||
for i := range (*fh.buf)[:n] {
|
||||
(*fh.buf)[i] = 0
|
||||
}
|
||||
return ErrorEncryptedBadBlock
|
||||
}
|
||||
fh.bufIndex = 0
|
||||
fh.bufSize = n - blockHeaderSize
|
||||
@@ -900,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
|
||||
if fh.limit >= 0 && fh.limit < int64(toCopy) {
|
||||
toCopy = int(fh.limit)
|
||||
}
|
||||
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
|
||||
fh.bufIndex += n
|
||||
if fh.limit >= 0 {
|
||||
fh.limit -= int64(n)
|
||||
@@ -911,8 +904,9 @@
return n, nil
}

// calculateUnderlying converts an (offset, limit) in an encrypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file.
// calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying
// file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can
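Several hunks in this file swap `readers.ReadFill` for the standard library's `io.ReadFull`, whose short-read behaviour drives the error handling shown above. A small illustrative sketch of those standard semantics (not rclone code):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)

	// Fewer bytes than requested: io.ReadFull reads what it can and
	// reports io.ErrUnexpectedEOF.
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(n, err) // 3 unexpected EOF

	// Exactly enough bytes: err is nil and n == len(buf).
	n, err = io.ReadFull(strings.NewReader("abcdefgh"), buf)
	fmt.Println(n, err) // 8 <nil>

	// Empty input: plain io.EOF, which the decrypter header check
	// above maps to ErrorEncryptedFileTooShort.
	n, err = io.ReadFull(strings.NewReader(""), buf)
	fmt.Println(n, err) // 0 EOF
}
```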
@@ -27,14 +27,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
|
||||
{"off", NameEncryptionOff, ""},
|
||||
{"standard", NameEncryptionStandard, ""},
|
||||
{"obfuscate", NameEncryptionObfuscated, ""},
|
||||
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""},
|
||||
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
|
||||
} {
|
||||
actual, actualErr := NewNameEncryptionMode(test.in)
|
||||
assert.Equal(t, actual, test.expected)
|
||||
if test.expectedErr == "" {
|
||||
assert.NoError(t, actualErr)
|
||||
} else {
|
||||
assert.EqualError(t, actualErr, test.expectedErr)
|
||||
assert.Error(t, actualErr, test.expectedErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -726,7 +726,7 @@ func TestNonceFromReader(t *testing.T) {
|
||||
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
|
||||
buf = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
err = x.fromReader(buf)
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
}
|
||||
|
||||
func TestNonceFromBuf(t *testing.T) {
|
||||
@@ -1050,7 +1050,7 @@ func TestRandomSource(t *testing.T) {
|
||||
_, _ = source.Read(buf)
|
||||
sink = newRandomSource(1e8)
|
||||
_, err = io.Copy(sink, source)
|
||||
assert.EqualError(t, err, "Error in stream at 1")
|
||||
assert.Error(t, err, "Error in stream")
|
||||
}
|
||||
|
||||
type zeroes struct{}
|
||||
@@ -1167,13 +1167,13 @@ func TestNewEncrypter(t *testing.T) {
|
||||
fh, err := c.newEncrypter(z, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32])
|
||||
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
|
||||
|
||||
// Test error path
|
||||
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
|
||||
fh, err = c.newEncrypter(z, nil)
|
||||
assert.Nil(t, fh)
|
||||
assert.EqualError(t, err, "short read of nonce: EOF")
|
||||
assert.Error(t, err, "short read of nonce")
|
||||
}
|
||||
|
||||
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to
|
||||
@@ -1224,7 +1224,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
|
||||
@@ -1232,7 +1232,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd = newCloseDetector(er)
|
||||
fh, err = c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Error(t, err, "potato")
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// bad magic
|
||||
@@ -1243,7 +1243,7 @@ func TestNewDecrypter(t *testing.T) {
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0copy))
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.Nil(t, fh)
|
||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||
file0copy[i] ^= 0x1
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
}
|
||||
@@ -1495,10 +1495,8 @@ func TestDecrypterRead(t *testing.T) {
|
||||
case i == fileHeaderSize:
|
||||
// This would normally produce an error *except* on the first block
|
||||
expectedErr = nil
|
||||
case i <= fileHeaderSize+blockHeaderSize:
|
||||
expectedErr = ErrorEncryptedFileBadHeader
|
||||
default:
|
||||
expectedErr = ErrorEncryptedBadBlock
|
||||
expectedErr = io.ErrUnexpectedEOF
|
||||
}
|
||||
if expectedErr != nil {
|
||||
assert.EqualError(t, err, expectedErr.Error(), what)
|
||||
@@ -1516,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
|
||||
fh, err := c.newDecrypter(cd)
|
||||
assert.NoError(t, err)
|
||||
_, err = io.ReadAll(fh)
|
||||
assert.EqualError(t, err, "potato")
|
||||
assert.Error(t, err, "potato")
|
||||
assert.Equal(t, 0, cd.closed)
|
||||
|
||||
// Test corrupting the input
|
||||
@@ -1527,26 +1525,15 @@ func TestDecrypterRead(t *testing.T) {
|
||||
file16copy[i] ^= 0xFF
|
||||
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
||||
if i < fileMagicSize {
|
||||
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
|
||||
assert.Nil(t, fh)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
_, err = io.ReadAll(fh)
|
||||
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error())
|
||||
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
|
||||
}
|
||||
file16copy[i] ^= 0xFF
|
||||
}
|
||||
|
||||
// Test that we can corrupt a byte and read zeroes if
|
||||
// passBadBlocks is set
|
||||
copy(file16copy, file16)
|
||||
file16copy[len(file16copy)-1] ^= 0xFF
|
||||
c.passBadBlocks = true
|
||||
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
|
||||
assert.NoError(t, err)
|
||||
buf, err := io.ReadAll(fh)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, make([]byte, 16), buf)
|
||||
}
|
||||
|
||||
func TestDecrypterClose(t *testing.T) {
|
||||
@@ -1567,7 +1554,7 @@ func TestDecrypterClose(t *testing.T) {
|
||||
|
||||
// double close
|
||||
err = fh.Close()
|
||||
assert.EqualError(t, err, ErrorFileClosed.Error())
|
||||
assert.Error(t, err, ErrorFileClosed.Error())
|
||||
assert.Equal(t, 1, cd.closed)
|
||||
|
||||
// try again reading the file this time
|
||||
@@ -1594,6 +1581,8 @@ func TestPutGetBlock(t *testing.T) {
|
||||
block := c.getBlock()
|
||||
c.putBlock(block)
|
||||
c.putBlock(block)
|
||||
|
||||
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
|
||||
}
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
|
||||
@@ -119,15 +119,6 @@ names, or for debugging purposes.`,
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "pass_bad_blocks",
|
||||
Help: `If set this will pass bad blocks through as all 0.
|
||||
|
||||
This should not be set in normal operation, it should only be set if
|
||||
trying to recover a crypted file with errors and it is desired to
|
||||
recover as much of the file as possible.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "filename_encoding",
|
||||
Help: `How to encode the encrypted filename to text string.
|
||||
@@ -147,7 +138,7 @@ length and if it's case sensitive.`,
|
||||
},
|
||||
{
|
||||
Value: "base32768",
|
||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
|
||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
|
||||
},
|
||||
},
|
||||
Advanced: true,
|
||||
@@ -183,7 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
||||
}
|
||||
cipher.setPassBadBlocks(opt.PassBadBlocks)
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
@@ -245,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
@@ -272,7 +262,6 @@ type Options struct {
|
||||
Password2 string `config:"password2"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
}
|
||||
|
||||
@@ -407,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
||||
|
||||
// put implements Put or PutStream
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
if f.opt.NoDataEncryption {
|
||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||
if err == nil && o != nil {
|
||||
@@ -426,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
// Find a hash the destination supports to compute a hash of
|
||||
// the encrypted data
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
if ci.IgnoreChecksum {
|
||||
ht = hash.None
|
||||
}
|
||||
var hasher *hash.MultiHasher
|
||||
if ht != hash.None {
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
@@ -465,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
||||
}
|
||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||
}
|
||||
|
||||
@@ -202,7 +202,7 @@ func init() {
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
}
|
||||
|
||||
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
|
||||
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
|
||||
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
|
||||
OAuth2Config: driveConfig,
|
||||
})
|
||||
@@ -451,11 +451,7 @@ If downloading a file returns the error "This file has been identified
|
||||
as malware or spam and cannot be downloaded" with the error code
|
||||
"cannotDownloadAbusiveFile" then supply this flag to rclone to
|
||||
indicate you acknowledge the risks of downloading the file and rclone
|
||||
will download it anyway.
|
||||
|
||||
Note that if you are using service account it will need Manager
|
||||
permission (not Content Manager) to for this flag to work. If the SA
|
||||
does not have the right permission, Google will just ignore the flag.`,
|
||||
will download it anyway.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "keep_revision_forever",
|
||||
@@ -598,18 +594,6 @@ resource key is no needed.
|
||||
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
|
||||
// Don't encode / as it's a valid name character in drive.
|
||||
Default: encoder.EncodeInvalidUtf8,
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter credentials in the next step.",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
|
||||
@@ -666,7 +650,6 @@ type Options struct {
|
||||
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
|
||||
ResourceKey string `config:"resource_key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote drive server
|
||||
@@ -774,7 +757,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
|
||||
fs.Errorf(f, "Received download limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
|
||||
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
|
||||
fs.Errorf(f, "Received upload limit error: %v", err)
|
||||
return false, fserrors.FatalError(err)
|
||||
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
|
||||
@@ -1135,12 +1118,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
}
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil {
@@ -2899,7 +2876,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
@@ -3346,9 +3322,9 @@ This takes an optional directory to trash which make this easier to
use via the API.

rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir
rclone backend -i untrash drive:directory subdir

Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Use the -i flag to see what would be restored before restoring it.

Result:

@@ -3378,7 +3354,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
Use the -i flag to see what would be copied before copying.
`,
}, {
Name: "exportformats",

@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)

sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {

@@ -13,6 +13,7 @@ import (
"sync"
"time"

"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
@@ -139,6 +140,49 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil
}

// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}

// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
|
||||
|
||||
@@ -536,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
// if the mount failed we have to abort here
|
||||
// if the moint failed we have to abort here
|
||||
}
|
||||
// if the mount succeeded it's now a normal folder in the users root namespace
|
||||
// we disable shared folder mode and proceed normally
|
||||
|
||||
@@ -473,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("didn't get an upload node: %w", err)
|
||||
return nil, fmt.Errorf("didnt got an upload node: %w", err)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "Got Upload node")
|
||||
|
||||
@@ -333,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if size > int64(300e9) {
|
||||
return nil, errors.New("File too big, can't upload")
|
||||
return nil, errors.New("File too big, cant upload")
|
||||
} else if size == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/rclone/ftp"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -315,33 +315,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Return a *textproto.Error if err contains one or nil otherwise
|
||||
func textprotoError(err error) (errX *textproto.Error) {
|
||||
if errors.As(err, &errX) {
|
||||
return errX
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if this FTP error should be retried
|
||||
func isRetriableFtpError(err error) bool {
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserve to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
if isRetriableFtpError(err) {
|
||||
return true, err
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusNotAvailable:
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
@@ -478,7 +463,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular FTP error code then check the connection
|
||||
if tpErr := textprotoError(err); tpErr != nil {
|
||||
var tpErr *textproto.Error
|
||||
if !errors.As(err, &tpErr) {
|
||||
nopErr := c.NoOp()
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
@@ -627,7 +613,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
|
||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||
func translateErrorFile(err error) error {
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorObjectNotFound
|
||||
@@ -638,7 +625,8 @@ func translateErrorFile(err error) error {
|
||||
|
||||
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
||||
func translateErrorDir(err error) error {
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorDirNotFound
|
||||
@@ -929,7 +917,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
|
||||
}
|
||||
err = c.MakeDir(f.dirFromStandardPath(abspath))
|
||||
f.putFtpConnection(&c, err)
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
@@ -1170,7 +1159,8 @@ func (f *ftpReadCloser) Close() error {
|
||||
// mask the error if it was caused by a premature close
|
||||
// NB StatusAboutToSend is to work around a bug in pureftpd
|
||||
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
|
||||
err = nil
|
||||
@@ -1196,26 +1186,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
fd *ftp.Response
|
||||
c *ftp.ServerConn
|
||||
)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
c, err = o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return false, err // getFtpConnection has retries already
|
||||
}
|
||||
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
c, err := o.fs.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
|
||||
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, fmt.Errorf("open: %w", err)
|
||||
}
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
}
|
||||
@@ -1248,10 +1227,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
|
||||
// Ignore error 250 here - send by some servers
|
||||
if errX := textprotoError(err); errX != nil {
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
if err != nil {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusRequestedFileActionOK:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
||||
@@ -82,8 +82,7 @@ func init() {
|
||||
saFile, _ := m.Get("service_account_file")
|
||||
saCreds, _ := m.Get("service_account_credentials")
|
||||
anonymous, _ := m.Get("anonymous")
|
||||
envAuth, _ := m.Get("env_auth")
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
|
||||
if saFile != "" || saCreds != "" || anonymous == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
@@ -93,9 +92,6 @@ func init() {
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
}, {
|
||||
Name: "user_project",
|
||||
Help: "User project.\n\nOptional - needed only for requester pays.",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
@@ -334,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
Default: (encoder.Base |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter credentials in the next step.",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
@@ -352,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ProjectNumber string `config:"project_number"`
|
||||
UserProject string `config:"user_project"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
Anonymous bool `config:"anonymous"`
|
||||
@@ -365,7 +349,6 @@ type Options struct {
|
||||
Decompress bool `config:"decompress"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -517,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
|
||||
}
|
||||
} else if opt.EnvAuth {
|
||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
if err != nil {
|
||||
@@ -563,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Check to see if the object exists
|
||||
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
get = get.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = get.Do()
|
||||
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -627,9 +601,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
directory += "/"
|
||||
}
|
||||
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
if !recurse {
|
||||
list = list.Delimiter("/")
|
||||
}
|
||||
@@ -736,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
||||
return nil, errors.New("can't list buckets without project number")
|
||||
}
|
||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||
if f.opt.UserProject != "" {
|
||||
listBuckets = listBuckets.UserProject(f.opt.UserProject)
|
||||
}
|
||||
for {
|
||||
var buckets *storage.Buckets
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -868,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
list = list.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = list.Do()
|
||||
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -907,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
insertBucket = insertBucket.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
insertBucket = insertBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
_, err = insertBucket.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}, nil)
|
||||
@@ -936,11 +896,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
}
|
||||
return f.cache.Remove(bucket, func() error {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
})
|
||||
@@ -986,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var rewriteResponse *storage.RewriteResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
rewriteRequest = rewriteRequest.Context(ctx)
|
||||
if f.opt.UserProject != "" {
|
||||
rewriteRequest.UserProject(f.opt.UserProject)
|
||||
}
|
||||
rewriteResponse, err = rewriteRequest.Do()
|
||||
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1101,11 +1053,7 @@ func (o *Object) setMetaData(info *storage.Object) {
|
||||
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
get := o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
get = get.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
object, err = get.Do()
|
||||
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1177,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
copyObject = copyObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = copyObject.Do()
|
||||
newObject, err = copyObject.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1198,9 +1142,6 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.UserProject != "" {
|
||||
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
|
||||
}
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1293,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
insertObject = insertObject.Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
newObject, err = insertObject.Do()
|
||||
newObject, err = insertObject.Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1312,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
|
||||
if o.fs.opt.UserProject != "" {
|
||||
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
|
||||
}
|
||||
err = deleteBucket.Do()
|
||||
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return err
|
||||
|
||||
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
|
||||
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
|
||||
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
|
||||
}
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
|
||||
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
|
||||
doneCount++
|
||||
}
|
||||
})
|
||||
|
||||
@@ -42,9 +42,9 @@ for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
||||
|
||||
Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption are required when communicating with
|
||||
the datanodes. Possible values are 'authentication', 'integrity'
|
||||
and 'privacy'. Used only with KERBEROS enabled.`,
|
||||
checks, and wire encryption is required when communicating the the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
|
||||
@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// copyDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
// The operation will only be successful
|
||||
// if the parent-directory of the destination-path exists.
|
||||
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
|
||||
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
|
||||
}
|
||||
|
||||
// moveDirectory moves the directory at the source-path to the destination-path and
|
||||
// returns the resulting api-object if successful.
|
||||
//
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
package hidrive
|
||||
|
||||
// FIXME HiDrive only supports file or folder names of 255 characters or less.
|
||||
// Operations that create files or folders with longer names will throw an HTTP error:
|
||||
// Operations that create files oder folder with longer names will throw a HTTP error:
|
||||
// - 422 Unprocessable Entity
|
||||
// A more graceful way for rclone to handle this may be desirable.
|
||||
|
||||
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("could not access root-prefix: %w", err)
|
||||
}
|
||||
if item.Type != api.HiDriveObjectTypeDirectory {
|
||||
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty")
|
||||
return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1838,12 +1838,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err == nil {
|
||||
// if the object exists delete it
|
||||
err = o.remove(ctx, true)
|
||||
if err != nil && err != fs.ErrorObjectNotFound {
|
||||
// if delete failed then report that, unless it was because the file did not exist after all
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove old object: %w", err)
|
||||
}
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
}
|
||||
// if the object does not exist we can just continue but if the error is something different we should report that
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1930,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
|
||||
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
|
||||
return o.readMetaData(ctx, true)
|
||||
}
|
||||
|
||||
@@ -1951,17 +1951,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
|
||||
opts.Parameters.Set("dl", "true")
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
@@ -266,10 +266,7 @@ type Object struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var (
|
||||
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
|
||||
)
|
||||
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
@@ -313,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
// Check to see if this is a .rclonelink if not found
|
||||
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
|
||||
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
|
||||
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
|
||||
}
|
||||
if err == nil && f.isRegular(fi.Mode()) {
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
return nil, errLinksNeedsSuffix
|
||||
}
|
||||
// It is a file, so use the parent as the root
|
||||
f.root = filepath.Dir(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
@@ -515,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
|
||||
fs.Errorf(dir, "%v", fierr)
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
||||
continue
|
||||
@@ -536,10 +524,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
localPath := filepath.Join(fsDirPath, name)
|
||||
fi, err = os.Stat(localPath)
|
||||
// Quietly skip errors on excluded files and directories
|
||||
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
||||
@@ -552,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
mode = fi.Mode()
|
||||
}
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
if fi.IsDir() {
|
||||
// Ignore directories which are symlinks. These are junction points under windows which
|
||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||
@@ -564,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
||||
newRemote += linkSuffix
|
||||
}
|
||||
// Don't include non directory if not included
|
||||
// we leave directory filtering to the layer above
|
||||
if useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
fso, err := f.newObjectWithInfo(newRemote, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -146,20 +145,6 @@ func TestSymlink(t *testing.T) {
|
||||
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
|
||||
require.Equal(t, fs.ErrorObjectNotFound, err)
|
||||
|
||||
// Check that NewFs works with the suffixed version and --links
|
||||
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.Equal(t, dir, f2.(*Fs).root)
|
||||
|
||||
// Check that NewFs doesn't see the non suffixed version with --links
|
||||
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
|
||||
"links": "true",
|
||||
})
|
||||
require.Equal(t, errLinksNeedsSuffix, err)
|
||||
require.Nil(t, f2)
|
||||
|
||||
// Check reading the object
|
||||
in, err := o.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -410,107 +395,3 @@ func TestFilter(t *testing.T) {
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func testFilterSymlink(t *testing.T, copyLinks bool) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
|
||||
r.WriteFile("included.file", "included file", when)
|
||||
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
|
||||
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
|
||||
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
|
||||
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
|
||||
|
||||
defer func() {
|
||||
// Reset -L/-l mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Lstat
|
||||
}()
|
||||
if copyLinks {
|
||||
// Set fs into "-L" mode
|
||||
f.opt.FollowSymlinks = true
|
||||
f.opt.TranslateSymlinks = false
|
||||
f.lstat = os.Stat
|
||||
} else {
|
||||
// Set fs into "-l" mode
|
||||
f.opt.FollowSymlinks = false
|
||||
f.opt.TranslateSymlinks = true
|
||||
f.lstat = os.Lstat
|
||||
}
|
||||
|
||||
// Check set up for filtering
|
||||
assert.True(t, f.Features().FilterAware)
|
||||
|
||||
// Reset global error count
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
// Add a filter
|
||||
ctx, fi := filter.AddConfig(ctx)
|
||||
require.NoError(t, fi.AddRule("+ included.file"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir/**"))
|
||||
if copyLinks {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
|
||||
} else {
|
||||
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
|
||||
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
|
||||
}
|
||||
require.NoError(t, fi.AddRule("- *"))
|
||||
|
||||
// Check listing without use filter flag
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
if copyLinks {
|
||||
// Check 1 global errors one for each dangling symlink
|
||||
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
} else {
|
||||
// Check 0 global errors as dangling symlink copied properly
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
}
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Add user filter flag
|
||||
ctx = filter.SetUseFilter(ctx, true)
|
||||
|
||||
// Check listing with use filter flag
|
||||
entries, err = f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
if copyLinks {
|
||||
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
|
||||
} else {
|
||||
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
// Check listing through a symlink still works
|
||||
entries, err = f.List(ctx, "included.dir")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
|
||||
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
func TestFilterSymlinkCopyLinks(t *testing.T) {
|
||||
testFilterSymlink(t, true)
|
||||
}
|
||||
|
||||
func TestFilterSymlinkLinks(t *testing.T) {
|
||||
testFilterSymlink(t, false)
|
||||
}
|
||||
|
||||
@@ -91,7 +91,7 @@ func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
|
||||
// The types of t.Sec and t.Nsec vary from int32 to int64 on
|
||||
// different Linux architectures so we need to cast them to
|
||||
// int64 here and hence need to quiet the linter about
|
||||
// unnecessary casts.
|
||||
// unecessary casts.
|
||||
//
|
||||
// nolint: unconvert
|
||||
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
|
||||
|
||||
@@ -83,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
|
||||
permanently delete objects instead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_https",
|
||||
Help: `Use HTTPS for transfers.
|
||||
|
||||
MEGA uses plain text HTTP connections by default.
|
||||
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
|
||||
Enabling this will force MEGA to use HTTPS for all transfers.
|
||||
HTTPS is normally not necessary since all data is already encrypted anyway.
|
||||
Enabling it will increase CPU usage and add network overhead.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -111,7 +100,6 @@ type Options struct {
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
UseHTTPS bool `config:"use_https"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -216,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...interface{}) {
|
||||
fs.Infof("*go-mega*", format, v...)
|
||||
})
|
||||
|
||||
@@ -819,8 +819,6 @@ func (f *Fs) getAuth(req *http.Request) error {
|
||||
// Set Authorization header
|
||||
dataHeader := generateDataHeader(f)
|
||||
path := req.URL.RequestURI()
|
||||
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
|
||||
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
|
||||
actionHeader := req.Header["X-Akamai-ACS-Action"][0]
|
||||
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
|
||||
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
|
||||
|
||||
@@ -126,7 +126,6 @@ type HashesType struct {
|
||||
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
|
||||
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
|
||||
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
|
||||
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
|
||||
}
|
||||
|
||||
// FileFacet groups file-related data on OneDrive into a single structure.
|
||||
|
||||
@@ -259,48 +259,6 @@ this flag there.
|
||||
At the time of writing this only works with OneDrive personal paid accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hash_type",
|
||||
Default: "auto",
|
||||
Help: `Specify the hash in use for the backend.
|
||||
|
||||
This specifies the hash type in use. If set to "auto" it will use the
|
||||
default hash which is QuickXorHash.
|
||||
|
||||
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
|
||||
Personal. For 1.62 and later the default is to use a QuickXorHash for
|
||||
all onedrive types. If an SHA1 hash is desired then set this option
|
||||
accordingly.
|
||||
|
||||
From July 2023 QuickXorHash will be the only available hash for
|
||||
both OneDrive for Business and OneDriver Personal.
|
||||
|
||||
This can be set to "none" to not use any hashes.
|
||||
|
||||
If the hash requested does not exist on the object, it will be
|
||||
returned as an empty string which is treated as a missing hash by
|
||||
rclone.
|
||||
`,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "auto",
|
||||
Help: "Rclone chooses the best hash",
|
||||
}, {
|
||||
Value: "quickxor",
|
||||
Help: "QuickXor",
|
||||
}, {
|
||||
Value: "sha1",
|
||||
Help: "SHA1",
|
||||
}, {
|
||||
Value: "sha256",
|
||||
Help: "SHA256",
|
||||
}, {
|
||||
Value: "crc32",
|
||||
Help: "CRC32",
|
||||
}, {
|
||||
Value: "none",
|
||||
Help: "None - don't use any hashes",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -553,7 +511,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
|
||||
`)
|
||||
case "url_end":
|
||||
siteURL := config.Result
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
|
||||
match := re.FindStringSubmatch(siteURL)
|
||||
if len(match) == 2 {
|
||||
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
|
||||
@@ -639,7 +597,6 @@ type Options struct {
|
||||
LinkScope string `config:"link_scope"`
|
||||
LinkType string `config:"link_type"`
|
||||
LinkPassword string `config:"link_password"`
|
||||
HashType string `config:"hash_type"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -656,7 +613,6 @@ type Fs struct {
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
driveID string // ID to use for querying Microsoft Graph
|
||||
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
|
||||
hashType hash.Type // type of the hash we are using
|
||||
}
|
||||
|
||||
// Object describes a OneDrive object
|
||||
@@ -670,7 +626,8 @@ type Object struct {
|
||||
size int64 // size of the object
|
||||
modTime time.Time // modification time of the object
|
||||
id string // ID of the object
|
||||
hash string // Hash of the content, usually QuickXorHash but set as hash_type
|
||||
sha1 string // SHA-1 of the object content
|
||||
quickxorhash string // QuickXorHash of the object content
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
}
|
||||
|
||||
@@ -925,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
hashType: QuickXorHashType,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -935,15 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}).Fill(ctx, f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Set the user defined hash
|
||||
if opt.HashType == "auto" || opt.HashType == "" {
|
||||
opt.HashType = QuickXorHashType.String()
|
||||
}
|
||||
err = f.hashType.Set(opt.HashType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Disable change polling in China region
|
||||
// See: https://github.com/rclone/rclone/issues/6444
|
||||
if f.opt.Region == regionCN {
|
||||
@@ -1609,7 +1556,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(f.hashType)
|
||||
if f.driveType == driveTypePersonal {
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
return hash.Set(QuickXorHashType)
|
||||
}
|
||||
|
||||
// PublicLink returns a link for downloading without account.
|
||||
@@ -1724,10 +1674,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
token := make(chan struct{}, f.ci.Checkers)
|
||||
var wg sync.WaitGroup
|
||||
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Failed to list %q: %v", path, err)
|
||||
return nil
|
||||
}
|
||||
err = entries.ForObjectError(func(obj fs.Object) error {
|
||||
o, ok := obj.(*Object)
|
||||
if !ok {
|
||||
@@ -1822,8 +1768,14 @@ func (o *Object) rootPath() string {
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t == o.fs.hashType {
|
||||
return o.hash, nil
|
||||
if o.fs.driveType == driveTypePersonal {
|
||||
if t == hash.SHA1 {
|
||||
return o.sha1, nil
|
||||
}
|
||||
} else {
|
||||
if t == QuickXorHashType {
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
@@ -1854,23 +1806,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
file := info.GetFile()
|
||||
if file != nil {
|
||||
o.mimeType = file.MimeType
|
||||
o.hash = ""
|
||||
switch o.fs.hashType {
|
||||
case QuickXorHashType:
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.hash = hex.EncodeToString(h)
|
||||
}
|
||||
if file.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
}
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.quickxorhash = hex.EncodeToString(h)
|
||||
}
|
||||
case hash.SHA1:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
case hash.SHA256:
|
||||
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
|
||||
case hash.CRC32:
|
||||
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
|
||||
}
|
||||
}
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
|
||||
@@ -7,40 +7,51 @@
|
||||
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
package quickxorhash
|
||||
|
||||
// This code was ported from a fast C-implementation from
|
||||
// https://github.com/namazso/QuickXorHash
|
||||
// which has licenced as BSD Zero Clause License
|
||||
//
|
||||
// BSD Zero Clause License
|
||||
//
|
||||
// Copyright (c) 2022 namazso <admin@namazso.eu>
|
||||
//
|
||||
// Permission to use, copy, modify, and/or distribute this software for any
|
||||
// purpose with or without fee is hereby granted.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
// PERFORMANCE OF THIS SOFTWARE.
|
||||
// This code was ported from the code snippet linked from
|
||||
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
|
||||
// Which has the copyright
|
||||
|
||||
import "hash"
|
||||
// ------------------------------------------------------------------------------
|
||||
// Copyright (c) 2016 Microsoft Corporation
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
// ------------------------------------------------------------------------------
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockSize is the preferred size for hashing
|
||||
BlockSize = 64
|
||||
// Size of the output checksum
|
||||
Size = 20
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = shift * widthInBits
|
||||
Size = 20
|
||||
bitsInLastCell = 32
|
||||
shift = 11
|
||||
widthInBits = 8 * Size
|
||||
dataSize = (widthInBits-1)/64 + 1
|
||||
)
|
||||
|
||||
type quickXorHash struct {
|
||||
data [dataSize]byte
|
||||
size uint64
|
||||
data [dataSize]uint64
|
||||
lengthSoFar uint64
|
||||
shiftSoFar int
|
||||
}
|
||||
|
||||
// New returns a new hash.Hash computing the quickXorHash checksum.
|
||||
@@ -59,37 +70,94 @@ func New() hash.Hash {
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (q *quickXorHash) Write(p []byte) (n int, err error) {
|
||||
var i int
|
||||
// fill last remain
|
||||
lastRemain := q.size % dataSize
|
||||
if lastRemain != 0 {
|
||||
i += xorBytes(q.data[lastRemain:], p)
|
||||
currentshift := q.shiftSoFar
|
||||
|
||||
// The bitvector where we'll start xoring
|
||||
vectorArrayIndex := currentshift / 64
|
||||
|
||||
// The position within the bit vector at which we begin xoring
|
||||
vectorOffset := currentshift % 64
|
||||
iterations := len(p)
|
||||
if iterations > widthInBits {
|
||||
iterations = widthInBits
|
||||
}
|
||||
|
||||
if i != len(p) {
|
||||
for len(p)-i >= dataSize {
|
||||
i += xorBytes(q.data[:], p[i:])
|
||||
for i := 0; i < iterations; i++ {
|
||||
isLastCell := vectorArrayIndex == len(q.data)-1
|
||||
var bitsInVectorCell int
|
||||
if isLastCell {
|
||||
bitsInVectorCell = bitsInLastCell
|
||||
} else {
|
||||
bitsInVectorCell = 64
|
||||
}
|
||||
|
||||
// There's at least 2 bitvectors before we reach the end of the array
|
||||
if vectorOffset <= bitsInVectorCell-8 {
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
|
||||
}
|
||||
} else {
|
||||
index1 := vectorArrayIndex
|
||||
var index2 int
|
||||
if isLastCell {
|
||||
index2 = 0
|
||||
} else {
|
||||
index2 = vectorArrayIndex + 1
|
||||
}
|
||||
low := byte(bitsInVectorCell - vectorOffset)
|
||||
|
||||
xoredByte := byte(0)
|
||||
for j := i; j < len(p); j += widthInBits {
|
||||
xoredByte ^= p[j]
|
||||
}
|
||||
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
|
||||
q.data[index2] ^= uint64(xoredByte) >> low
|
||||
}
|
||||
vectorOffset += shift
|
||||
for vectorOffset >= bitsInVectorCell {
|
||||
if isLastCell {
|
||||
vectorArrayIndex = 0
|
||||
} else {
|
||||
vectorArrayIndex = vectorArrayIndex + 1
|
||||
}
|
||||
vectorOffset -= bitsInVectorCell
|
||||
}
|
||||
xorBytes(q.data[:], p[i:])
|
||||
}
|
||||
q.size += uint64(len(p))
|
||||
|
||||
// Update the starting position in a circular shift pattern
|
||||
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
|
||||
|
||||
q.lengthSoFar += uint64(len(p))
|
||||
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Calculate the current checksum
|
||||
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
|
||||
for i := 0; i < dataSize; i++ {
|
||||
shift := (i * 11) % 160
|
||||
shiftBytes := shift / 8
|
||||
shiftBits := shift % 8
|
||||
shifted := int(q.data[i]) << shiftBits
|
||||
h[shiftBytes] ^= byte(shifted)
|
||||
h[shiftBytes+1] ^= byte(shifted >> 8)
|
||||
func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// Output the data as little endian bytes
|
||||
ph := 0
|
||||
for i := 0; i < len(q.data)-1; i++ {
|
||||
d := q.data[i]
|
||||
_ = h[ph+7] // bounds check
|
||||
h[ph+0] = byte(d >> (8 * 0))
|
||||
h[ph+1] = byte(d >> (8 * 1))
|
||||
h[ph+2] = byte(d >> (8 * 2))
|
||||
h[ph+3] = byte(d >> (8 * 3))
|
||||
h[ph+4] = byte(d >> (8 * 4))
|
||||
h[ph+5] = byte(d >> (8 * 5))
|
||||
h[ph+6] = byte(d >> (8 * 6))
|
||||
h[ph+7] = byte(d >> (8 * 7))
|
||||
ph += 8
|
||||
}
|
||||
h[0] ^= h[20]
|
||||
// remaining 32 bits
|
||||
d := q.data[len(q.data)-1]
|
||||
h[Size-4] = byte(d >> (8 * 0))
|
||||
h[Size-3] = byte(d >> (8 * 1))
|
||||
h[Size-2] = byte(d >> (8 * 2))
|
||||
h[Size-1] = byte(d >> (8 * 3))
|
||||
|
||||
// XOR the file length with the least significant bits in little endian format
|
||||
d := q.size
|
||||
d = q.lengthSoFar
|
||||
h[Size-8] ^= byte(d >> (8 * 0))
|
||||
h[Size-7] ^= byte(d >> (8 * 1))
|
||||
h[Size-6] ^= byte(d >> (8 * 2))
|
||||
@@ -106,7 +174,7 @@ func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
|
||||
// It does not change the underlying hash state.
|
||||
func (q *quickXorHash) Sum(b []byte) []byte {
|
||||
hash := q.checkSum()
|
||||
return append(b, hash[:Size]...)
|
||||
return append(b, hash[:]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
@@ -128,10 +196,8 @@ func (q *quickXorHash) BlockSize() int {
|
||||
}
|
||||
|
||||
// Sum returns the quickXorHash checksum of the data.
|
||||
func Sum(data []byte) (h [Size]byte) {
|
||||
func Sum(data []byte) [Size]byte {
|
||||
var d quickXorHash
|
||||
_, _ = d.Write(data)
|
||||
s := d.checkSum()
|
||||
copy(h[:], s[:])
|
||||
return h
|
||||
return d.checkSum()
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -167,16 +166,3 @@ func TestReset(t *testing.T) {
|
||||
|
||||
// check interface
|
||||
var _ hash.Hash = (*quickXorHash)(nil)
|
||||
|
||||
func BenchmarkQuickXorHash(b *testing.B) {
|
||||
b.SetBytes(1 << 20)
|
||||
buf := make([]byte, 1<<20)
|
||||
rand.Read(buf)
|
||||
h := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
h.Reset()
|
||||
h.Write(buf)
|
||||
h.Sum(nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
//go:build !go1.20
|
||||
|
||||
package quickxorhash
|
||||
|
||||
func xorBytes(dst, src []byte) int {
|
||||
n := len(dst)
|
||||
if len(src) < n {
|
||||
n = len(src)
|
||||
}
|
||||
if n == 0 {
|
||||
return 0
|
||||
}
|
||||
dst = dst[:n]
|
||||
//src = src[:n]
|
||||
src = src[:len(dst)] // remove bounds check in loop
|
||||
for i := range dst {
|
||||
dst[i] ^= src[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
//go:build go1.20
|
||||
|
||||
package quickxorhash
|
||||
|
||||
import "crypto/subtle"
|
||||
|
||||
func xorBytes(dst, src []byte) int {
|
||||
return subtle.XORBytes(dst, src, dst)
|
||||
}
|
||||
@@ -1,145 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
)
|
||||
|
||||
const (
|
||||
sseDefaultAlgorithm = "AES256"
|
||||
)
|
||||
|
||||
func getSha256(p []byte) []byte {
|
||||
h := sha256.New()
|
||||
h.Write(p)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
func validateSSECustomerKeyOptions(opt *Options) error {
|
||||
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
|
||||
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
|
||||
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
|
||||
}
|
||||
if opt.SSEKMSKeyID != "" {
|
||||
return nil
|
||||
}
|
||||
err := populateSSECustomerKeys(opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateSSECustomerKeys(opt *Options) error {
|
||||
if opt.SSECustomerKeyFile != "" {
|
||||
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
|
||||
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
|
||||
}
|
||||
opt.SSECustomerKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
if opt.SSECustomerKey != "" {
|
||||
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
|
||||
}
|
||||
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
|
||||
if opt.SSECustomerKeySha256 == "" {
|
||||
opt.SSECustomerKeySha256 = sha256Checksum
|
||||
} else {
|
||||
if opt.SSECustomerKeySha256 != sha256Checksum {
|
||||
return fmt.Errorf("the computed SHA256 checksum "+
|
||||
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
|
||||
sha256Checksum, opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
if opt.SSECustomerAlgorithm == "" {
|
||||
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
|
||||
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
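populateSSECustomerKeys above derives sse_customer_key_sha256 from the base64-encoded key: decode it, take the SHA-256 of the raw bytes, and re-encode the digest as base64. A standalone sketch of that derivation follows (illustrative only; the key value is an all-zero placeholder, not a real key).

package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

func main() {
    // A base64-encoded 256-bit key, as would be stored in sse_customer_key
    // (placeholder value for illustration only).
    encodedKey := base64.StdEncoding.EncodeToString(make([]byte, 32))

    decoded, err := base64.StdEncoding.DecodeString(encodedKey)
    if err != nil {
        panic(err)
    }

    // Same derivation as populateSSECustomerKeys: SHA-256 of the raw key,
    // re-encoded as base64 for the sse_customer_key_sha256 option.
    sum := sha256.Sum256(decoded)
    fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}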
@@ -9,8 +9,6 @@ import (
    "errors"
    "net/http"
    "os"
    "path"
    "strings"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/common/auth"
@@ -20,33 +18,15 @@ import (
    "github.com/rclone/rclone/fs/fshttp"
)

func expandPath(filepath string) (expandedPath string) {
    if filepath == "" {
        return filepath
    }
    cleanedPath := path.Clean(filepath)
    expandedPath = cleanedPath
    if strings.HasPrefix(cleanedPath, "~") {
        rest := cleanedPath[2:]
        home, err := os.UserHomeDir()
        if err != nil {
            return expandedPath
        }
        expandedPath = path.Join(home, rest)
    }
    return
}

func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
    switch opt.Provider {
    case instancePrincipal:
        return auth.InstancePrincipalConfigurationProvider()
    case userPrincipal:
        expandConfigFilePath := expandPath(opt.ConfigFile)
        if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
        if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
        }
        return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
        return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
    case resourcePrincipal:
        return auth.ResourcePrincipalConfigurationProvider()
    case noAuth:

@@ -65,8 +65,8 @@ a bucket or with a bucket and path.
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup oos:bucket/path/to/object
    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

@@ -74,7 +74,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err erro
|
||||
BucketName: common.String(srcBucket),
|
||||
CopyObjectDetails: copyObjectDetails,
|
||||
}
|
||||
useBYOKCopyObject(f, &req)
|
||||
var resp objectstorage.CopyObjectResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CopyObject(ctx, req)
|
||||
|
||||
@@ -87,7 +87,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(objectPath),
|
||||
}
|
||||
useBYOKHeadObject(o.fs, &req)
|
||||
var response objectstorage.HeadObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
@@ -100,7 +99,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
fs.Errorf(o, "Failed to head object: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
o.fs.cache.MarkOK(bucketName)
|
||||
@@ -333,7 +331,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
o.applyGetObjectOptions(&req, options...)
|
||||
useBYOKGetObject(o.fs, &req)
|
||||
|
||||
var resp objectstorage.GetObjectResponse
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
@@ -435,7 +433,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
useBYOKUpload(o.fs, &uploadRequest)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
@@ -509,10 +506,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
useBYOKPutObject(o.fs, &req)
|
||||
var resp objectstorage.PutObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.PutObject(ctx, req)
|
||||
resp, err := o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -17,7 +17,9 @@ const (
    defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
    defaultUploadConcurrency   = 10
    maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
    minSleep                   = 10 * time.Millisecond
    minSleep                   = 100 * time.Millisecond
    maxSleep                   = 5 * time.Minute
    decayConstant              = 1 // bigger for slower decay, exponential
    defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)

@@ -45,28 +47,23 @@ https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfromins
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
SSECustomerKey string `config:"sse_customer_key"`
|
||||
SSECustomerKeyFile string `config:"sse_customer_key_file"`
|
||||
SSECustomerKeySha256 string `config:"sse_customer_key_sha256"`
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
}
|
||||
|
||||
func newOptions() []fs.Option {
|
||||
@@ -126,22 +123,6 @@ func newOptions() []fs.Option {
|
||||
Value: "Default",
|
||||
Help: "Use the default profile",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
|
||||
Name: "storage_tier",
|
||||
Help: "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
|
||||
Default: "Standard",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Standard",
|
||||
Help: "Standard storage tier, this is the default tier",
|
||||
}, {
|
||||
Value: "InfrequentAccess",
|
||||
Help: "InfrequentAccess storage tier",
|
||||
}, {
|
||||
Value: "Archive",
|
||||
Help: "Archive storage tier",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
@@ -257,59 +238,5 @@ creation permissions.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_file",
|
||||
Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
|
||||
with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.'`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
|
||||
encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
|
||||
needed. For more information, see Using Your Own Keys for Server-Side Encryption
|
||||
(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key_sha256",
|
||||
Help: `If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
|
||||
key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for
|
||||
Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_kms_key_id",
|
||||
Help: `if using your own master key in vault, this header specifies the
|
||||
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
|
||||
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
|
||||
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_algorithm",
|
||||
Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
|
||||
Object Storage supports "AES256" as the encryption algorithm. For more information, see
|
||||
Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}, {
|
||||
Value: sseDefaultAlgorithm,
|
||||
Help: sseDefaultAlgorithm,
|
||||
}},
|
||||
}}
|
||||
}
|
||||
|
||||
@@ -59,27 +59,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = validateSSECustomerKeyOptions(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
objectStorageClient, err := newObjectStorageClient(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
|
||||
// Set pacer retries to 2 (1 try and 1 retry) because we are
|
||||
// relying on SDK retry mechanism, but we allow 2 attempts to
|
||||
// retry directory listings after XMLSyntaxError
|
||||
pc.SetRetries(2)
|
||||
p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
srv: objectStorageClient,
|
||||
cache: bucket.NewCache(),
|
||||
pacer: pc,
|
||||
pacer: fs.NewPacer(ctx, p),
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
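A quick note on the pacer change in this hunk: the newer side builds an S3-style pacer and caps pacer retries at 2 (relying on the OCI SDK's own retries), while the v1.61 side uses the default exponential pacer built from minSleep, maxSleep and decayConstant. The helper below is an illustrative sketch only (newPacers does not exist in the backend) and assumes the fs and lib/pacer import paths used elsewhere in rclone.

package oracleobjectstorage

import (
    "context"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/pacer" // assumed import path
)

// newPacers shows the two pacer configurations this hunk switches between.
func newPacers(ctx context.Context) (s3Style, defaultStyle *fs.Pacer) {
    // Newer side: S3 calculator, retries capped at 2 because the SDK retries too.
    s3Style = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
    s3Style.SetRetries(2)

    // v1.61 side: default exponential calculator with min/max sleep and decay.
    defaultStyle = fs.NewPacer(ctx, pacer.NewDefault(
        pacer.MinSleep(minSleep),
        pacer.MaxSleep(maxSleep),
        pacer.DecayConstant(decayConstant),
    ))
    return s3Style, defaultStyle
}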
@@ -589,7 +581,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
if ignoreErr != nil {
|
||||
// fs.Debugf(f, "ignoring error %s", ignoreErr)
|
||||
}
|
||||
} else {
|
||||
// fs.Debugf(f, "ignoring %s", what)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
|
||||
@@ -1,535 +0,0 @@
|
||||
// Package api has type definitions for pikpak
|
||||
//
|
||||
// Manually obtained from the API responses using Browse Dev. Tool and https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// "2022-09-17T14:31:06.056+08:00"
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the pikpak API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
if string(data) == "null" || string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
KindOfFolder = "drive#folder"
|
||||
KindOfFile = "drive#file"
|
||||
KindOfFileList = "drive#fileList"
|
||||
KindOfResumable = "drive#resumable"
|
||||
KindOfForm = "drive#form"
|
||||
ThumbnailSizeS = "SIZE_SMALL"
|
||||
ThumbnailSizeM = "SIZE_MEDIUM"
|
||||
ThumbnailSizeL = "SIZE_LARGE"
|
||||
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
|
||||
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
|
||||
PhaseTypeError = "PHASE_TYPE_ERROR"
|
||||
PhaseTypePending = "PHASE_TYPE_PENDING"
|
||||
UploadTypeForm = "UPLOAD_TYPE_FORM"
|
||||
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
|
||||
ListLimit = 100
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Error details api error from pikpak
|
||||
type Error struct {
|
||||
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
|
||||
Code int `json:"error_code"`
|
||||
URL string `json:"error_url,omitempty"`
|
||||
Message string `json:"error_description,omitempty"`
|
||||
// can have either of `error_details` or `details`
|
||||
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
|
||||
Details []*ErrorDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorDetails contains further details of api error
|
||||
type ErrorDetails struct {
|
||||
Type string `json:"@type,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Metadata struct {
|
||||
} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||
Message string `json:"message,omitempty"`
|
||||
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
||||
Detail string `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Filters contains parameters for filters when listing.
|
||||
//
|
||||
// possible operators
|
||||
// * in: a list of comma-separated string
|
||||
// * eq: "true" or "false"
|
||||
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
|
||||
type Filters struct {
|
||||
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
|
||||
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
|
||||
Kind map[string]string `json:"kind,omitempty"` // "eq"
|
||||
Starred map[string]bool `json:"starred,omitempty"` // "eq"
|
||||
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
|
||||
}
|
||||
|
||||
// Set sets filter values using field name, operator and corresponding value
|
||||
func (f *Filters) Set(field, operator, value string) {
|
||||
if value == "" {
|
||||
// UNSET for empty values
|
||||
return
|
||||
}
|
||||
r := reflect.ValueOf(f)
|
||||
fd := reflect.Indirect(r).FieldByName(field)
|
||||
if v, err := strconv.ParseBool(value); err == nil {
|
||||
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
|
||||
} else {
|
||||
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Common Elements
|
||||
|
||||
// Link contains a download URL for opening files
|
||||
type Link struct {
|
||||
URL string `json:"url"`
|
||||
Token string `json:"token"`
|
||||
Expire Time `json:"expire"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// Valid reports whether l is non-nil, has a URL, and is not expired.
|
||||
func (l *Link) Valid() bool {
|
||||
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
|
||||
}
|
||||
|
||||
// URL is a basic form of URL
|
||||
type URL struct {
|
||||
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Base Elements
|
||||
|
||||
// FileList contains a list of File elements
|
||||
type FileList struct {
|
||||
Kind string `json:"kind,omitempty"` // drive#fileList
|
||||
Files []*File `json:"files,omitempty"`
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
Version string `json:"version,omitempty"`
|
||||
VersionOutdated bool `json:"version_outdated,omitempty"`
|
||||
}
|
||||
|
||||
// File is a basic element representing a single file object
|
||||
//
|
||||
// There are two types of download links,
|
||||
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
|
||||
// 2) the other from File.Medias[].Link.URL.
|
||||
// Empirically, 2) is less restrictive to multiple concurrent range-requests
|
||||
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
|
||||
// However, it is not generally applicable as it is only for media.
|
||||
type File struct {
|
||||
Apps []*FileApp `json:"apps,omitempty"`
|
||||
Audit *FileAudit `json:"audit,omitempty"`
|
||||
Collection string `json:"collection,omitempty"` // TODO
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
DeleteTime Time `json:"delete_time,omitempty"`
|
||||
FileCategory string `json:"file_category,omitempty"`
|
||||
FileExtension string `json:"file_extension,omitempty"`
|
||||
FolderType string `json:"folder_type,omitempty"`
|
||||
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Links *FileLinks `json:"links,omitempty"`
|
||||
Md5Checksum string `json:"md5_checksum,omitempty"`
|
||||
Medias []*Media `json:"medias,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
||||
Name string `json:"name,omitempty"`
|
||||
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
||||
OriginalURL string `json:"original_url,omitempty"`
|
||||
Params *FileParams `json:"params,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
Phase string `json:"phase,omitempty"`
|
||||
Revision int `json:"revision,omitempty,string"`
|
||||
Size int64 `json:"size,omitempty,string"`
|
||||
SortName string `json:"sort_name,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
|
||||
Starred bool `json:"starred,omitempty"`
|
||||
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
||||
Trashed bool `json:"trashed,omitempty"`
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
||||
WebContentLink string `json:"web_content_link,omitempty"`
|
||||
Writable bool `json:"writable,omitempty"`
|
||||
}
|
||||
|
||||
// FileLinks includes links to file at backend
|
||||
type FileLinks struct {
|
||||
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
|
||||
}
|
||||
|
||||
// FileAudit contains audit information for the file
|
||||
type FileAudit struct {
|
||||
Status string `json:"status,omitempty"` // "STATUS_OK"
|
||||
Message string `json:"message,omitempty"`
|
||||
Title string `json:"title,omitempty"`
|
||||
}
|
||||
|
||||
// Media contains info about supported version of media, e.g. original, transcoded, etc
|
||||
type Media struct {
|
||||
MediaID string `json:"media_id,omitempty"`
|
||||
MediaName string `json:"media_name,omitempty"`
|
||||
Video struct {
|
||||
Height int `json:"height,omitempty"`
|
||||
Width int `json:"width,omitempty"`
|
||||
Duration int64 `json:"duration,omitempty"`
|
||||
BitRate int `json:"bit_rate,omitempty"`
|
||||
FrameRate int `json:"frame_rate,omitempty"`
|
||||
VideoCodec string `json:"video_codec,omitempty"`
|
||||
AudioCodec string `json:"audio_codec,omitempty"`
|
||||
VideoType string `json:"video_type,omitempty"`
|
||||
} `json:"video,omitempty"`
|
||||
Link *Link `json:"link,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Priority int `json:"priority,omitempty"`
|
||||
IsOrigin bool `json:"is_origin,omitempty"`
|
||||
ResolutionName string `json:"resolution_name,omitempty"`
|
||||
IsVisible bool `json:"is_visible,omitempty"`
|
||||
Category string `json:"category,omitempty"`
|
||||
}
|
||||
|
||||
// FileParams includes parameters for instant open
|
||||
type FileParams struct {
|
||||
Duration int64 `json:"duration,omitempty,string"` // in seconds
|
||||
Height int `json:"height,omitempty,string"`
|
||||
Platform string `json:"platform,omitempty"` // "Upload"
|
||||
PlatformIcon string `json:"platform_icon,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
Width int `json:"width,omitempty,string"`
|
||||
}
|
||||
|
||||
// FileApp includes parameters for instant open
|
||||
type FileApp struct {
|
||||
ID string `json:"id,omitempty"` // "decompress" for rar files
|
||||
Name string `json:"name,omitempty"` // "decompress" for rar files
|
||||
Access []interface{} `json:"access,omitempty"`
|
||||
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
VipTypes []interface{} `json:"vip_types,omitempty"`
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Params struct {
|
||||
} `json:"params,omitempty"` // TODO
|
||||
CategoryIds []interface{} `json:"category_ids,omitempty"`
|
||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
Links struct {
|
||||
} `json:"links,omitempty"` // TODO
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// TaskList contains a list of Task elements
|
||||
type TaskList struct {
|
||||
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
|
||||
NextPageToken string `json:"next_page_token"`
|
||||
ExpiresIn int `json:"expires_in,omitempty"`
|
||||
}
|
||||
|
||||
// Task is a basic element representing a single task such as offline download and upload
|
||||
type Task struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#task"
|
||||
ID string `json:"id,omitempty"` // task id?
|
||||
Name string `json:"name,omitempty"` // torrent name?
|
||||
Type string `json:"type,omitempty"` // "offline"
|
||||
UserID string `json:"user_id,omitempty"`
|
||||
Statuses []interface{} `json:"statuses,omitempty"` // TODO
|
||||
StatusSize int `json:"status_size,omitempty"` // TODO
|
||||
Params *TaskParams `json:"params,omitempty"` // TODO
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
FileName string `json:"file_name,omitempty"`
|
||||
FileSize string `json:"file_size,omitempty"`
|
||||
Message string `json:"message,omitempty"` // e.g. "Saving"
|
||||
CreatedTime Time `json:"created_time,omitempty"`
|
||||
UpdatedTime Time `json:"updated_time,omitempty"`
|
||||
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
|
||||
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
|
||||
Progress int `json:"progress,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Callback string `json:"callback,omitempty"`
|
||||
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
|
||||
Space string `json:"space,omitempty"`
|
||||
}
|
||||
|
||||
// TaskParams includes parameters informing status of Task
|
||||
type TaskParams struct {
|
||||
Age string `json:"age,omitempty"`
|
||||
PredictSpeed string `json:"predict_speed,omitempty"`
|
||||
PredictType string `json:"predict_type,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
|
||||
// Form contains parameters for upload by multipart/form-data
|
||||
type Form struct {
|
||||
Headers struct{} `json:"headers"`
|
||||
Kind string `json:"kind"` // "drive#form"
|
||||
Method string `json:"method"` // "POST"
|
||||
MultiParts struct {
|
||||
OSSAccessKeyID string `json:"OSSAccessKeyId"`
|
||||
Signature string `json:"Signature"`
|
||||
Callback string `json:"callback"`
|
||||
Key string `json:"key"`
|
||||
Policy string `json:"policy"`
|
||||
XUserData string `json:"x:user_data"`
|
||||
} `json:"multi_parts"`
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// Resumable contains parameters for upload by resumable
|
||||
type Resumable struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#resumable"
|
||||
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
|
||||
Params *ResumableParams `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// ResumableParams specifies resumable parameters
|
||||
type ResumableParams struct {
|
||||
AccessKeyID string `json:"access_key_id,omitempty"`
|
||||
AccessKeySecret string `json:"access_key_secret,omitempty"`
|
||||
Bucket string `json:"bucket,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Expiration Time `json:"expiration,omitempty"`
|
||||
Key string `json:"key,omitempty"`
|
||||
SecurityToken string `json:"security_token,omitempty"`
|
||||
}
|
||||
|
||||
// FileInArchive is a basic element in archive
|
||||
type FileInArchive struct {
|
||||
Index int `json:"index,omitempty"`
|
||||
Filename string `json:"filename,omitempty"`
|
||||
Filesize string `json:"filesize,omitempty"`
|
||||
MimeType string `json:"mime_type,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"`
|
||||
Kind string `json:"kind,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFile is a response to RequestNewFile
|
||||
type NewFile struct {
|
||||
File *File `json:"file,omitempty"`
|
||||
Form *Form `json:"form,omitempty"`
|
||||
Resumable *Resumable `json:"resumable,omitempty"`
|
||||
Task *Task `json:"task,omitempty"` // null in this case
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// NewTask is a response to RequestNewTask
|
||||
type NewTask struct {
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
File *File `json:"file,omitempty"` // null in this case
|
||||
Task *Task `json:"task,omitempty"`
|
||||
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
|
||||
}
|
||||
|
||||
// About informs drive status
|
||||
type About struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#about"
|
||||
Quota *Quota `json:"quota,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"`
|
||||
Quotas struct {
|
||||
} `json:"quotas,omitempty"` // maybe []*Quota?
|
||||
}
|
||||
|
||||
// Quota informs drive quota
|
||||
type Quota struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#quota"
|
||||
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
|
||||
Usage int64 `json:"usage,omitempty,string"` // bytes in use
|
||||
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
|
||||
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
|
||||
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
|
||||
}
|
||||
|
||||
// Share is a response to RequestShare
|
||||
//
|
||||
// used in PublicLink()
|
||||
type Share struct {
|
||||
ShareID string `json:"share_id,omitempty"`
|
||||
ShareURL string `json:"share_url,omitempty"`
|
||||
PassCode string `json:"pass_code,omitempty"`
|
||||
ShareText string `json:"share_text,omitempty"`
|
||||
}
|
||||
|
||||
// User contains user account information
|
||||
//
|
||||
// GET https://user.mypikpak.com/v1/user/me
|
||||
type User struct {
|
||||
Sub string `json:"sub,omitempty"` // userid for internal use
|
||||
Name string `json:"name,omitempty"` // Username
|
||||
Picture string `json:"picture,omitempty"` // URL to Avatar image
|
||||
Email string `json:"email,omitempty"` // redacted email address
|
||||
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
|
||||
PhoneNumber string `json:"phone_number,omitempty"`
|
||||
Password string `json:"password,omitempty"` // "SET" if configured
|
||||
Status string `json:"status,omitempty"` // "ACTIVE"
|
||||
CreatedAt Time `json:"created_at,omitempty"`
|
||||
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
|
||||
}
|
||||
|
||||
// UserProvider details third-party authentication
|
||||
type UserProvider struct {
|
||||
ID string `json:"id,omitempty"` // e.g. "google.com"
|
||||
ProviderUserID string `json:"provider_user_id,omitempty"`
|
||||
Name string `json:"name,omitempty"` // username
|
||||
}
|
||||
|
||||
// VIP includes subscription details about premium account
|
||||
//
|
||||
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
|
||||
type VIP struct {
|
||||
Result string `json:"result,omitempty"` // "ACCEPTED"
|
||||
Message string `json:"message,omitempty"`
|
||||
RedirectURI string `json:"redirect_uri,omitempty"`
|
||||
Data struct {
|
||||
Expire Time `json:"expire,omitempty"`
|
||||
Status string `json:"status,omitempty"` // "invalid" or "ok"
|
||||
Type string `json:"type,omitempty"` // "novip" or "platinum"
|
||||
UserID string `json:"user_id,omitempty"` // same as User.Sub
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// DecompressResult is a response to RequestDecompress
|
||||
type DecompressResult struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"`
|
||||
TaskID string `json:"task_id,omitempty"` // same as File.Id
|
||||
FilesNum int `json:"files_num,omitempty"` // number of files in archive
|
||||
RedirectLink string `json:"redirect_link,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// RequestShare is to request for file share
|
||||
type RequestShare struct {
|
||||
FileIds []string `json:"file_ids,omitempty"`
|
||||
ShareTo string `json:"share_to,omitempty"` // "publiclink",
|
||||
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
|
||||
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
|
||||
}
|
||||
|
||||
// RequestBatch is to request for batch actions
|
||||
type RequestBatch struct {
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
To map[string]string `json:"to,omitempty"`
|
||||
}
|
||||
|
||||
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
|
||||
type RequestNewFile struct {
|
||||
// always required
|
||||
Kind string `json:"kind"` // "drive#folder" or "drive#file"
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parent_id"`
|
||||
FolderType string `json:"folder_type"`
|
||||
// only when uploading a new file
|
||||
Hash string `json:"hash,omitempty"` // sha1sum
|
||||
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
|
||||
Size int64 `json:"size,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
|
||||
}
|
||||
|
||||
// RequestNewTask is to request for creating a new task like offline downloads
|
||||
//
|
||||
// Name and ParentID can be left empty.
|
||||
type RequestNewTask struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||
Name string `json:"name,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
|
||||
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
|
||||
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
|
||||
}
|
||||
|
||||
// RequestDecompress is to request for decompress of archive files
|
||||
type RequestDecompress struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Password string `json:"password,omitempty"` // ""
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
|
||||
DefaultParent bool `json:"default_parent,omitempty"`
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NOT implemented YET
|
||||
|
||||
// RequestArchiveFileList is to request for a list of files in archive
|
||||
//
|
||||
// POST https://api-drive.mypikpak.com/decompress/v1/list
|
||||
type RequestArchiveFileList struct {
|
||||
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
|
||||
Path string `json:"path,omitempty"` // "" by default
|
||||
Password string `json:"password,omitempty"` // "" by default
|
||||
FileID string `json:"file_id,omitempty"`
|
||||
}
|
||||
|
||||
// ArchiveFileList is a response to RequestArchiveFileList
|
||||
type ArchiveFileList struct {
|
||||
Status string `json:"status,omitempty"` // "OK"
|
||||
StatusText string `json:"status_text,omitempty"` // ""
|
||||
TaskID string `json:"task_id,omitempty"` // ""
|
||||
CurrentPath string `json:"current_path,omitempty"` // ""
|
||||
Title string `json:"title,omitempty"`
|
||||
FileSize int64 `json:"file_size,omitempty"`
|
||||
Gcid string `json:"gcid,omitempty"` // same as File.Hash
|
||||
Files []*FileInArchive `json:"files,omitempty"`
|
||||
}
|
||||
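One detail worth calling out in the (removed) pikpak API types above is the quoted RFC3339 layout used by api.Time, which lets the raw JSON bytes be parsed and formatted directly. A standalone sketch of that round trip (illustrative only, not rclone code):

package main

import (
    "fmt"
    "time"
)

// Same layout the api.Time type builds: RFC3339 wrapped in double quotes so
// the raw JSON token can be handed straight to time.Parse / time.Format.
const timeFormat = `"` + time.RFC3339 + `"`

func main() {
    raw := []byte(`"2022-09-17T14:31:06.056+08:00"`)

    // Parsing tolerates the fractional seconds even though the layout omits them.
    t, err := time.Parse(timeFormat, string(raw))
    if err != nil {
        panic(err)
    }

    // Marshal back the way api.Time.MarshalJSON does (fraction is dropped).
    fmt.Println(t.Format(timeFormat))
}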
@@ -1,253 +0,0 @@
|
||||
package pikpak
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Globals
|
||||
const (
|
||||
cachePrefix = "rclone-pikpak-sha1sum-"
|
||||
)
|
||||
|
||||
// requestDecompress requests decompress of compressed files
|
||||
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
|
||||
req := &api.RequestDecompress{
|
||||
Gcid: file.Hash,
|
||||
Password: password,
|
||||
FileID: file.ID,
|
||||
Files: []*api.FileInArchive{},
|
||||
DefaultParent: true,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/decompress/v1/decompress",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getUserInfo gets UserInfo from API
|
||||
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://user.mypikpak.com/v1/user/me",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get userinfo: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getVIPInfo gets VIPInfo from API
|
||||
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get vip info: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// requestBatchAction requests batch actions to API
|
||||
//
|
||||
// action can be one of batch{Copy,Delete,Trash,Untrash}
|
||||
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files:" + action,
|
||||
NoResponse: true, // Only returns `{"task_id":""}`
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("batch action %q failed: %w", action, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requestNewTask requests a new api.NewTask and returns api.Task
|
||||
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var newTask api.NewTask
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newTask.Task, nil
|
||||
}
|
||||
|
||||
// requestNewFile requests a new api.NewFile and returns api.File
|
||||
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/files",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getFile gets api.File from API for the ID passed
|
||||
// and returns rich information containing additional fields below
|
||||
// * web_content_link
|
||||
// * thumbnail_link
|
||||
// * links
|
||||
// * medias
|
||||
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
if err == nil && info.Phase != api.PhaseTypeComplete {
|
||||
// could be pending right after file is created/uploaded.
|
||||
return true, errors.New("not PHASE_TYPE_COMPLETE")
|
||||
}
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// patchFile updates attributes of the file by ID
|
||||
//
|
||||
// currently known patchable fields are
|
||||
// * name
|
||||
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/drive/v1/files/" + ID,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// getAbout gets drive#quota information from server
|
||||
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/drive/v1/about",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// requestShare returns information about shareable links
|
||||
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/v1/share",
|
||||
}
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Read the sha1 of in returning a reader which will read the same contents
|
||||
//
|
||||
// The cleanup function should be called when out is finished with
|
||||
// regardless of whether this function returned an error or not.
|
||||
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need an SHA1
|
||||
hash := sha1.New()
|
||||
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
|
||||
teeReader := io.TeeReader(in, hash)
|
||||
|
||||
// nothing to clean up by default
|
||||
cleanup = func() {}
|
||||
|
||||
// don't cache small files on disk to reduce wear of the disk
|
||||
if size > threshold {
|
||||
var tempFile *os.File
|
||||
|
||||
// create the cache file
|
||||
tempFile, err = os.CreateTemp("", cachePrefix)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
|
||||
|
||||
// clean up the file after we are done downloading
|
||||
cleanup = func() {
|
||||
// the file should normally already be closed, but just to make sure
|
||||
_ = tempFile.Close()
|
||||
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
|
||||
}
|
||||
|
||||
// copy the ENTIRE file to disc and calculate the SHA1 in the process
|
||||
if _, err = io.Copy(tempFile, teeReader); err != nil {
|
||||
return
|
||||
}
|
||||
// jump to the start of the local file so we can pass it along
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// replace the already read source with a reader of our cached file
|
||||
out = tempFile
|
||||
} else {
|
||||
// that's a small file, just read it into memory
|
||||
var inData []byte
|
||||
inData, err = io.ReadAll(teeReader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// set the reader to our read memory block
|
||||
out = bytes.NewReader(inData)
|
||||
}
|
||||
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
|
||||
}
|
||||
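The readSHA1 helper above hashes the upload stream while teeing it aside (to a temp file for large bodies) so the data can be re-read once the SHA-1 is known. A minimal in-memory sketch of that pattern (illustrative only, not the rclone helper):

package main

import (
    "bytes"
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
)

func main() {
    in := strings.NewReader("some upload body")

    // Same core trick as readSHA1: hash the stream while buffering it, so the
    // contents remain available for a second pass. readSHA1 spills large
    // bodies to a temp file; this sketch simply buffers in memory.
    h := sha1.New()
    var buf bytes.Buffer
    if _, err := io.Copy(&buf, io.TeeReader(in, h)); err != nil {
        panic(err)
    }

    fmt.Println(hex.EncodeToString(h.Sum(nil)))
    // buf now holds the same contents for the real upload pass.
    fmt.Println(buf.String())
}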
File diff suppressed because it is too large
@@ -1,17 +0,0 @@
// Test PikPak filesystem interface
package pikpak_test

import (
    "testing"

    "github.com/rclone/rclone/backend/pikpak"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestPikPak:",
        NilObject:  (*pikpak.Object)(nil),
    })
}
backend/s3/s3.go
@@ -1,4 +1,4 @@
|
||||
// Package s3 provides an interface to Amazon S3 object storage
|
||||
// Package s3 provides an interface to Amazon S3 oject storage
|
||||
package s3
|
||||
|
||||
//go:generate go run gen_setfrom.go -o setfrom.go
|
||||
@@ -66,7 +66,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -109,9 +109,6 @@ func init() {
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
}, {
|
||||
Value: "GCS",
|
||||
Help: "Google Cloud Storage",
|
||||
}, {
|
||||
Value: "HuaweiOBS",
|
||||
Help: "Huawei Object Storage Service",
|
||||
@@ -937,14 +934,6 @@ func init() {
|
||||
Value: "s3.eu-central-1.stackpathstorage.com",
|
||||
Help: "EU Endpoint",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Google Cloud Storage.",
|
||||
Provider: "GCS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://storage.googleapis.com",
|
||||
Help: "Google Cloud Storage endpoint",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Storj Gateway.",
|
||||
@@ -1109,7 +1098,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -2269,24 +2258,6 @@ will decompress the object on the fly.
|
||||
If this is set to unset (the default) then rclone will choose
|
||||
according to the provider setting what to apply, but you can override
|
||||
rclone's choice here.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_accept_encoding_gzip",
|
||||
Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.
|
||||
|
||||
By default, rclone will append |Accept-Encoding: gzip| to the request to download
|
||||
compressed objects whenever possible.
|
||||
|
||||
However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
|
||||
the signature of the request.
|
||||
|
||||
A symptom of this would be receiving errors like
|
||||
|
||||
SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
|
||||
|
||||
In this case, you might want to try disabling this option.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
@@ -2295,11 +2266,6 @@ In this case, you might want to try disabling this option.
|
||||
Help: `Suppress setting and reading of system metadata`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "sts_endpoint",
|
||||
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
Advanced: true,
|
||||
},
|
||||
}})
|
||||
}
|
||||
@@ -2386,7 +2352,6 @@ type Options struct {
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
STSEndpoint string `config:"sts_endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
@@ -2428,7 +2393,6 @@ type Options struct {
|
||||
VersionAt fs.Time `config:"version_at"`
|
||||
Decompress bool `config:"decompress"`
|
||||
MightGzip fs.Tristate `config:"might_gzip"`
|
||||
UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
|
||||
NoSystemMetadata bool `config:"no_system_metadata"`
|
||||
}
|
||||
|
||||
@@ -2564,7 +2528,7 @@ func parsePath(path string) (root string) {
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
|
||||
|
||||
@@ -2596,38 +2560,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
|
||||
}
|
||||
}
|
||||
|
||||
// Default name resolver
|
||||
var defaultResolver = endpoints.DefaultResolver()
|
||||
|
||||
// resolve (service, region) to endpoint
|
||||
//
|
||||
// Used to set endpoint for s3 services and not for other services
|
||||
type resolver map[string]string
|
||||
|
||||
// Add a service to the resolver, ignoring empty urls
|
||||
func (r resolver) addService(service, url string) {
|
||||
if url == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(url, "http") {
|
||||
url = "https://" + url
|
||||
}
|
||||
r[service] = url
|
||||
}
|
||||
|
||||
// EndpointFor return the endpoint for s3 if set or the default if not
|
||||
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
fs.Debugf(nil, "Resolving service %q region %q", service, region)
|
||||
url, ok := r[service]
|
||||
if ok {
|
||||
return endpoints.ResolvedEndpoint{
|
||||
URL: url,
|
||||
SigningRegion: region,
|
||||
}, nil
|
||||
}
|
||||
return defaultResolver.EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
// s3Connection makes a connection to s3
|
||||
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -2706,12 +2638,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
if opt.Region != "" {
|
||||
awsConfig.WithRegion(opt.Region)
|
||||
}
|
||||
if opt.Endpoint != "" || opt.STSEndpoint != "" {
|
||||
// If endpoints are set, override the relevant services only
|
||||
r := make(resolver)
|
||||
r.addService("s3", opt.Endpoint)
|
||||
r.addService("sts", opt.STSEndpoint)
|
||||
awsConfig.WithEndpointResolver(r)
|
||||
if opt.Endpoint != "" {
|
||||
awsConfig.WithEndpoint(opt.Endpoint)
|
||||
}
|
||||
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
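For context on this hunk: the newer side registers a custom endpoints resolver so that the s3 and sts services can be pointed at separate URLs, while the v1.61 side simply overrides the single S3 endpoint on the config. A minimal sketch of the simpler v1.61-style setup, using the aws-sdk-go v1 API that appears in this diff (standalone illustration with placeholder values, not backend code):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // v1.61 side: only the S3 endpoint is overridden, directly on the config,
    // equivalent to awsConfig.WithEndpoint(opt.Endpoint) above.
    cfg := aws.NewConfig().
        WithRegion("us-east-1").
        WithEndpoint("https://objects.example.com")

    fmt.Println(*cfg.Region, *cfg.Endpoint)
}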
@@ -2821,12 +2749,11 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
|
||||
// These should be differences from AWS S3
|
||||
func setQuirks(opt *Options) {
|
||||
var (
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
useAcceptEncodingGzip = true
|
||||
mightGzip = true // assume all providers might gzip until proven otherwise
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
mightGzip = true // assume all providers might gzip until proven otherwise
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
@@ -2911,10 +2838,6 @@ func setQuirks(opt *Options) {
|
||||
case "Qiniu":
|
||||
useMultipartEtag = false
|
||||
urlEncodeListings = false
|
||||
case "GCS":
|
||||
// Google break request Signature by mutating accept-encoding HTTP header
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
useAcceptEncodingGzip = false
|
||||
case "Other":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
@@ -2959,12 +2882,6 @@ func setQuirks(opt *Options) {
|
||||
opt.MightGzip.Valid = true
|
||||
opt.MightGzip.Value = mightGzip
|
||||
}
|
||||
|
||||
// set UseAcceptEncodingGzip if not manually set
|
||||
if !opt.UseAcceptEncodingGzip.Valid {
|
||||
opt.UseAcceptEncodingGzip.Valid = true
|
||||
opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@@ -2998,7 +2915,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
|
||||
}
|
||||
if opt.Versions && opt.VersionAt.IsSet() {
|
||||
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
|
||||
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
@@ -3072,15 +2989,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
GetTier: true,
|
||||
SlowModTime: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.Provider == "Storj" {
|
||||
f.features.SetTier = false
|
||||
f.features.GetTier = false
|
||||
}
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
|
||||
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
oldRoot := f.root
|
||||
@@ -3095,6 +3003,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
if opt.Provider == "Storj" {
|
||||
f.features.SetTier = false
|
||||
f.features.GetTier = false
|
||||
}
|
||||
if opt.Provider == "IDrive" {
|
||||
f.features.SetTier = false
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -3119,7 +3035,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
|
||||
err = f.list(ctx, listOpt{
|
||||
bucket: bucket,
|
||||
directory: bucketPath,
|
||||
prefix: f.rootDirectory,
|
||||
recurse: true,
|
||||
withVersions: f.opt.Versions,
|
||||
findFile: true,
|
||||
@@ -3510,25 +3425,24 @@ var errEndList = errors.New("end list")
|
||||
|
||||
// list options
|
||||
type listOpt struct {
|
||||
bucket string // bucket to list
|
||||
directory string // directory with bucket
|
||||
prefix string // prefix to remove from listing
|
||||
addBucket bool // if set, the bucket is added to the start of the remote
|
||||
recurse bool // if set, recurse to read sub directories
|
||||
withVersions bool // if set, versions are produced
|
||||
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
|
||||
findFile bool // if set, it will look for files called (bucket, directory)
|
||||
versionAt fs.Time // if set only show versions <= this time
|
||||
noSkipMarkers bool // if set return dir marker objects
|
||||
bucket string // bucket to list
|
||||
directory string // directory with bucket
|
||||
prefix string // prefix to remove from listing
|
||||
addBucket bool // if set, the bucket is added to the start of the remote
|
||||
recurse bool // if set, recurse to read sub directories
|
||||
withVersions bool // if set, versions are produced
|
||||
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
|
||||
findFile bool // if set, it will look for files called (bucket, directory)
|
||||
versionAt fs.Time // if set only show versions <= this time
|
||||
}
|
||||
|
||||
// list lists the objects into the function supplied with the opt
|
||||
// supplied.
|
||||
func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
if opt.prefix != "" {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if !opt.findFile {
|
||||
if opt.prefix != "" {
|
||||
opt.prefix += "/"
|
||||
}
|
||||
if opt.directory != "" {
|
||||
opt.directory += "/"
|
||||
}
|
||||
@@ -3632,7 +3546,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
}
|
||||
remote = remote[len(opt.prefix):]
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
remote = path.Join(opt.bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &s3.Object{Key: &remote}, nil, true)
|
||||
@@ -3661,10 +3575,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
remote = remote[len(opt.prefix):]
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
remote = path.Join(opt.bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
|
||||
if isDirectory && object.Size != nil && *object.Size == 0 {
|
||||
continue // skip directory marker
|
||||
}
|
||||
if versionIDs != nil {
|
||||
@@ -3954,7 +3868,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
|
||||
req.Bucket = &dstBucket
|
||||
req.ACL = stringPointerOrNil(f.opt.ACL)
|
||||
req.Key = &dstPath
|
||||
source := pathEscape(bucket.Join(srcBucket, srcPath))
|
||||
source := pathEscape(path.Join(srcBucket, srcPath))
|
||||
if src.versionID != nil {
|
||||
source += fmt.Sprintf("?versionId=%s", *src.versionID)
|
||||
}
|
||||
@@ -4189,9 +4103,9 @@ Usage Examples:
|
||||
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
|
||||
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
|
||||
This flag also obeys the filters. Test first with -i/--interactive or --dry-run flags
|
||||
|
||||
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
|
||||
All the objects shown will be marked for restore, then
|
||||
|
||||
@@ -4259,8 +4173,8 @@ a bucket or with a bucket and path.
|
||||
Long: `This command removes unfinished multipart uploads of age greater than
|
||||
max-age which defaults to 24 hours.
|
||||
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
|
||||
rclone backend cleanup s3:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
|
||||
@@ -4276,8 +4190,8 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
Long: `This command removes any old hidden versions of files
|
||||
on a versions enabled bucket.
|
||||
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
|
||||
rclone backend cleanup-hidden s3:bucket/path/to/dir
|
||||
`,
|
||||
@@ -4611,14 +4525,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
delErr <- operations.DeleteFiles(ctx, delChan)
|
||||
}()
|
||||
checkErr(f.list(ctx, listOpt{
|
||||
bucket: bucket,
|
||||
directory: directory,
|
||||
prefix: f.rootDirectory,
|
||||
addBucket: f.rootBucket == "",
|
||||
recurse: true,
|
||||
withVersions: versioned,
|
||||
hidden: true,
|
||||
noSkipMarkers: true,
|
||||
bucket: bucket,
|
||||
directory: directory,
|
||||
prefix: f.rootDirectory,
|
||||
addBucket: f.rootBucket == "",
|
||||
recurse: true,
|
||||
withVersions: versioned,
|
||||
hidden: true,
|
||||
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
@@ -4628,7 +4541,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
fs.Errorf(object, "Can't create object %+v", err)
|
||||
return nil
|
||||
}
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
|
||||
// Work out whether the file is the current version or not
|
||||
isCurrentVersion := !versioned || !version.Match(remote)
|
||||
fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
|
||||
@@ -5004,9 +4917,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Override the automatic decompression in the transport to
|
||||
// download compressed files as-is
|
||||
if o.fs.opt.UseAcceptEncodingGzip.Value {
|
||||
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
|
||||
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
@@ -5074,7 +4985,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (wantETag, gotETag string, versionID *string, err error) {
|
||||
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, versionID *string, err error) {
|
||||
f := o.fs
|
||||
|
||||
// make concurrency machinery
|
||||
@@ -5118,13 +5029,11 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
|
||||
return etag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
|
||||
}
|
||||
uid := cout.UploadId
|
||||
|
||||
uploadCtx, cancel := context.WithCancel(ctx)
|
||||
defer atexit.OnError(&err, func() {
|
||||
cancel()
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
@@ -5144,7 +5053,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
})()
|
||||
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(uploadCtx)
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
finished = false
|
||||
partsMu sync.Mutex // to protect parts
|
||||
parts []*s3.CompletedPart
|
||||
@@ -5193,7 +5102,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
finished = true
|
||||
} else if err != nil {
|
||||
free()
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
return etag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
@@ -5226,7 +5135,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
|
||||
if err != nil {
|
||||
if partNum <= int64(concurrency) {
|
||||
return f.shouldRetry(gCtx, err)
|
||||
return f.shouldRetry(ctx, err)
|
||||
}
|
||||
// retry all chunks once have done the first batch
|
||||
return true, err
|
||||
@@ -5248,7 +5157,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return wantETag, gotETag, nil, err
|
||||
return etag, nil, err
|
||||
}
|
||||
|
||||
// sort the completed parts by part number
|
||||
@@ -5258,7 +5167,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
|
||||
var resp *s3.CompleteMultipartUploadOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
|
||||
resp, err = f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
|
||||
Bucket: req.Bucket,
|
||||
Key: req.Key,
|
||||
MultipartUpload: &s3.CompletedMultipartUpload{
|
||||
@@ -5267,20 +5176,17 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
|
||||
RequestPayer: req.RequestPayer,
|
||||
UploadId: uid,
|
||||
})
|
||||
return f.shouldRetry(uploadCtx, err)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
return etag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
}
|
||||
hashOfHashes := md5.Sum(md5s)
|
||||
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
|
||||
etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
|
||||
if resp != nil {
|
||||
if resp.ETag != nil {
|
||||
gotETag = *resp.ETag
|
||||
}
|
||||
versionID = resp.VersionId
|
||||
}
|
||||
return wantETag, gotETag, versionID, nil
|
||||
return etag, versionID, nil
|
||||
}
|
||||
|
||||
// unWrapAwsError unwraps AWS errors, looking for a non AWS error
|
||||
@@ -5580,16 +5486,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
var wantETag string // Multipart upload Etag to check
|
||||
var gotETag string // Etag we got from the upload
|
||||
var gotEtag string // Etag we got from the upload
|
||||
var lastModified time.Time // Time we got from the upload
|
||||
var versionID *string // versionID we got from the upload
|
||||
if multipart {
|
||||
wantETag, gotETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
wantETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
} else {
|
||||
if o.fs.opt.UsePresignedRequest {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
|
||||
gotEtag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
|
||||
} else {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
|
||||
gotEtag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@@ -5605,33 +5511,32 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// User requested we don't HEAD the object after uploading it
|
||||
// so make up the object as best we can assuming it got
|
||||
// uploaded properly. If size < 0 then we need to do the HEAD.
|
||||
var head *s3.HeadObjectOutput
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
head = new(s3.HeadObjectOutput)
|
||||
//structs.SetFrom(head, &req)
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(head, &req)
|
||||
var head s3.HeadObjectOutput
|
||||
//structs.SetFrom(&head, &req)
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
|
||||
head.ETag = &md5sumHex // doesn't matter quotes are missing
|
||||
head.ContentLength = &size
|
||||
// We get etag back from single and multipart upload so fill it in here
|
||||
if gotETag != "" {
|
||||
head.ETag = &gotETag
|
||||
// If we have done a single part PUT request then we can read these
|
||||
if gotEtag != "" {
|
||||
head.ETag = &gotEtag
|
||||
}
|
||||
if lastModified.IsZero() {
|
||||
lastModified = time.Now()
|
||||
}
|
||||
head.LastModified = &lastModified
|
||||
head.VersionId = versionID
|
||||
} else {
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
head, err = o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(&head)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
head, err := o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(head)
|
||||
|
||||
// Check multipart upload ETag if required
|
||||
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
|
||||
gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
|
||||
if wantETag != gotETag {
|
||||
|
||||
@@ -6,15 +6,12 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
@@ -253,8 +250,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Create an object
|
||||
const dirName = "versions"
|
||||
const fileName = dirName + "/" + "test-versions.txt"
|
||||
const fileName = "test-versions.txt"
|
||||
contents := random.String(100)
|
||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
@@ -284,12 +280,11 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
}()
|
||||
|
||||
// Read the contents
|
||||
entries, err := f.List(ctx, dirName)
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
tests := 0
|
||||
var fileNameVersion string
|
||||
for _, entry := range entries {
|
||||
t.Log(entry)
|
||||
remote := entry.Remote()
|
||||
if remote == fileName {
|
||||
t.Run("ReadCurrent", func(t *testing.T) {
|
||||
@@ -314,18 +309,6 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
require.NotNil(t, o)
|
||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
||||
})
|
||||
|
||||
// Check we can make a NewFs from that object with a version suffix
|
||||
t.Run("NewFs", func(t *testing.T) {
|
||||
newPath := path.Join(fs.ConfigString(f), fileNameVersion)
|
||||
// Make sure --s3-versions is set in the config of the new remote
|
||||
confPath := strings.Replace(newPath, ":", ",versions:", 1)
|
||||
fNew, err := cache.Get(ctx, confPath)
|
||||
// This should return pointing to a file
|
||||
assert.Equal(t, fs.ErrorIsFile, err)
|
||||
// With the directory the directory above
|
||||
assert.Equal(t, dirName, path.Base(fs.ConfigString(fNew)))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("VersionAt", func(t *testing.T) {
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package seafile
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Renew allows tokens to be renewed on expiry.
type Renew struct {
	ts       *time.Ticker     // timer indicating when it's time to renew the token
	run      func() error     // the callback to do the renewal
	done     chan interface{} // channel to end the go routine
	shutdown *sync.Once
}

// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to do the renewal.
func NewRenew(every time.Duration, run func() error) *Renew {
	r := &Renew{
		ts:       time.NewTicker(every),
		run:      run,
		done:     make(chan interface{}),
		shutdown: &sync.Once{},
	}
	go r.renewOnExpiry()
	return r
}
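A minimal usage sketch (editor's illustration, not part of the diff; the interval and callback mirror the call being removed from NewFs further down):

// Sketch only: refresh a library token every 45 minutes until shutdown.
renew := NewRenew(45*time.Minute, func() error {
	// hypothetical callback; the real one calls f.authorizeLibrary(...)
	return nil
})
// ... later, typically from the Fs Shutdown method:
renew.Shutdown()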
|
||||
|
||||
func (r *Renew) renewOnExpiry() {
|
||||
for {
|
||||
select {
|
||||
case <-r.ts.C:
|
||||
err := r.run()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "error while refreshing decryption token: %s", err)
|
||||
}
|
||||
|
||||
case <-r.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown stops the ticker and no more renewal will take place.
|
||||
func (r *Renew) Shutdown() {
|
||||
// closing a channel can only be done once
|
||||
r.shutdown.Do(func() {
|
||||
r.ts.Stop()
|
||||
close(r.done)
|
||||
})
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package seafile
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestShouldAllowShutdownTwice(t *testing.T) {
|
||||
renew := NewRenew(time.Hour, func() error {
|
||||
return nil
|
||||
})
|
||||
renew.Shutdown()
|
||||
renew.Shutdown()
|
||||
}
|
||||
|
||||
func TestRenewalInTimeLimit(t *testing.T) {
|
||||
var count int64
|
||||
|
||||
renew := NewRenew(100*time.Millisecond, func() error {
|
||||
atomic.AddInt64(&count, 1)
|
||||
return nil
|
||||
})
|
||||
time.Sleep(time.Second)
|
||||
renew.Shutdown()
|
||||
|
||||
// there's no guarantee the CI agent can handle a simple goroutine
|
||||
renewCount := atomic.LoadInt64(&count)
|
||||
t.Logf("renew count = %d", renewCount)
|
||||
assert.Greater(t, renewCount, int64(0))
|
||||
assert.Less(t, renewCount, int64(11))
|
||||
}
|
||||
@@ -143,7 +143,6 @@ type Fs struct {
|
||||
createDirMutex sync.Mutex // Protect creation of directories
|
||||
useOldDirectoryAPI bool // Use the old API v2 if seafile < 7
|
||||
moveDirNotAvailable bool // Version < 7.0 don't have an API to move a directory
|
||||
renew *Renew // Renew an encrypted library token
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -269,11 +268,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
// And remove the public link feature
|
||||
f.features.PublicLink = nil
|
||||
|
||||
// renew the library password every 45 minutes
|
||||
f.renew = NewRenew(45*time.Minute, func() error {
|
||||
return f.authorizeLibrary(context.Background(), libraryID)
|
||||
})
|
||||
}
|
||||
} else {
|
||||
// Deactivate the cleaner feature since there's no library selected
|
||||
@@ -389,15 +383,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
}
|
||||
|
||||
// Shutdown the Fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
if f.renew == nil {
|
||||
return nil
|
||||
}
|
||||
f.renew.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// sets the AuthorizationToken up
|
||||
func (f *Fs) setAuthorizationToken(token string) {
|
||||
f.srv.SetHeader("Authorization", "Token "+token)
|
||||
@@ -1346,7 +1331,6 @@ var (
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.UserInfoer = &Fs{}
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -953,9 +953,11 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// === API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// === getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6,
|
||||
// === the others can probably be removed after the API v2.1 is documented
|
||||
|
||||
func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath string) ([]api.DirEntry, error) {
|
||||
// API v2 from the official documentation, but that have been replaced by the much better v2.1 (undocumented as of Apr 2020)
|
||||
// getDirectoryEntriesAPIv2 is needed to keep compatibility with seafile v6.
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/directories.md#user-content-List%20Items%20in%20Directory
|
||||
if libraryID == "" {
|
||||
@@ -999,3 +1001,95 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibraryID, dstPath string) (*api.FileInfo, error) {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Copy%20File
|
||||
if srcLibraryID == "" || dstLibraryID == "" {
|
||||
return nil, errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
srcPath = path.Join("/", srcPath)
|
||||
dstPath = path.Join("/", dstPath)
|
||||
|
||||
// Older API does not seem to accept JSON input here either
|
||||
postParameters := url.Values{
|
||||
"operation": {"copy"},
|
||||
"dst_repo": {dstLibraryID},
|
||||
"dst_dir": {f.opt.Enc.FromStandardPath(dstPath)},
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + srcLibraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(srcPath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
}
|
||||
result := &api.FileInfo{}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return nil, fs.ErrorPermissionDenied
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to copy file %s:'%s' to %s:'%s': %w", srcLibraryID, srcPath, dstLibraryID, dstPath, err)
|
||||
}
|
||||
err = rest.DecodeJSON(resp, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.decodeFileInfo(result), nil
|
||||
}
|
||||
|
||||
func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname string) error {
|
||||
// API Documentation
|
||||
// https://download.seafile.com/published/web-api/v2.1/file.md#user-content-Rename%20File
|
||||
if libraryID == "" || newname == "" {
|
||||
return errors.New("libraryID and/or file path argument(s) missing")
|
||||
}
|
||||
filePath = path.Join("/", filePath)
|
||||
|
||||
// No luck with JSON input with the older api2
|
||||
postParameters := url.Values{
|
||||
"operation": {"rename"},
|
||||
"reloaddir": {"true"}, // This is an undocumented trick to avoid an http code 301 response (found in https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py)
|
||||
"newname": {f.opt.Enc.FromStandardName(newname)},
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: APIv20 + libraryID + "/file/",
|
||||
Parameters: url.Values{"p": {f.opt.Enc.FromStandardPath(filePath)}},
|
||||
ContentType: "application/x-www-form-urlencoded",
|
||||
Body: bytes.NewBuffer([]byte(postParameters.Encode())),
|
||||
NoRedirect: true,
|
||||
NoResponse: true,
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 301 {
|
||||
// This is the normal response from the server
|
||||
return nil
|
||||
}
|
||||
if resp.StatusCode == 401 || resp.StatusCode == 403 {
|
||||
return fs.ErrorPermissionDenied
|
||||
}
|
||||
if resp.StatusCode == 404 {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to rename file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
iofs "io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
@@ -324,7 +323,7 @@ Pass multiple variables space separated, eg
|
||||
|
||||
VAR1=value VAR2=value
|
||||
|
||||
and pass variables with spaces in quotes, eg
|
||||
and pass variables with spaces in in quotes, eg
|
||||
|
||||
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
|
||||
|
||||
@@ -368,20 +367,6 @@ At least one must match with server configuration. This can be checked for examp
|
||||
Example:
|
||||
|
||||
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "host_key_algorithms",
|
||||
Default: fs.SpaceSepList{},
|
||||
Help: `Space separated list of host key algorithms, ordered by preference.
|
||||
|
||||
At least one must match with server configuration. This can be checked for example using ssh -Q HostKeyAlgorithms.
|
||||
|
||||
Note: This can affect the outcome of key negotiation with the server even if server host key validation is not enabled.
|
||||
|
||||
Example:
|
||||
|
||||
ssh-ed25519 ssh-rsa ssh-dss
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -422,7 +407,6 @@ type Options struct {
|
||||
Ciphers fs.SpaceSepList `config:"ciphers"`
|
||||
KeyExchange fs.SpaceSepList `config:"key_exchange"`
|
||||
MACs fs.SpaceSepList `config:"macs"`
|
||||
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -755,10 +739,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
|
||||
}
|
||||
|
||||
if len(opt.HostKeyAlgorithms) != 0 {
|
||||
sshConfig.HostKeyAlgorithms = []string(opt.HostKeyAlgorithms)
|
||||
}
|
||||
|
||||
if opt.KnownHostsFile != "" {
|
||||
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
|
||||
if err != nil {
|
||||
@@ -802,32 +782,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
|
||||
}
|
||||
if keyFile != "" {
|
||||
// If `opt.KeyUseAgent` is false, then it's expected that `opt.KeyFile` contains the private key
|
||||
// and `${opt.KeyFile}.pub` contains the public key.
|
||||
//
|
||||
// If `opt.KeyUseAgent` is true, then it's expected that `opt.KeyFile` contains the public key.
|
||||
// This is how it works with openssh; the `IdentityFile` in openssh config points to the public key.
|
||||
// It's not necessary to specify the public key explicitly when using ssh-agent, since openssh and rclone
|
||||
// will try all the keys they find in the ssh-agent until they find one that works. But just like
|
||||
// `IdentityFile` is used in openssh config to limit the search to one specific key, so does
|
||||
// `opt.KeyFile` in rclone config limit the search to that specific key.
|
||||
//
|
||||
// However, previous versions of rclone would always expect to find the public key in
|
||||
// `${opt.KeyFile}.pub` even if `opt.KeyUseAgent` was true. So for the sake of backward compatibility
|
||||
// we still first attempt to read the public key from `${opt.KeyFile}.pub`. But if it fails with
|
||||
// an `fs.ErrNotExist` then we also try to read the public key from `opt.KeyFile`.
|
||||
pubBytes, err := os.ReadFile(keyFile + ".pub")
|
||||
if err != nil {
|
||||
if errors.Is(err, iofs.ErrNotExist) && opt.KeyUseAgent {
|
||||
pubBytes, err = os.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read public key file: %w", err)
|
||||
}
|
||||
|
||||
pub, _, _, _, err := ssh.ParseAuthorizedKey(pubBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse public key file: %w", err)
|
||||
@@ -849,8 +807,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
}
|
||||
|
||||
// Load key file as a private key, if specified. This is only needed when not using an ssh agent.
|
||||
if (keyFile != "" && !opt.KeyUseAgent) || opt.KeyPem != "" {
|
||||
// Load key file if specified
|
||||
if keyFile != "" || opt.KeyPem != "" {
|
||||
var key []byte
|
||||
if opt.KeyPem == "" {
|
||||
key, err = os.ReadFile(keyFile)
|
||||
|
||||
@@ -34,10 +34,9 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
|
||||
d := &smb2.Dialer{
|
||||
Initiator: &smb2.NTLMInitiator{
|
||||
User: f.opt.User,
|
||||
Password: pass,
|
||||
Domain: f.opt.Domain,
|
||||
TargetSPN: f.opt.SPN,
|
||||
User: f.opt.User,
|
||||
Password: pass,
|
||||
Domain: f.opt.Domain,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -82,7 +81,7 @@ func (c *conn) closed() bool {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
return nopErr != nil
|
||||
return nopErr == nil
|
||||
}
|
||||
|
||||
// Show that we are using a SMB session
|
||||
@@ -106,9 +105,9 @@ func (f *Fs) getSessions() int32 {
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
bgCtx := context.Background()
|
||||
ctx = context.Background()
|
||||
|
||||
c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
@@ -119,7 +118,7 @@ func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err erro
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(bgCtx)
|
||||
c.smbShare = c.smbShare.WithContext(ctx)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@@ -60,17 +60,6 @@ func init() {
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
}, {
|
||||
Name: "spn",
|
||||
Help: `Service principal name.
|
||||
|
||||
Rclone presents this name to the server. Some servers use this as further
|
||||
authentication, and it often needs to be set for clusters. For example:
|
||||
|
||||
cifs/remotehost:1020
|
||||
|
||||
Leave blank if not sure.
|
||||
`,
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
@@ -120,7 +109,6 @@ type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Domain string `config:"domain"`
|
||||
SPN string `config:"spn"`
|
||||
HideSpecial bool `config:"hide_special_share"`
|
||||
CaseInsensitive bool `config:"case_insensitive"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"golang.org/x/text/unicode/norm"
|
||||
|
||||
"storj.io/uplink"
|
||||
"storj.io/uplink/edge"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -32,9 +31,9 @@ const (
|
||||
)
|
||||
|
||||
var satMap = map[string]string{
|
||||
"us1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777",
|
||||
"eu1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777",
|
||||
"ap1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777",
|
||||
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
|
||||
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
|
||||
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
@@ -106,16 +105,16 @@ func init() {
|
||||
Name: "satellite_address",
|
||||
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
|
||||
Provider: newProvider,
|
||||
Default: "us1.storj.io",
|
||||
Default: "us-central-1.storj.io",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us1.storj.io",
|
||||
Help: "US1",
|
||||
Value: "us-central-1.storj.io",
|
||||
Help: "US Central 1",
|
||||
}, {
|
||||
Value: "eu1.storj.io",
|
||||
Help: "EU1",
|
||||
Value: "europe-west-1.storj.io",
|
||||
Help: "Europe West 1",
|
||||
}, {
|
||||
Value: "ap1.storj.io",
|
||||
Help: "AP1",
|
||||
Value: "asia-east-1.storj.io",
|
||||
Help: "Asia East 1",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -157,13 +156,11 @@ type Fs struct {
|
||||
|
||||
// Check the interfaces are satisfied.
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
)
|
||||
|
||||
// NewFs creates a filesystem backed by Storj.
|
||||
@@ -548,7 +545,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
defer func() {
|
||||
if err != nil {
|
||||
aerr := upload.Abort()
|
||||
if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
|
||||
if aerr != nil {
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
|
||||
}
|
||||
}
|
||||
@@ -563,16 +560,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
_, err = io.Copy(upload, in)
|
||||
if err != nil {
|
||||
if errors.Is(err, uplink.ErrBucketNotFound) {
|
||||
// Rclone assumes the backend will create the bucket if not existing yet.
|
||||
// Here we create the bucket and return a retry error for rclone to retry the upload.
|
||||
_, err = f.project.EnsureBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
|
||||
}
|
||||
|
||||
err = fserrors.RetryError(err)
|
||||
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
|
||||
|
||||
@@ -774,103 +761,3 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
// Return the new object
|
||||
return newObjectFromUplink(f, remote, newObject), nil
|
||||
}
|
||||
|
||||
// Purge all files in the directory specified
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
bucket, directory := f.absolute(dir)
|
||||
if bucket == "" {
|
||||
return errors.New("can't purge from root")
|
||||
}
|
||||
|
||||
if directory == "" {
|
||||
_, err := f.project.DeleteBucketWithObjects(ctx, bucket)
|
||||
if errors.Is(err, uplink.ErrBucketNotFound) {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Infof(directory, "Quick delete is available only for entire bucket. Falling back to list and delete.")
|
||||
objects := f.project.ListObjects(ctx, bucket,
|
||||
&uplink.ListObjectsOptions{
|
||||
Prefix: directory + "/",
|
||||
Recursive: true,
|
||||
},
|
||||
)
|
||||
if err := objects.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
empty := true
|
||||
for objects.Next() {
|
||||
empty = false
|
||||
_, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Infof(objects.Item().Key, "Deleted")
|
||||
}
|
||||
|
||||
if empty {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
bucket, key := f.absolute(remote)
|
||||
if bucket == "" {
|
||||
return "", errors.New("path must be specified")
|
||||
}
|
||||
|
||||
// Rclone requires that a link is only generated if the remote path exists
|
||||
if key == "" {
|
||||
_, err := f.project.StatBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
_, err := f.project.StatObject(ctx, bucket, key)
|
||||
if err != nil {
|
||||
if !errors.Is(err, uplink.ErrObjectNotFound) {
|
||||
return "", err
|
||||
}
|
||||
// No object found, check if there is such a prefix
|
||||
iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"})
|
||||
if iter.Err() != nil {
|
||||
return "", iter.Err()
|
||||
}
|
||||
if !iter.Next() {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key}
|
||||
|
||||
permission := uplink.ReadOnlyPermission()
|
||||
if expire.IsSet() {
|
||||
permission.NotAfter = time.Now().Add(time.Duration(expire))
|
||||
}
|
||||
|
||||
sharedAccess, err := f.access.Share(permission, sharedPrefix)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("sharing access to object failed: %w", err)
|
||||
}
|
||||
|
||||
creds, err := (&edge.Config{
|
||||
AuthServiceAddress: "auth.storjshare.io:7777",
|
||||
}).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating public link failed: %w", err)
|
||||
}
|
||||
|
||||
return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -1327,6 +1328,23 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegment
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
	if err = o.readMetaData(ctx); err != nil {
		return
	}
	dirManifest := o.headers["X-Object-Manifest"]
	dirManifest, err = url.PathUnescape(dirManifest)
	if err != nil {
		return
	}
	delimiter := strings.Index(dirManifest, "/")
	if len(dirManifest) == 0 || delimiter < 0 {
		err = errors.New("missing or wrong structure of manifest of Dynamic large object")
		return
	}
	return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
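A worked example of the split above (editor's note, not part of the diff; the header value is hypothetical):

// For a dynamic large object whose X-Object-Manifest header is
//
//	segments_container/videos/big.mov/
//
// getSegmentsDlo returns segmentsContainer "segments_container" and
// prefix "videos/big.mov/", i.e. everything after the first "/".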
|
||||
|
||||
// urlEncode encodes a string so that it is a valid URL
|
||||
//
|
||||
// We don't use any of Go's standard methods as we need `/` not
|
||||
|
||||
@@ -756,6 +756,14 @@ func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
|
||||
return f.createPolicy.Create(ctx, f.upstreams, path)
|
||||
}
|
||||
|
||||
func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
|
||||
return f.createPolicy.CreateEntries(entries...)
|
||||
}
|
||||
|
||||
func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
|
||||
return f.searchPolicy.Search(ctx, f.upstreams, path)
|
||||
}
|
||||
|
||||
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
|
||||
return f.searchPolicy.SearchEntries(entries...)
|
||||
}
|
||||
|
||||
@@ -214,7 +214,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
|
||||
f.IDRegexp = regexp.MustCompile(`^https://uptobox\.com/([a-zA-Z0-9]+)`)
|
||||
f.IDRegexp = regexp.MustCompile("https://uptobox.com/([a-zA-Z0-9]+)")
|
||||
|
||||
_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
|
||||
if err != nil {
|
||||
|
||||
@@ -75,7 +75,6 @@ type Prop struct {
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
MESha1Hex *string `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
|
||||
}
|
||||
|
||||
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
|
||||
@@ -103,27 +102,22 @@ func (p *Prop) StatusOK() bool {
|
||||
|
||||
// Hashes returns a map of all checksums - may be nil
|
||||
func (p *Prop) Hashes() (hashes map[hash.Type]string) {
|
||||
if len(p.Checksums) > 0 {
|
||||
hashes = make(map[hash.Type]string)
|
||||
for _, checksums := range p.Checksums {
|
||||
checksums = strings.ToLower(checksums)
|
||||
for _, checksum := range strings.Split(checksums, " ") {
|
||||
switch {
|
||||
case strings.HasPrefix(checksum, "sha1:"):
|
||||
hashes[hash.SHA1] = checksum[5:]
|
||||
case strings.HasPrefix(checksum, "md5:"):
|
||||
hashes[hash.MD5] = checksum[4:]
|
||||
}
|
||||
}
|
||||
}
|
||||
return hashes
|
||||
} else if p.MESha1Hex != nil {
|
||||
hashes = make(map[hash.Type]string)
|
||||
hashes[hash.SHA1] = *p.MESha1Hex
|
||||
return hashes
|
||||
} else {
|
||||
if len(p.Checksums) == 0 {
|
||||
return nil
|
||||
}
|
||||
hashes = make(map[hash.Type]string)
|
||||
for _, checksums := range p.Checksums {
|
||||
checksums = strings.ToLower(checksums)
|
||||
for _, checksum := range strings.Split(checksums, " ") {
|
||||
switch {
|
||||
case strings.HasPrefix(checksum, "sha1:"):
|
||||
hashes[hash.SHA1] = checksum[5:]
|
||||
case strings.HasPrefix(checksum, "md5:"):
|
||||
hashes[hash.MD5] = checksum[4:]
|
||||
}
|
||||
}
|
||||
}
|
||||
return hashes
|
||||
}
|
||||
|
||||
// PropValue is a tagged name and value
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
package webdav
|
||||
|
||||
/*
|
||||
chunked update for Nextcloud
|
||||
see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
// Not found. Can be returned by NextCloud when merging chunks of an upload.
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// 423 LOCKED
|
||||
if resp != nil && resp.StatusCode == 423 {
|
||||
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
}
|
||||
|
||||
// set the chunk size for testing
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) getChunksUploadURL() string {
|
||||
return strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
|
||||
}
|
||||
|
||||
func (o *Object) getChunksUploadDir() (string, error) {
	hasher := md5.New()
	_, err := hasher.Write([]byte(o.filePath()))
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
	}
	uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
	return uploadDir, nil
}
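To make the naming scheme concrete (editor's note, not part of the diff; the path is hypothetical):

// For an object whose filePath() is "alice/docs/report.pdf" the upload
// directory is "rclone-chunked-upload-" followed by the 32 hex characters
// of the MD5 of that path. The name is deterministic, which appears to be
// why createChunksUploadDirectory purges any leftover directory from an
// interrupted upload before creating it again.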
|
||||
|
||||
func (f *Fs) verifyChunkConfig() error {
|
||||
if f.opt.ChunkSize != 0 && !validateNextCloudChunkedURL.MatchString(f.endpointURL) {
|
||||
return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
|
||||
return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
|
||||
}
|
||||
|
||||
func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
var uploadDir string
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
|
||||
uploadDir, err = o.createChunksUploadDirectory(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partObj := &Object{
|
||||
fs: o.fs,
|
||||
}
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
|
||||
err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
|
||||
err = o.mergeChunks(ctx, uploadDir, options, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
|
||||
chunkSize := int64(partObj.fs.opt.ChunkSize)
|
||||
|
||||
// TODO: upload chunks in parallel for faster transfer speeds
|
||||
for offset := int64(0); offset < size; offset += chunkSize {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
contentLength := chunkSize
|
||||
|
||||
// Last chunk may be smaller
|
||||
if size-offset < contentLength {
|
||||
contentLength = size - offset
|
||||
}
|
||||
|
||||
endOffset := offset + contentLength - 1
|
||||
|
||||
partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
|
||||
// Enable low-level HTTP 2 retries.
|
||||
// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error
|
||||
|
||||
buf := make([]byte, chunkSize)
|
||||
in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)
|
||||
|
||||
getBody := func() (io.ReadCloser, error) {
|
||||
// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
|
||||
if _, err := in.Seek(0, io.SeekStart); err == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return io.NopCloser(in), nil
|
||||
}
|
||||
|
||||
err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("uploading chunk failed: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
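One naming detail worth spelling out (editor's note, not part of the diff):

// Each chunk is uploaded as "<uploadDir>/<start>-<end>" with both byte
// offsets zero-padded to 15 digits, e.g. "000000000000000-000000010485759"
// for the first chunk at the default 10 MiB chunk size, so the chunk
// names sort in byte order.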
|
||||
|
||||
func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
|
||||
uploadDir, err := o.getChunksUploadDir()
|
||||
if err != nil {
|
||||
return uploadDir, err
|
||||
}
|
||||
|
||||
err = o.purgeUploadedChunks(ctx, uploadDir)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: uploadDir + "/",
|
||||
NoResponse: true,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("making upload directory failed: %w", err)
|
||||
}
|
||||
return uploadDir, err
|
||||
}
|
||||
|
||||
func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
|
||||
var resp *http.Response
|
||||
|
||||
// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
|
||||
opts := rest.Opts{
|
||||
Method: "MOVE",
|
||||
Path: path.Join(uploadDir, ".file"),
|
||||
NoResponse: true,
|
||||
Options: options,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
|
||||
}
|
||||
opts.ExtraHeaders = o.extraHeaders(ctx, src)
|
||||
opts.ExtraHeaders["Destination"] = destinationURL.String()
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
|
||||
// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: uploadDir + "/",
|
||||
NoResponse: true,
|
||||
RootURL: o.fs.chunksUploadURL,
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
||||
|
||||
// directory doesn't exist, no need to purge
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -43,7 +42,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = fs.Duration(10 * time.Millisecond)
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDepth = "1" // depth for PROPFIND
|
||||
@@ -77,9 +76,6 @@ func init() {
|
||||
Name: "vendor",
|
||||
Help: "Name of the WebDAV site/service/software you are using.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "fastmail",
|
||||
Help: "Fastmail Files",
|
||||
}, {
|
||||
Value: "nextcloud",
|
||||
Help: "Nextcloud",
|
||||
}, {
|
||||
@@ -128,22 +124,6 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
|
||||
`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "pacer_min_sleep",
|
||||
Help: "Minimum time to sleep between API calls.",
|
||||
Default: minSleep,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "nextcloud_chunk_size",
|
||||
Help: `Nextcloud upload chunk size.
|
||||
|
||||
We recommend configuring your NextCloud instance to increase the max chunk size to 1 GB for better upload performances.
|
||||
See https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/big_file_upload_configuration.html#adjust-chunk-size-on-nextcloud-side
|
||||
|
||||
Set to 0 to disable chunked uploading.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -158,8 +138,6 @@ type Options struct {
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
}

// Fs represents a remote webdav
@@ -177,12 +155,9 @@ type Fs struct {
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
checkBeforePurge bool // enables extra check that directory to purge really exists
hasOCMD5 bool // set if can use owncloud style checksums for MD5
hasOCSHA1 bool // set if can use owncloud style checksums for SHA1
hasMESHA1 bool // set if can use fastmail style checksums for SHA1
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
chunksUploadURL string // upload URL for nextcloud chunked
canChunk bool // set if nextcloud and nextcloud_chunk_size is set
}

// Object describes a webdav object
@@ -303,7 +278,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasOCMD5 || f.hasOCSHA1 {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -436,7 +411,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
endpoint: u,
endpointURL: u.String(),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}

@@ -568,32 +543,19 @@ func (f *Fs) fetchAndSetBearerToken() error {
return nil
}

var validateNextCloudChunkedURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)

// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
switch vendor {
case "fastmail":
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasMESHA1 = true
case "owncloud":
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasOCMD5 = true
f.hasOCSHA1 = true
f.hasMD5 = true
f.hasSHA1 = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasOCSHA1 = true
f.canChunk = true
if err := f.verifyChunkConfig(); err != nil {
return err
}
f.chunksUploadURL = f.getChunksUploadURL()
fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
f.hasSHA1 = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -705,7 +667,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasOCMD5 || f.hasOCSHA1 {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -750,7 +712,6 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
continue
}
subPath := u.Path[len(baseURL.Path):]
subPath = strings.TrimPrefix(subPath, "/") // ignore leading / here for davrods
if f.opt.Enc != encoder.EncodeZero {
subPath = f.opt.Enc.ToStandardPath(subPath)
}
@@ -1034,7 +995,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
return nil, fmt.Errorf("copy mkParentDir failed: %w", err)
return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
}
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
if err != nil {
@@ -1059,11 +1020,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("copy call failed: %w", err)
return nil, fmt.Errorf("Copy call failed: %w", err)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("copy NewObject failed: %w", err)
return nil, fmt.Errorf("Copy NewObject failed: %w", err)
}
return dstObj, nil
}
@@ -1164,10 +1125,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
hashes := hash.Set(hash.None)
if f.hasOCMD5 {
if f.hasMD5 {
hashes.Add(hash.MD5)
}
if f.hasOCSHA1 || f.hasMESHA1 {
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
@@ -1235,10 +1196,10 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 && o.fs.hasOCMD5 {
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && (o.fs.hasOCSHA1 || o.fs.hasMESHA1) {
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
}
return "", hash.ErrUnsupported
@@ -1260,12 +1221,12 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasOCMD5 || o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
if o.fs.hasMD5 || o.fs.hasSHA1 {
hashes := info.Hashes()
if o.fs.hasOCSHA1 || o.fs.hasMESHA1 {
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasOCMD5 {
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
}
@@ -1342,72 +1303,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update mkParentDir failed: %w", err)
}

if o.shouldUseChunkedUpload(src) {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
if err != nil {
return err
}
} else {
fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
contentType := fs.MimeType(ctx, src)
filePath := o.filePath()
extraHeaders := o.extraHeaders(ctx, src)
// TODO: define getBody() to enable low-level HTTP/2 retries
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
if err != nil {
return err
}
size := src.Size()
var resp *http.Response
opts := rest.Opts{
Method: "PUT",
Path: o.filePath(),
Body: in,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
Options: options,
}

// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
}

func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
extraHeaders := map[string]string{}
if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasOCSHA1 {
if o.fs.hasSHA1 {
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
extraHeaders["OC-Checksum"] = "SHA1:" + sha1
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
}
}
if o.fs.hasOCMD5 && extraHeaders["OC-Checksum"] == "" {
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
extraHeaders["OC-Checksum"] = "MD5:" + md5
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}
}
return extraHeaders
}

// Standard update in one request (no chunks)
func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func() (io.ReadCloser, error), filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
var resp *http.Response

if extraHeaders == nil {
extraHeaders = map[string]string{}
}

opts := rest.Opts{
Method: "PUT",
Path: filePath,
GetBody: getBody,
Body: body,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: contentType,
Options: options,
ExtraHeaders: extraHeaders,
RootURL: rootURL,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
@@ -1423,8 +1348,9 @@ func (o *Object) updateSimple(ctx context.Context, body io.Reader, getBody func(
_ = o.Remove(ctx)
return err
}
return nil

// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
}

// Remove an object

|
||||
// prepareServer the test server and return a function to tidy it up afterwards
|
||||
// with each request the headers option tests are executed
|
||||
func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// test the headers are there send send a dummy response to About
|
||||
// file server
|
||||
fileServer := http.FileServer(http.Dir(""))
|
||||
|
||||
// test the headers are there then pass on to fileServer
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
|
||||
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
|
||||
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
|
||||
fmt.Fprintf(w, `<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
|
||||
<d:response>
|
||||
<d:href>/remote.php/webdav/</d:href>
|
||||
<d:propstat>
|
||||
<d:prop>
|
||||
<d:quota-available-bytes>-3</d:quota-available-bytes>
|
||||
<d:quota-used-bytes>376461895</d:quota-used-bytes>
|
||||
</d:prop>
|
||||
<d:status>HTTP/1.1 200 OK</d:status>
|
||||
</d:propstat>
|
||||
</d:response>
|
||||
</d:multistatus>`)
|
||||
fileServer.ServeHTTP(w, r)
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
@@ -76,7 +68,7 @@ func TestHeaders(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
// send an About response since that is all the dummy server can return
|
||||
// any request will do
|
||||
_, err := f.Features().About(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
// Test Webdav filesystem interface
|
||||
package webdav
|
||||
package webdav_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/backend/webdav"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
@@ -13,10 +13,7 @@ import (
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavNextcloud:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: 1 * fs.Mebi,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -27,10 +24,7 @@ func TestIntegration2(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavOwncloud:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
Skip: true,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -41,10 +35,7 @@ func TestIntegration3(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavRclone:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
Skip: true,
|
||||
},
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -55,10 +46,6 @@ func TestIntegration4(t *testing.T) {
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestWebdavNTLM:",
|
||||
NilObject: (*Object)(nil),
|
||||
NilObject: (*webdav.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
@@ -331,6 +331,15 @@ func parsePath(path string) (root string) {
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) splitPath(remote string) (directory, leaf string) {
|
||||
directory, leaf = dircache.SplitPath(remote)
|
||||
if f.root != "" {
|
||||
// Adds the root folder to the path to get a full path
|
||||
directory = path.Join(f.root, directory)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||
|
||||
@@ -57,7 +57,6 @@ var osarches = []string{
|
||||
"linux/386",
|
||||
"linux/amd64",
|
||||
"linux/arm",
|
||||
"linux/arm-v6",
|
||||
"linux/arm-v7",
|
||||
"linux/arm64",
|
||||
"linux/mips",
|
||||
@@ -65,12 +64,10 @@ var osarches = []string{
|
||||
"freebsd/386",
|
||||
"freebsd/amd64",
|
||||
"freebsd/arm",
|
||||
"freebsd/arm-v6",
|
||||
"freebsd/arm-v7",
|
||||
"netbsd/386",
|
||||
"netbsd/amd64",
|
||||
"netbsd/arm",
|
||||
"netbsd/arm-v6",
|
||||
"netbsd/arm-v7",
|
||||
"openbsd/386",
|
||||
"openbsd/amd64",
|
||||
@@ -85,16 +82,13 @@ var archFlags = map[string][]string{
|
||||
"386": {"GO386=softfloat"},
|
||||
"mips": {"GOMIPS=softfloat"},
|
||||
"mipsle": {"GOMIPS=softfloat"},
|
||||
"arm": {"GOARM=5"},
|
||||
"arm-v6": {"GOARM=6"},
|
||||
"arm-v7": {"GOARM=7"},
|
||||
}
|
||||
|
||||
// Map Go architectures to NFPM architectures
|
||||
// Any missing are passed straight through
|
||||
var goarchToNfpm = map[string]string{
|
||||
"arm": "arm5",
|
||||
"arm-v6": "arm6",
|
||||
"arm": "arm6",
|
||||
"arm-v7": "arm7",
|
||||
}
|
||||
|
||||
@@ -225,7 +219,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
|
||||
"StringFileInfo": M{
|
||||
"CompanyName": "https://rclone.org",
|
||||
"ProductName": "Rclone",
|
||||
"FileDescription": "Rclone",
|
||||
"FileDescription": "Rsync for cloud storage",
|
||||
"InternalName": "rclone",
|
||||
"OriginalFilename": "rclone.exe",
|
||||
"LegalCopyright": "The Rclone Authors",
|
||||
|
||||
@@ -64,7 +64,6 @@ docs = [
|
||||
"sia.md",
|
||||
"swift.md",
|
||||
"pcloud.md",
|
||||
"pikpak.md",
|
||||
"premiumizeme.md",
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
|
||||
@@ -26,8 +26,7 @@ echo "Making release ${VERSION} anchor ${ANCHOR} to repo ${REPO}"
|
||||
gh release create "${VERSION}" \
|
||||
--repo ${REPO} \
|
||||
--title "rclone ${VERSION}" \
|
||||
--notes-file "/tmp/${VERSION}-release-notes" \
|
||||
--draft=true
|
||||
--notes-file "/tmp/${VERSION}-release-notes"
|
||||
|
||||
for build in build/*; do
|
||||
case $build in
|
||||
@@ -41,10 +40,6 @@ for build in build/*; do
|
||||
"${build}"
|
||||
done
|
||||
|
||||
gh release edit "${VERSION}" \
|
||||
--repo ${REPO} \
|
||||
--draft=false
|
||||
|
||||
gh release view "${VERSION}" \
|
||||
--repo ${REPO}
|
||||
|
||||
|
||||
@@ -12,14 +12,12 @@ import (
|
||||
|
||||
var (
|
||||
noAutoBrowser bool
|
||||
template string
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
|
||||
flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -30,15 +28,13 @@ Remote authorization. Used to authorize a remote or headless
|
||||
rclone from a machine with a browser - use as instructed by
|
||||
rclone config.
|
||||
|
||||
Use --auth-no-open-browser to prevent rclone to open auth
|
||||
link in default browser automatically.
|
||||
|
||||
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
|
||||
Use the --auth-no-open-browser to prevent rclone to open auth
|
||||
link in default browser automatically.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 3, command, args)
|
||||
return config.Authorize(context.Background(), args, noAutoBrowser, template)
|
||||
return config.Authorize(context.Background(), args, noAutoBrowser)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -128,6 +128,7 @@ var commandDefinition = &cobra.Command{
|
||||
ctx := context.Background()
|
||||
opt := Opt
|
||||
opt.applyContext(ctx)
|
||||
|
||||
if tzLocal {
|
||||
TZ = time.Local
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if maxDelete < 0 || maxDelete > 100 {
|
||||
return nil, rc.NewErrParamInvalid(errors.New("maxDelete must be a percentage between 0 and 100"))
|
||||
}
|
||||
opt.MaxDelete = int(maxDelete)
|
||||
ci.MaxDelete = maxDelete
|
||||
} else if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -3,8 +3,8 @@ test check-access-filters
|
||||
# NOTE: Include Other tests may result in listing diffs due to rclone processing order change. False fail.
|
||||
#
|
||||
# Tests are done in two phases:
|
||||
# - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicitly included directories
|
||||
# - INCLUDE OTHER tesss check that RCLONE_TEST files are found in all directories not explicitly excluded
|
||||
# - EXCLUDE OTHER tests check that RCLONE_TEST files are only found in the explicity included directories
|
||||
# - INCLUDE OTHER tesss check that RCLONE_TEST files are found in all directories not explicity excluded
|
||||
#
|
||||
# Each phase checks that:
|
||||
# - missing RCLONE_TEST files in don't care directories don't cause failures
|
||||
|
||||
@@ -72,9 +72,6 @@ you what happened to it. These are reminiscent of diff files.
|
||||
- |+ path| means path was missing on the destination, so only in the source
|
||||
- |* path| means path was present in source and destination but different.
|
||||
- |! path| means there was an error reading or hashing the source or dest.
|
||||
|
||||
The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
|
||||
option for more information.
|
||||
`, "|", "`")
|
||||
|
||||
// GetCheckOpt gets the options corresponding to the check flags
|
||||
@@ -145,7 +142,7 @@ match. It doesn't alter the source or destination.
|
||||
|
||||
For the [crypt](/crypt/) remote there is a dedicated command,
|
||||
[cryptcheck](/commands/rclone_cryptcheck/), that are able to check
|
||||
the checksums of the encrypted files.
|
||||
the checksums of the crypted files.
|
||||
|
||||
If you supply the |--size-only| flag, it will only compare the sizes not
|
||||
the hashes as well. Use this for a quick check.
|
||||
|
||||
cmd/cmd.go
@@ -73,13 +73,11 @@ func ShowVersion() {

linking, tagString := buildinfo.GetLinkingAndTags()

arch := buildinfo.GetArch()

fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/version: %s\n", osVersion)
fmt.Printf("- os/kernel: %s\n", osKernel)
fmt.Printf("- os/type: %s\n", runtime.GOOS)
fmt.Printf("- os/arch: %s\n", arch)
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
fmt.Printf("- go/version: %s\n", runtime.Version())
fmt.Printf("- go/linking: %s\n", linking)
fmt.Printf("- go/tags: %s\n", tagString)
@@ -401,15 +399,9 @@ func initConfig() {
// Start accounting
accounting.Start(ctx)

// Configure console
// Hide console window
if ci.NoConsole {
// Hide the console window
terminal.HideConsole()
} else {
// Enable color support on stdout if possible.
// This enables virtual terminal processing on Windows 10,
// adding native support for ANSI/VT100 escape sequences.
terminal.EnableColorsStdout()
}

// Load filters

@@ -8,7 +8,6 @@ import (
"io"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
@@ -568,21 +567,6 @@ func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
return -fuse.ENOSYS
}

// Getpath allows a case-insensitive file system to report the correct case of
// a file path.
func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string) {
defer log.Trace(path, "Getpath fh=%d", fh)("errc=%d, normalisedPath=%q", &errc, &normalisedPath)
node, _, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc, ""
}
normalisedPath = node.Path()
if !strings.HasPrefix("/", normalisedPath) {
normalisedPath = "/" + normalisedPath
}
return 0, normalisedPath
}

// Translate errors from mountlib
func translateError(err error) (errc int) {
if err == nil {
@@ -647,7 +631,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
var (
_ fuse.FileSystemInterface = (*FS)(nil)
_ fuse.FileSystemOpenEx = (*FS)(nil)
_ fuse.FileSystemGetpath = (*FS)(nil)
//_ fuse.FileSystemChflags = (*FS)(nil)
//_ fuse.FileSystemSetcrtime = (*FS)(nil)
//_ fuse.FileSystemSetchgtime = (*FS)(nil)

@@ -84,6 +84,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
if opt.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
}
if opt.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}
if opt.AllowOther {
options = append(options, "-o", "allow_other")
}
@@ -149,22 +152,18 @@ func waitFor(fn func() bool) (ok bool) {
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error, func() error, error) {
// Get mountpoint using OS specific logic
f := VFS.Fs()
mountpoint, err := getMountpoint(f, mountPath, opt)
mountpoint, err := getMountpoint(mountPath, opt)
if err != nil {
return nil, nil, err
}
fs.Debugf(nil, "Mounting on %q (%q)", mountpoint, opt.VolumeName)

// Create underlying FS
f := VFS.Fs()
fsys := NewFS(VFS)
host := fuse.NewFileSystemHost(fsys)
host.SetCapReaddirPlus(true) // only works on Windows
if opt.CaseInsensitive.Valid {
host.SetCapCaseInsensitive(opt.CaseInsensitive.Value)
} else {
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
}
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

// Create options
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)

@@ -28,7 +28,7 @@ func init() {
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
return nil, nil, errors.New("rclone mount is not supported on MacOS when rclone is installed via Homebrew. " +
"Please install the rclone binaries available at https://rclone.org/downloads/ " +
"instead if you want to use the rclone mount command")
return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
"Please install the binaries available at https://rclone." +
"org/downloads/ instead if you want to use the mount command")
}

@@ -9,10 +9,9 @@ import (
"os"

"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
)

func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, error) {
func getMountpoint(mountPath string, opt *mountlib.Options) (string, error) {
fi, err := os.Stat(mountPath)
if err != nil {
return "", fmt.Errorf("failed to retrieve mount path information: %w", err)
@@ -20,11 +19,5 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
if !fi.IsDir() {
return "", errors.New("mount path is not a directory")
}
if err = mountlib.CheckOverlap(f, mountPath); err != nil {
return "", err
}
if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
return "", err
}
return mountPath, nil
}

@@ -18,13 +18,13 @@ import (
var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)

// isNetworkSharePath returns true if the given string is a valid network share path,
// in the basic UNC format "\\Server\Share\Path", where the first two path components
// are required ("\\Server\Share", which represents the volume).
// Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
// not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
// not supported by cgofuse/winfsp.
// Note: There is a UNCPath function in lib/file, but it refers to any extended-length
// paths using prefix "\\?\", and not necessarily network resource UNC paths.
func isNetworkSharePath(l string) bool {
@@ -94,7 +94,7 @@ func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (strin
}

// handleLocalMountpath handles the case where mount path is a local file system path.
func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (string, error) {
func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, error) {
// Assuming path is drive letter or directory path, not network share (UNC) path.
// If drive letter: Must be given as a single character followed by ":" and nothing else.
// Else, assume directory path: Directory must not exist, but its parent must.
@@ -125,9 +125,6 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
}
return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err)
}
if err = mountlib.CheckOverlap(f, mountpath); err != nil {
return "", err
}
}
return mountpath, nil
}
@@ -161,19 +158,9 @@ func handleVolumeName(opt *mountlib.Options, volumeName string) {

// getMountpoint handles mounting details on Windows,
// where disk and network based file systems are treated different.
func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
// Inform about some options not relevant in this mode
if opt.AllowNonEmpty {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
if opt.AllowRoot {
fs.Logf(nil, "--allow-root flag does nothing on Windows")
}
if opt.AllowOther {
fs.Logf(nil, "--allow-other flag does nothing on Windows")
}
func getMountpoint(mountpath string, opt *mountlib.Options) (mountpoint string, err error) {

// Handle mountpath
// First handle mountpath
var volumeName string
if isDefaultPath(mountpath) {
// Mount path indicates defaults, which will automatically pick an unused drive letter.
@@ -185,10 +172,10 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
} else {
// Mount path is drive letter or directory path.
mountpoint, err = handleLocalMountpath(f, mountpath, opt)
mountpoint, err = handleLocalMountpath(mountpath, opt)
}

// Handle volume name
// Second handle volume name
handleVolumeName(opt, volumeName)

// Done, return mountpoint to be used, together with updated mount options.

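Aside (not part of the diff): the isNetworkSharePath hunk above tightens the UNC regex so that extended-length paths with the \\?\ prefix are rejected, while the previous pattern accepted them. Below is a small self-contained Go sketch comparing the two patterns on example paths; only the two regular expressions are taken from the hunk, the rest is illustrative.

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Patterns copied from the hunk above: the first excludes "?" from the
    // server component, so extended-length UNC paths ("\\?\...") no longer match.
    newRe := regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
    oldRe := regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)

    paths := []string{`\\server\share\path`, `\\?\UNC\server\share\path`}
    for _, p := range paths {
        fmt.Printf("%q new=%v old=%v\n", p, newRe.MatchString(p), oldRe.MatchString(p))
    }
}
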
@@ -22,11 +22,11 @@ func init() {

var commandDefinition = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of an encrypted remote.`,
Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: `
rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
This is the equivalent of running rclone [check](/commands/rclone_check/),
but able to check the checksums of the encrypted remote.
but able to check the checksums of the crypted remote.

For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
@@ -59,7 +59,7 @@ After it has run it will log the status of the encryptedremote:.
},
}

// cryptCheck checks the integrity of an encrypted remote
// cryptCheck checks the integrity of a crypted remote
func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
// Check to see fcrypt is a crypt
fcrypt, ok := fdst.(*crypt.Fs)

@@ -6,7 +6,6 @@ import (
"fmt"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -28,12 +27,12 @@ it will always be removed.
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f, fileName := cmd.NewFsFile(args[0])
fs, fileName := cmd.NewFsFile(args[0])
cmd.Run(true, false, command, func() error {
if fileName == "" {
return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
return fmt.Errorf("%s is a directory or doesn't exist", args[0])
}
fileObj, err := f.NewObject(context.Background(), fileName)
fileObj, err := fs.NewObject(context.Background(), fileName)
if err != nil {
return err
}

@@ -250,7 +250,7 @@ func (d *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (node fusefs.No
defer log.Trace(d, "name=%v, mode=%d, rdev=%d", req.Name, req.Mode, req.Rdev)("node=%v, err=%v", &node, &err)
if req.Rdev != 0 {
fs.Errorf(d, "Can't create device node %q", req.Name)
return nil, fuse.Errno(syscall.EIO)
return nil, fuse.EIO
}
var cReq = fuse.CreateRequest{
Name: req.Name,

@@ -82,11 +82,11 @@ func translateError(err error) error {
case vfs.OK:
return nil
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
return fuse.Errno(syscall.ENOENT)
return fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
return fuse.Errno(syscall.EEXIST)
return fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
return fuse.Errno(syscall.EPERM)
return fuse.EPERM
case vfs.ECLOSED:
return fuse.Errno(syscall.EBADF)
case vfs.ENOTEMPTY:

@@ -34,6 +34,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
if opt.AsyncRead {
options = append(options, fuse.AsyncRead())
}
if opt.AllowNonEmpty {
options = append(options, fuse.AllowNonEmptyMount())
}
if opt.AllowOther {
options = append(options, fuse.AllowOther())
}
@@ -69,17 +72,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
f := VFS.Fs()
if runtime.GOOS == "darwin" {
fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
}
if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
return nil, nil, err
}
if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
return nil, nil, err
}
fs.Debugf(f, "Mounting on %q", mountpoint)

if opt.DebugFUSE {
fuse.Debug = func(msg interface{}) {
@@ -87,6 +82,8 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
}
}

f := VFS.Fs()
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
if err != nil {
return nil, nil, err

@@ -95,6 +95,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
}
var opts []string
// FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
if fsys.opt.AllowNonEmpty {
opts = append(opts, "nonempty")
}
if fsys.opt.AllowOther {
opts = append(opts, "allow_other")
}
@@ -145,16 +148,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
f := VFS.Fs()
if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
return nil, nil, err
}
if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
return nil, nil, err
}
fs.Debugf(f, "Mounting on %q", mountpoint)

fsys := NewFS(VFS, opt)

// nodeFsOpts := &fusefs.PathNodeFsOptions{
// ClientInodes: false,
// Debug: mountlib.DebugFUSE,

@@ -33,20 +33,12 @@ func CheckMountEmpty(mountpoint string) error {
if err != nil {
return fmt.Errorf("cannot read %s: %w", mtabPath, err)
}
foundAutofs := false
for _, entry := range entries {
if entry.Dir == mountpointAbs {
if entry.Type != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
foundAutofs = true
if entry.Dir == mountpointAbs && entry.Type != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
}
// It isn't safe to list an autofs in the middle of mounting
if foundAutofs {
return nil
}
return checkMountEmpty(mountpoint)
return nil
}

// CheckMountReady checks whether mountpoint is mounted by rclone.
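Aside (not part of the diff): the CheckMountEmpty hunk above adds special handling for autofs entries, since it is not safe to list an autofs mountpoint in the middle of mounting. A self-contained Go sketch of that check follows; the mountEntry type and the error message are assumptions for illustration.

package main

import "fmt"

// mountEntry is a minimal stand-in (assumed for this sketch) for a parsed
// /proc/mounts entry; only the two fields used by the hunk above are modelled.
type mountEntry struct {
    Dir  string // mount point
    Type string // filesystem type, e.g. "autofs"
}

// checkNotMounted is an illustrative sketch of the autofs handling shown in the
// hunk: an existing mount at the target is an error unless it is an autofs
// entry, which is tolerated because listing it mid-mount is unsafe. The error
// text is a placeholder, not rclone's actual message.
func checkNotMounted(entries []mountEntry, mountpointAbs string) error {
    foundAutofs := false
    for _, entry := range entries {
        if entry.Dir == mountpointAbs {
            if entry.Type != "autofs" {
                return fmt.Errorf("%q is already mounted", mountpointAbs)
            }
            foundAutofs = true
        }
    }
    if foundAutofs {
        // Don't try to list the directory underneath an autofs mount.
        return nil
    }
    // rclone would fall through to an emptiness check of the directory here.
    return nil
}

func main() {
    entries := []mountEntry{{Dir: "/mnt/data", Type: "autofs"}}
    fmt.Println(checkNotMounted(entries, "/mnt/data")) // <nil>: autofs entries are tolerated
}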