mirror of https://github.com/rclone/rclone.git synced 2026-01-24 05:13:23 +00:00

Compare commits

1 commit

Author: Nick Craig-Wood
SHA1: 800ebb6480
Message: protondrive: fix nil pointer crash when downloading native document file (Fixes #7959)
Date: 2024-07-18 11:19:18 +01:00
612 changed files with 49432 additions and 79815 deletions

View File

@@ -17,21 +17,22 @@ on:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -42,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -58,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -75,20 +76,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.22.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.22
- job_name: go1.20
os: ubuntu-latest
go: '1.22'
go: '1.20'
quicktest: true
racequicktest: true
- job_name: go1.23
- job_name: go1.21
os: ubuntu-latest
go: '1.23'
go: '1.21'
quicktest: true
racequicktest: true
@@ -123,8 +124,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get update
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -217,7 +217,7 @@ jobs:
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
@@ -237,7 +237,7 @@ jobs:
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '>=1.23.0-rc.1'
go-version: '>=1.22.0-rc.1'
check-latest: true
cache: false
@@ -296,7 +296,7 @@ jobs:
run: govulncheck ./...
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
@@ -311,7 +311,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.24.0-rc.1'
go-version: '>=1.22.0-rc.1'
- name: Set global environment variables
shell: bash

View File

@@ -32,27 +32,15 @@ jobs:
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
uses: ilteoood/docker_buildx@1.1.0
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'

View File

@@ -13,7 +13,6 @@ linters:
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
@@ -99,46 +98,3 @@ linters-settings:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

View File

@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
test_all -backend drive
### Full integration testing
@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/overview.md` - overview docs
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -508,7 +508,7 @@ You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (eg `region` and `endpoint).
- Exclude your provider from genric config questions (eg `region` and `endpoint).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.

View File

@@ -12,8 +12,6 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf

View File

@@ -21,8 +21,6 @@ Current active maintainers of rclone are:
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
**This is a work in progress Draft**

MANUAL.html (generated, 5881 changed lines)

File diff suppressed because it is too large.

MANUAL.md (generated, 6372 changed lines)

File diff suppressed because it is too large.

MANUAL.txt (generated, 6311 changed lines)

File diff suppressed because it is too large.

View File

@@ -144,14 +144,10 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
rcdocs: rclone
bin/make_rc_docs.sh

View File

@@ -55,18 +55,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -93,12 +89,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
@@ -107,11 +101,9 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

View File

@@ -47,20 +47,13 @@ Early in the next release cycle update the dependencies.
* `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`
go 1.22.0
then go to manual mode. `go1.22` here is the lowest supported version
then go to manual mode. `go1.20` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
go mod tidy -go=1.20 -compat=1.20
```
If the `go mod tidy` fails use the output from it to remove the
@@ -93,16 +86,6 @@ build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:
go run github.com/icholy/gomajor@latest list -major
Expect API breakage when updating major versions.
## Tidy beta
At some point after the release run
@@ -185,8 +168,6 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

View File

@@ -1 +1 @@
v1.70.0
v1.68.0

View File

@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
configfile.Install()
// Configure the remote
config.FileSetValue(remoteName, "type", "alias")
config.FileSetValue(remoteName, "remote", root)
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {

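Both sides of the hunk above configure the test remote programmatically; one side uses the newer `config.FileSetValue` API and the other the older `config.FileSet` (the same migration shows up again in the cache tests below). A minimal sketch of the pattern, with a made-up remote name and path:

```go
package main

import (
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	// Install the config file handlers, then define an alias remote
	// pointing at a local directory, as the test's prepare() does.
	// "testAlias" and "/tmp/alias-root" are illustrative values.
	configfile.Install()
	config.FileSetValue("testAlias", "type", "alias")
	config.FileSetValue("testAlias", "remote", "/tmp/alias-root")
}
```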
View File

@@ -10,7 +10,6 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
@@ -18,16 +17,13 @@ import (
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hasher"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -43,7 +39,6 @@ import (
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/pixeldrain"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"

File diff suppressed because it is too large.

View File

@@ -3,149 +3,16 @@
package azureblob
import (
"context"
"encoding/base64"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

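The `TestBlockIDCreator` test above pins down the block ID layout: eight big-endian bytes of chunk number followed by the creator's eight random bytes, base64 encoded. A self-contained sketch of that encoding (`makeBlockID` is a hypothetical stand-in for the real `newBlockID` method):

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// makeBlockID sketches the layout the test checks: 8 big-endian bytes
// of chunk number, then 8 random bytes, base64 encoded.
func makeBlockID(chunkNumber uint64, random [8]byte) string {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[:8], chunkNumber)
	copy(buf[8:], random[:])
	return base64.StdEncoding.EncodeToString(buf)
}

func main() {
	random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	// Prints the same value the test asserts: /ty6mHZUMhABAgMEBQYHCA==
	fmt.Println(makeBlockID(0xFEDCBA9876543210, random))
}
```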
View File

@@ -15,17 +15,13 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -44,7 +40,6 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -53,13 +48,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)
func TestValidateAccessTier(t *testing.T) {

View File

@@ -237,30 +237,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -343,12 +319,10 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -419,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx),
}
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
ClientOptions: policyClientOptions,
}
// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -440,8 +412,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
ClientOptions: policyClientOptions,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@@ -452,13 +423,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {
@@ -933,7 +897,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request because the [fs.List] method does not
// May make a network request becaue the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
@@ -1071,10 +1035,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
return fmt.Errorf("update: unable to create file: %w", createErr)
}
} else if size != o.Size() {
} else {
// Resize the file if needed
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
if size != o.Size() {
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
}
}
}

View File

@@ -42,10 +42,9 @@ type Bucket struct {
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base

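The `LifecycleRule` day fields are `*int` rather than `int` so that JSON `null` (no rule configured) stays distinguishable from an explicit `0`. A small sketch using a copy of the two-field form of the struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// LifecycleRule copies the two-field form above; pointer fields let
// "unset" marshal as null rather than 0.
type LifecycleRule struct {
	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
	FileNamePrefix            string `json:"fileNamePrefix"`
}

func main() {
	days := 30
	out, _ := json.Marshal(LifecycleRule{DaysFromHidingToDeleting: &days})
	// {"daysFromHidingToDeleting":30,"daysFromUploadingToHiding":null,"fileNamePrefix":""}
	fmt.Println(string(out))
}
```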
View File

@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}

View File

@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -1319,22 +1318,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
toBeDeleted <- object
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
toBeDeleted <- object
}
last = remote
tr.Done(ctx, nil)
@@ -2238,7 +2231,6 @@ This will dump something like this showing the lifecycle rules.
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
@@ -2265,9 +2257,8 @@ overwrites will still cause versions to be made.
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
@@ -2287,23 +2278,14 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err

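One side of the `purge` hunk above wraps each channel send in `operations.SkipDestructive`, which is how rclone gates destructive actions behind `--dry-run` and `--interactive`: it returns true when the action should be skipped, logging what would have happened. A minimal sketch of the pattern, assuming rclone's `fs` and `fs/operations` packages:

```go
package main

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// Simulate --dry-run by setting the flag on a config attached to
	// the context, as the DryRun subtests in this changeset do.
	ctx, ci := fs.AddConfig(context.Background())
	ci.DryRun = true

	// SkipDestructive returns true here, so the delete is skipped.
	if !operations.SkipDestructive(ctx, "bucket/file.txt", "delete") {
		// the real deletion would go here
	}
}
```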
View File

@@ -5,7 +5,6 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
@@ -14,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
@@ -465,161 +463,24 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -43,9 +43,9 @@ import (
"github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
)
const (
@@ -64,10 +64,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -254,10 +256,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}
@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
return shouldRetry(ctx, resp, err)
})
if err != nil {
// fmt.Printf("...Error %v\n", err)
//fmt.Printf("...Error %v\n", err)
return "", err
}
// fmt.Printf("...Id %q\n", *info.Id)
@@ -966,26 +966,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// check if dest already exists
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if item != nil { // dest already exists, need to copy to temp name and then move
tempSuffix := "-rclone-copy-" + random.String(8)
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
if err != nil {
return nil, err
}
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
err = f.deleteObject(ctx, item.ID)
if err != nil {
return nil, err
}
return f.Move(ctx, tempObj, remote)
}
// Copy the object
opts := rest.Opts{
Method: "POST",

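One side of the `getDecryptedPrivateKey` hunk adds a `block == nil` guard before using the decode result. The guard matters because `encoding/pem.Decode` signals failure by returning a nil block rather than an error. A standalone sketch of the safe pattern:

```go
package main

import (
	"encoding/pem"
	"errors"
	"fmt"
)

// decodeKeyPEM shows the guard from the hunk above: pem.Decode reports
// failure with a nil block, so it must be checked before use.
func decodeKeyPEM(data []byte) ([]byte, error) {
	block, rest := pem.Decode(data)
	if block == nil {
		return nil, errors.New("failed to PEM decode private key")
	}
	if len(rest) > 0 {
		return nil, errors.New("extra data included in private key")
	}
	return block.Bytes, nil
}

func main() {
	_, err := decodeKeyPEM([]byte("not a pem"))
	fmt.Println(err) // failed to PEM decode private key
}
```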
View File

@@ -409,16 +409,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
decPass, err := obscure.Reveal(opt.PlexPassword)
if err != nil {
decPass = opt.PlexPassword
}
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
m.Set("plex_token", token)
})
if err != nil {
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
}

View File

@@ -10,6 +10,7 @@ import (
goflag "flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"path"
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
fs.Logf(nil, "original size: %v", originalSize)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -416,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize++ // FIXME newline gets in, likely test data issue
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
fs.Logf(nil, "updated size: %v", len(data2))
log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
fs.Logf(nil, "not expected listing /test: %v", li)
log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/second: %v", li)
log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing: %v", li)
log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
fs.Logf(nil, "not found /test")
log.Printf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
fs.Logf(nil, "not found /test/one")
log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
fs.Logf(nil, "not found /test/one/test2")
log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing /test/one/test2")
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
@@ -828,7 +829,7 @@ func newRun() *run {
} else {
r.tmpUploadDir = uploadDir
}
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}
@@ -849,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.GetRemotes() {
if s.Name == remote {
for _, s := range config.FileSections() {
if s == remote {
remoteExists = true
}
}
@@ -874,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
config.FileSetValue(localRemote, "type", "local")
config.FileSetValue(localRemote, "nounc", "true")
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.GetValue(remote, "type")
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -890,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.GetValue(remote, "remote")
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.GetValue(remoteWrapping, "type")
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -1191,7 +1192,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
size -= 32
size = size - 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)

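The `cleanSize` hunk above converts a ciphertext size back to a plaintext size when the wrapped remote is crypt: strip the 32-byte file header, then count 65536 data bytes for every 65552-byte encrypted block, dropping the 16-byte overhead of the final partial block. A worked sketch of the same arithmetic:

```go
package main

import "fmt"

// cleanSize mirrors the arithmetic above: strip the 32-byte crypt file
// header, then remove 16 bytes of overhead per 64 KiB block.
func cleanSize(size int64) int64 {
	const blockData = 65536
	const blockOverhead = 16
	denominator := int64(blockData + blockOverhead)
	size -= 32
	quotient := size / denominator
	remainder := size % denominator
	return quotient*blockData + remainder - blockOverhead
}

func main() {
	// One full block plus a 100-byte tail:
	// 32 + (65536+16) + (100+16) = 65700 bytes of ciphertext.
	fmt.Println(cleanSize(65700)) // 65636 = 65536 + 100
}
```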
View File

@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart -= offset
chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false
@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
@@ -415,8 +415,10 @@ func (w *worker) run() {
continue
}
}
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)

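Both hunks above snap an offset down to a chunk boundary before queueing reads; `chunkStart -= offset` after `offset := chunkStart % chunkSize` is plain down-alignment. A one-line sketch (5 MiB, the cache backend's default chunk size, is assumed for the example):

```go
package main

import "fmt"

// alignDown snaps an offset to the start of its containing chunk,
// matching chunkStart -= chunkStart % chunkSize in the hunks above.
func alignDown(offset, chunkSize int64) int64 {
	return offset - offset%chunkSize
}

func main() {
	const chunkSize = 5 * 1024 * 1024 // assumed default chunk size (5M)
	fmt.Println(alignDown(12_000_000, chunkSize)) // 10485760
}
```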
View File

@@ -987,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
}
if o.main == nil && len(o.chunks) == 0 {
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.

View File

@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

View File

@@ -1,711 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// Return an error with an Fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic escapes characters (currently "!") that are special in Cloudinary's Elasticsearch-based search
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits until the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
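// For example, with eventually_consistent_delay set to 7 seconds, a read
// issued 2 seconds after the last write sleeps for the remaining 5 seconds
// before hitting the API (illustrative numbers).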
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// Use the folders API to list folders
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
// Reset the pagination cursor before listing assets
nextCursor = ""
for {
// Use the AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
// getSuggestedPublicID returns a deterministic public ID derived from the
// asset folder and display name (modTime is currently not part of the hash)
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
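// Illustrative example (not part of the backend API): uploading the same
// folder/name pair always yields the same suggested ID, e.g.
//
//	id := f.getSuggestedPublicID("rclone/media", "a.txt", time.Now())
//	// id is the hex-encoded BLAKE3-256 of "rclone/media/a.txt" and is
//	// identical on every call, regardless of modTime.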
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to let customers choose their own public_id when duplicate names are not a crucial use case.
// Upload presets that apply randomness to the public ID would not work well with rclone's duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional check because Cloudinary will delete a folder that contains
// no assets even if it still has (empty) sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
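// For example, a rate-limit error whose message ends with a timestamp such
// as "... Try again on 2024-01-02 15:04:05 UTC" (illustrative message text)
// is converted into a RetryAfter error that waits until the quoted time.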
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}
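// Example usage once the remote is configured (standard rclone commands,
// shown here as a sketch):
//
//	rclone mkdir cloudinary:media
//	rclone copy /tmp/images cloudinary:media
//	rclone lsf cloudinary:media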

View File

@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}
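// To run these tests against a real remote, the usual rclone pattern is
// (a sketch, run from the backend/cloudinary directory):
//
//	go test -v -remote TestCloudinary: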

View File

@@ -38,7 +38,6 @@ import (
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
chunkStreams = 0 // Streams to use for reading
bufferSize = 8388608
heuristicBytes = 1048576
@@ -1363,7 +1362,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
// Get file handle
var file io.Reader
if offset != 0 {

View File

@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
for _, runeValue := range plaintext {
dir += int(runeValue)
}
dir %= 256
dir = dir % 256
// We'll use this number to store in the result filename...
var result bytes.Buffer
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if pos >= 26 {
pos -= 6
}
pos -= thisdir
pos = pos - thisdir
if pos < 0 {
pos += 52
}
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
fh.buf[i] = 0
(*fh.buf)[i] = 0
}
}
fh.bufIndex = 0

View File

@@ -80,10 +80,9 @@ const (
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauthutil.Config{
driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -121,7 +120,6 @@ var (
"text/html": ".html",
"text/plain": ".txt",
"text/tab-separated-values": ".tsv",
"text/markdown": ".md",
}
_mimeTypeToExtensionLinks = map[string]string{
"application/x-link-desktop": ".desktop",
@@ -2221,7 +2219,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case in <- job:
default:
overflow = append(overflow, job)
wg.Done()
wg.Add(-1)
}
}
@@ -3525,14 +3523,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}
// copy or move file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil {
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3553,21 +3551,14 @@ func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string
if err != nil {
return err
}
var opErr error
if operation == "moveid" {
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
} else {
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
}
if opErr != nil {
return fmt.Errorf("%s failed: %w", operation, opErr)
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return fmt.Errorf("copy failed: %w", err)
}
return nil
}
// Run the drive query calling fn on each entry found
func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
list := f.svc.Files.List()
if query != "" {
list.Q(query)
@@ -3586,7 +3577,10 @@ func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (e
if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder")
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
var results []*drive.File
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
@@ -3594,66 +3588,20 @@ func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (e
return f.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to execute query: %w", err)
return nil, fmt.Errorf("failed to execute query: %w", err)
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
for _, item := range files.Files {
fn(item)
}
results = append(results, files.Files...)
if files.NextPageToken == "" {
break
}
list.PageToken(files.NextPageToken)
}
return nil
}
// Run the drive query returning the entries found
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
var results []*drive.File
err = f.queryFn(ctx, query, func(item *drive.File) {
results = append(results, item)
})
if err != nil {
return nil, err
}
return results, nil
}
// Rescue, list or delete orphaned files
func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
if len(item.Parents) != 0 {
return
}
// Have found an orphaned entry
if delete {
fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
err = f.delete(ctx, item.Id, true)
if err != nil {
fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
}
} else if dirID == "" {
operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
} else {
fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(item.Id, nil).
AddParents(dirID).
Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
}
}
})
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3798,28 +3746,6 @@ attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "moveid",
Short: "Move files by ID",
Long: `This command moves files by ID
Usage:
rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2
It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).
The path should end with a / to indicate move the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.
If the destination is a drive backend then server-side moving will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
@@ -3867,37 +3793,6 @@ The result is a JSON array of matches, for example:
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
}
]`,
}, {
Name: "rescue",
Short: "Rescue or delete any orphaned files",
Long: `This command rescues or deletes any orphaned files or directories.
Sometimes files can get orphaned in Google Drive. This means that they
are no longer in any folder in Google Drive.
This command finds those files and either rescues them to a directory
you specify or deletes them.
Usage:
This can be used in 3 ways.
First, list all orphaned files
rclone backend rescue drive:
Second rescue all orphaned files to the directory indicated
rclone backend rescue drive: "relative/path/to/rescue/directory"
e.g. To rescue all orphans to a directory called "Orphans" in the top level
rclone backend rescue drive: Orphans
Third delete all orphaned files to the trash
rclone backend rescue drive: -o delete
`,
}}
// Command the backend to run a named command
@@ -3998,16 +3893,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
case "copyid", "moveid":
case "copyid":
if len(arg)%2 != 0 {
return nil, errors.New("need an even number of arguments")
}
for len(arg) > 0 {
id, dest := arg[0], arg[1]
arg = arg[2:]
err = f.copyOrMoveID(ctx, name, id, dest)
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
}
}
return nil, nil
@@ -4026,22 +3921,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} else {
return nil, errors.New("need a query argument")
}
case "rescue":
dirID := ""
_, delete := opt["delete"]
if len(arg) == 0 {
// no arguments - list only
} else if !delete && len(arg) == 1 {
dir := arg[0]
dirID, err = f.dirCache.FindDir(ctx, dir, true)
if err != nil {
return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
}
fs.Infof(f, "Rescuing orphans into %q", dir)
} else {
return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
}
return nil, f.rescue(ctx, dirID, delete)
default:
return nil, fs.ErrorCommandNotFound
}
@@ -4086,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return "", nil

View File

@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
wantErr error
}{
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
} {
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
}
t.Run("BadID", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
@@ -506,31 +506,19 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't moveid directory")
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("MoveWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("CopyWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
@@ -659,7 +647,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)

View File

@@ -11,6 +11,7 @@ import (
"fmt"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs/fserrors"
)
// finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
}
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {

View File

@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -94,7 +93,7 @@ const (
var (
// Description of how to auth for this app
dropboxConfig = &oauthutil.Config{
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
@@ -109,8 +108,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -135,7 +133,7 @@ var (
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
@@ -318,46 +316,32 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Some specific errors which should be excluded from retries
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// First check for specific errors
//
// These come back from the SDK in a whole host of different
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
if err == nil {
return false, err
}
errString := err.Error()
// First check for specific errors
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
@@ -402,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
if err != nil {
return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -1036,20 +1020,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Temporary Object under construction
dstObj := &Object{
fs: f,
@@ -1063,6 +1040,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
@@ -1714,10 +1692,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {

View File

@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374, 412: // Flood detected seems to be #412 now
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:

View File

@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
// Find current directory ID
srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if srcDirectoryID == dstDirectoryID {
// No rename needed
if srcLeaf == dstLeaf {
return src, nil
}
resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
url = resp.URLs[0].URL
} else {
dstFolderID, err := strconv.Atoi(dstDirectoryID)
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
rename := dstLeaf
// No rename needed
if srcLeaf == dstLeaf {
rename = ""
}
resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}

View File

@@ -1,901 +0,0 @@
// Package filescom provides an interface to the Files.com
// object storage system.
package filescom
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
files_sdk "github.com/Files-com/files-sdk-go/v3"
"github.com/Files-com/files-sdk-go/v3/bundle"
"github.com/Files-com/files-sdk-go/v3/file"
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
"github.com/Files-com/files-sdk-go/v3/folder"
"github.com/Files-com/files-sdk-go/v3/session"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
)
/*
Run of rclone info
stringNeedsEscaping = []rune{
'/', '\x00'
}
maxFileLength = 512 // for 1 byte unicode characters
maxFileLength = 512 // for 2 byte unicode characters
maxFileLength = 512 // for 3 byte unicode characters
maxFileLength = 512 // for 4 byte unicode characters
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = true
*/
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
folderNotEmpty = "processing-failure/folder-not-empty"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "filescom",
Description: "Files.com",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "site",
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
}, {
Name: "username",
Help: "The username used to authenticate with Files.com.",
}, {
Name: "password",
Help: "The password used to authenticate with Files.com.",
IsPassword: true,
}, {
Name: "api_key",
Help: "The API key used to authenticate with Files.com.",
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Site string `config:"site"`
Username string `config:"username"`
Password string `config:"password"`
APIKey string `config:"api_key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote files.com server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
fileClient *file.Client // the connection to the file API
folderClient *folder.Client // the connection to the folder API
migrationClient *file_migration.Client // the connection to the file migration API
bundleClient *bundle.Client // the connection to the bundle API
pacer *fs.Pacer // pacer for API calls
}
// Object describes a files object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64 // size of the object
crc32 string // CRC32 of the object content
md5 string // MD5 of the object content
mimeType string // Content-Type of the object
modTime time.Time // modification time of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("files root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if apiErr, ok := err.(files_sdk.ResponseError); ok {
for _, e := range retryErrorCodes {
if apiErr.HttpCode == e {
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
}
}
return fserrors.ShouldRetry(err), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
params := files_sdk.FileFindParams{
Path: f.absPath(path),
}
var file files_sdk.File
err = f.pacer.Call(func() (bool, error) {
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
return &file, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
config, err := newClientConfig(ctx, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
fileClient: &file.Client{Config: config},
folderClient: &folder.Client{Config: config},
migrationClient: &file_migration.Client{Config: config},
bundleClient: &bundle.Client{Config: config},
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f)
if f.root != "" {
info, err := f.readMetaDataForPath(ctx, "")
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
return f, fs.ErrorIsFile
}
}
return f, err
}
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
if opt.Site != "" {
if strings.Contains(opt.Site, ".") {
config.EndpointOverride = opt.Site
} else {
config.Subdomain = opt.Site
}
_, err = url.ParseRequestURI(config.Endpoint())
if err != nil {
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
return
}
}
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
if opt.APIKey != "" {
config.APIKey = opt.APIKey
return
}
if opt.Username == "" {
err = errors.New("username not found")
return
}
if opt.Password == "" {
err = errors.New("password not found")
return
}
opt.Password, err = obscure.Reveal(opt.Password)
if err != nil {
return
}
sessionClient := session.Client{Config: config}
params := files_sdk.SessionCreateParams{
Username: opt.Username,
Password: opt.Password,
}
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
if err != nil {
err = fmt.Errorf("couldn't create session: %w", err)
return
}
config.SessionId = thisSession.Id
return
}
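// For example, site "mysite" is treated as a subdomain (typically expanding
// to mysite.files.com), while "myfiles.customdomain.com" contains a dot and
// is used directly as an endpoint override (an assumption based on the help
// text above).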
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if file != nil {
err = o.setMetaData(file)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
var it *folder.Iter
params := files_sdk.FolderListForParams{
Path: f.absPath(dir),
}
err = f.pacer.Call(func() (bool, error) {
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
}
for it.Next() {
item := ptr(it.File())
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
remote = path.Join(dir, remote)
if remote == dir {
continue
}
if item.IsDir() {
d := fs.NewDir(remote, item.ModTime())
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
return nil, err
}
entries = append(entries, o)
}
}
err = it.Err()
if files_sdk.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
// Create the directory for the object if it doesn't exist
err = f.mkParentDir(ctx, remote)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
func (f *Fs) mkdir(ctx context.Context, path string) error {
if path == "" || path == "." {
return nil
}
params := files_sdk.FolderCreateParams{
Path: path,
MkdirParents: ptr(true),
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if files_sdk.IsExist(err) {
return nil
}
return err
}
// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.mkdir(ctx, f.absPath(dir))
}
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
o := Object{
fs: f,
remote: dir,
}
return o.SetModTime(ctx, modTime)
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
path := f.absPath(dir)
if path == "" || path == "." {
return errors.New("can't purge root directory")
}
params := files_sdk.FileDeleteParams{
Path: path,
Recursive: ptr(!check),
}
err := f.pacer.Call(func() (bool, error) {
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
// Allow for eventual consistency deletion of child objects.
if isFolderNotEmpty(err) {
return true, err
}
return shouldRetry(ctx, err)
})
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorDirNotFound
} else if isFolderNotEmpty(err) {
return fs.ErrorDirectoryNotEmpty
}
return fmt.Errorf("rmdir failed: %w", err)
}
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
if err != nil {
return
}
srcPath := srcObj.fs.absPath(srcObj.remote)
dstPath := f.absPath(remote)
if strings.EqualFold(srcPath, dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as they have the same name when lowercase", srcPath, dstPath)
}
// Create temporary object
dstObj, err = f.createObject(ctx, remote)
if err != nil {
return
}
// Copy the object
params := files_sdk.FileCopyParams{
Path: srcPath,
Destination: dstPath,
Overwrite: ptr(true),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return
}
err = f.waitForAction(ctx, action, "copy")
if err != nil {
return
}
err = dstObj.SetModTime(ctx, srcObj.modTime)
return
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// move a file or folder
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
// Move the object
params := files_sdk.FileMoveParams{
Path: src.absPath(srcRemote),
Destination: f.absPath(dstRemote),
}
var action files_sdk.FileAction
err = f.pacer.Call(func() (bool, error) {
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
err = f.waitForAction(ctx, action, "move")
if err != nil {
return nil, err
}
info, err = f.readMetaDataForPath(ctx, dstRemote)
return
}
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
var migration files_sdk.FileMigration
err = f.pacer.Call(func() (bool, error) {
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
// noop
}, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err == nil && migration.Status != "completed" {
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
}
return
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// Do the move
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
_, err = f.readMetaDataForPath(ctx, dstRemote)
if err == nil {
return fs.ErrorDirExists
}
// Create temporary object
dstObj, err := f.createObject(ctx, dstRemote)
if err != nil {
return
}
// Do the move
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
return
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
params := files_sdk.BundleCreateParams{
Paths: []string{f.absPath(remote)},
}
if expire < fs.DurationOff {
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
}
var bundle files_sdk.Bundle
err = f.pacer.Call(func() (bool, error) {
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
url = bundle.Url
return
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.CRC32, hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash (CRC32 or MD5) of an object as a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
switch t {
case hash.CRC32:
if o.crc32 == "" {
return "", nil
}
return fmt.Sprintf("%08s", o.crc32), nil
case hash.MD5:
return o.md5, nil
}
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(file *files_sdk.File) error {
o.modTime = file.ModTime()
if !file.IsDir() {
o.size = file.Size
o.crc32 = file.Crc32
o.md5 = file.Md5
o.mimeType = file.MimeType
}
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil {
if files_sdk.IsNotExist(err) {
return fs.ErrorObjectNotFound
}
return err
}
if file.IsDir() {
return fs.ErrorIsDir
}
return o.setMetaData(file)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
params := files_sdk.FileUpdateParams{
Path: o.fs.absPath(o.remote),
ProvidedMtime: &modTime,
}
var file files_sdk.File
err = o.fs.pacer.Call(func() (bool, error) {
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.setMetaData(&file)
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset, count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
params := files_sdk.FileDownloadParams{
Path: o.fs.absPath(o.remote),
}
headers := &http.Header{}
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.fileClient.Download(
params,
files_sdk.WithContext(ctx),
files_sdk.RequestHeadersOption(headers),
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
in = closer
return err
}),
)
return shouldRetry(ctx, err)
})
return
}
// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
return &t
}
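// For example, as used above: MkdirParents: ptr(true) or Recursive: ptr(!check).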
func isFolderNotEmpty(err error) bool {
var re files_sdk.ResponseError
ok := errors.As(err, &re)
return ok && re.Type == folderNotEmpty
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
uploadOpts := []file.UploadOption{
file.UploadWithContext(ctx),
file.UploadWithReader(in),
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
file.UploadWithProvidedMtime(src.ModTime(ctx)),
}
err := o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Upload(uploadOpts...)
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
return o.readMetaData(ctx)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := files_sdk.FileDeleteParams{
Path: o.fs.absPath(o.remote),
}
return o.fs.pacer.Call(func() (bool, error) {
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
return shouldRetry(ctx, err)
})
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

View File

@@ -1,17 +0,0 @@
// Test Files filesystem interface
package filescom_test
import (
"testing"
"github.com/rclone/rclone/backend/filescom"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFilesCom:",
NilObject: (*filescom.Object)(nil),
})
}
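// As with other rclone backends, this test is normally run against a
// configured remote, e.g. (assuming a "TestFilesCom:" remote has been set up):
//
//	go test -v -remote TestFilesCom: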

View File

@@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
@@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`"),
`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
@@ -180,28 +180,12 @@ If this is set and no password is supplied then rclone will ask for a password
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "no_check_upload",
Default: false,
Help: `Don't check the upload is OK
Normally rclone will try to check the upload exists after it has
uploaded a file to make sure the size and modification time are as
expected.
This flag stops rclone doing these checks. This enables uploading to
folders which are write-only.
You will likely need to also use the --inplace flag if uploading to
a write-only folder.
`,
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
@@ -248,7 +232,6 @@ type Options struct {
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -1320,16 +1303,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("update stor: %w", err)
}
o.fs.putFtpConnection(&c, nil)
if o.fs.opt.NoCheckUpload {
o.info = &FileInfo{
Name: o.remote,
Size: uint64(src.Size()),
ModTime: src.ModTime(ctx),
precise: true,
IsDir: false,
}
return nil
}
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
return fmt.Errorf("SetModTime: %w", err)
}

View File

@@ -1,311 +0,0 @@
// Package api has type definitions for gofile
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// gofile API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
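// Because timeFormat embeds the surrounding quotes, the JSON string quotes
// are parsed and emitted directly. A round-trip sketch with an illustrative
// timestamp:
//
//	var tm Time
//	_ = tm.UnmarshalJSON([]byte(`"2017-05-03T07:26:10-07:00"`))
//	out, _ := tm.MarshalJSON() // []byte(`"2017-05-03T07:26:10-07:00"`)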
// Error is returned from gofile when things go wrong
type Error struct {
Status string `json:"status"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("Error %q", e.Status)
return out
}
// IsError returns true if there is an error
func (e Error) IsError() bool {
return e.Status != "ok"
}
// Err returns err if not nil, e if IsError() is true, or nil otherwise
func (e Error) Err(err error) error {
if err != nil {
return err
}
if e.IsError() {
return e
}
return nil
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
)
// Item describes a folder or a file as returned by /contents
type Item struct {
ID string `json:"id"`
ParentFolder string `json:"parentFolder"`
Type string `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Code string `json:"code"`
CreateTime int64 `json:"createTime"`
ModTime int64 `json:"modTime"`
Link string `json:"link"`
MD5 string `json:"md5"`
MimeType string `json:"mimetype"`
ChildrenCount int `json:"childrenCount"`
DirectLinks map[string]*DirectLink `json:"directLinks"`
//Public bool `json:"public"`
//ServerSelected string `json:"serverSelected"`
//Thumbnail string `json:"thumbnail"`
//DownloadCount int `json:"downloadCount"`
//TotalDownloadCount int64 `json:"totalDownloadCount"`
//TotalSize int64 `json:"totalSize"`
//ChildrenIDs []string `json:"childrenIds"`
Children map[string]*Item `json:"children"`
}
// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
return t.Unix()
}
// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
return time.Unix(t, 0)
}
// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
DirectLink string `json:"directLink"`
}
// Contents is returned from the /contents call
type Contents struct {
Error
Data struct {
Item
} `json:"data"`
Metadata Metadata `json:"metadata"`
}
// Metadata is returned when paging is in use
type Metadata struct {
TotalCount int `json:"totalCount"`
TotalPages int `json:"totalPages"`
Page int `json:"page"`
PageSize int `json:"pageSize"`
HasNextPage bool `json:"hasNextPage"`
}
// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
Error
Data struct {
ID string `json:"id"`
} `json:"data"`
}
// Stats of storage and traffic
type Stats struct {
FolderCount int64 `json:"folderCount"`
FileCount int64 `json:"fileCount"`
Storage int64 `json:"storage"`
TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
TrafficReqDownloaded int64 `json:"trafficReqDownloaded"`
TrafficWebDownloaded int64 `json:"trafficWebDownloaded"`
}
// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
Error
Data struct {
ID string `json:"id"`
Email string `json:"email"`
Tier string `json:"tier"`
PremiumType string `json:"premiumType"`
Token string `json:"token"`
RootFolder string `json:"rootFolder"`
SubscriptionProvider string `json:"subscriptionProvider"`
SubscriptionEndDate int `json:"subscriptionEndDate"`
SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"`
SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"`
StatsCurrent Stats `json:"statsCurrent"`
// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
} `json:"data"`
}
// CreateFolderRequest is the input to /contents/createFolder
type CreateFolderRequest struct {
ParentFolderID string `json:"parentFolderId"`
FolderName string `json:"folderName"`
ModTime int64 `json:"modTime,omitempty"`
}
// CreateFolderResponse is the output from /contents/createFolder
type CreateFolderResponse struct {
Error
Data Item `json:"data"`
}
// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// DeleteResponse is the output from DELETE /contents
type DeleteResponse struct {
Error
Data map[string]Error
}
// Server is an upload server
type Server struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
}
// UploadResponse is returned by POST /contents/uploadfile
type UploadResponse struct {
Error
Data Item `json:"data"`
}
// DirectLinksRequest specifies the parameters for the direct link
type DirectLinksRequest struct {
ExpireTime int64 `json:"expireTime,omitempty"`
SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
DomainsAllowed []any `json:"domainsAllowed,omitempty"`
Auth []any `json:"auth,omitempty"`
}
// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
Error
Data struct {
ExpireTime int64 `json:"expireTime"`
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
DomainsAllowed []any `json:"domainsAllowed"`
Auth []any `json:"auth"`
IsReqLink bool `json:"isReqLink"`
ID string `json:"id"`
DirectLink string `json:"directLink"`
} `json:"data"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// The Value of the attribute to define:
// For Attribute "name": The name of the content (file or folder)
// For Attribute "description": The description displayed on the download page (folder only)
// For Attribute "tags": A comma-separated list of tags (folder only)
// For Attribute "public": either true or false (folder only)
// For Attribute "expiry": A unix timestamp of the expiration date (folder only)
// For Attribute "password": The password to set (folder only)
type UpdateItemRequest struct {
Attribute string `json:"attribute"`
Value any `json:"attributeValue"`
}
// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
Error
Data Item `json:"data"`
}
// MoveRequest is the input to /contents/move
type MoveRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// MoveResponse is returned by POST /contents/move
type MoveResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// CopyRequest is the input to /contents/copy
type CopyRequest struct {
FolderID string `json:"folderId"`
ContentsID string `json:"contentsId"` // comma separated list of IDs
}
// CopyResponse is returned by POST /contents/copy
type CopyResponse struct {
Error
Data map[string]struct {
Error
Item `json:"data"`
} `json:"data"`
}
// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
Error
Data struct {
Server string `json:"server"`
Test string `json:"test"`
} `json:"data"`
}

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test Gofile filesystem interface
package gofile_test
import (
"testing"
"github.com/rclone/rclone/backend/gofile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestGoFile:",
NilObject: (*gofile.Object)(nil),
})
}

View File

@@ -62,10 +62,9 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauthutil.Config{
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -107,12 +106,6 @@ func init() {
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "access_token",
Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
Hide: fs.OptionHideConfigurator,
Sensitive: true,
Advanced: true,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -386,7 +379,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote storage server
@@ -543,9 +535,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else if opt.AccessToken != "" {
ts := oauth2.Token{AccessToken: opt.AccessToken}
oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {
@@ -955,6 +944,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist

View File

@@ -28,11 +28,13 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
@@ -59,14 +61,13 @@ const (
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -159,34 +160,6 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}, {
Name: "proxy",
Default: "",
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you use the gphotosdl proxy then you can download original,
unchanged images.
This runs a headless browser in the background.
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
First run with
gphotosdl -login
Then once you have logged into google photos close the browser window
and run
gphotosdl
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
rclone use the proxy.
`, "|", "`"),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -208,7 +181,6 @@ type Options struct {
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
Proxy string `config:"proxy"`
}
// Fs represents a remote storage server
@@ -482,7 +454,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// defer log.Trace(f, "remote=%q", remote)("")
defer log.Trace(f, "remote=%q", remote)("")
return f.newObjectWithInfo(ctx, remote, nil)
}
@@ -695,7 +667,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil || pattern.isFile {
return nil, fs.ErrorDirNotFound
@@ -712,7 +684,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// defer log.Trace(f, "src=%+v", src)("")
defer log.Trace(f, "src=%+v", src)("")
// Temporary Object under construction
o := &Object{
fs: f,
@@ -765,7 +737,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@@ -789,7 +761,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%q")("err=%v", &err)
defer log.Trace(f, "dir=%q")("err=%v", &err)
match, _, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
@@ -862,7 +834,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
if !o.fs.opt.ReadSize || o.bytes >= 0 {
return o.bytes
}
@@ -963,7 +935,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -993,20 +965,16 @@ func (o *Object) downloadURL() string {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// defer log.Trace(o, "")("")
defer log.Trace(o, "")("")
err = o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
return nil, err
}
url := o.downloadURL()
if o.fs.opt.Proxy != "" {
url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: url,
RootURL: o.downloadURL(),
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1099,7 +1067,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil || !pattern.isFile || !pattern.canUpload {
return errCantUpload
@@ -1168,7 +1136,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err == nil {
if err != nil {
err = errors[0]
info = results[0]
}

View File

@@ -2,7 +2,6 @@ package googlephotos
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)

View File

@@ -31,6 +31,7 @@ import (
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -47,9 +48,11 @@ const (
// Globals
var (
// Description of how to auth for this app.
oauthConfig = &oauthutil.Config{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,

View File

@@ -331,13 +331,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Join's the remote onto the base URL
func (f *Fs) url(remote string) string {
trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + trimmedRemote
return f.endpointURL + remote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(trimmedRemote)
return f.endpointURL + rest.URLPathEscape(remote)
}
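// A sketch of why the trimming matters, with hypothetical values: given
// f.endpointURL = "http://example.com/" and remote = "/four/under four.txt",
// concatenating the untrimmed remote would yield "http://example.com//four/...",
// whereas trimming and escaping gives "http://example.com/four/under%20four.txt".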
// Errors returned by parseName

View File

@@ -191,33 +191,6 @@ func TestNewObject(t *testing.T) {
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestNewObjectWithLeadingSlash(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "/four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "/four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "/not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {
m := prepareServer(t)

View File

@@ -1,166 +0,0 @@
// Package api provides functionality for interacting with the iCloud API.
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
const (
baseEndpoint = "https://www.icloud.com"
homeEndpoint = "https://www.icloud.com"
setupEndpoint = "https://setup.icloud.com/setup/ws/1"
authEndpoint = "https://idmsa.apple.com/appleauth/auth"
)
type sessionSave func(*Session)
// Client defines the client configuration
type Client struct {
appleID string
password string
srv *rest.Client
Session *Session
sessionSaveCallback sessionSave
drive *DriveService
}
// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
// - appleID: the Apple ID of the user.
// - password: the password of the user.
// - trustToken: the trust token for the session.
// - clientID: the client id for the session.
// - cookies: the cookies for the session.
// - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
icloud := &Client{
appleID: appleID,
password: password,
srv: rest.NewClient(fshttp.NewClient(context.Background())),
Session: NewSession(),
sessionSaveCallback: sessionSaveCallback,
}
icloud.Session.TrustToken = trustToken
icloud.Session.Cookies = cookies
icloud.Session.ClientID = clientID
return icloud, nil
}
// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
var err error
if c.drive == nil {
c.drive, err = NewDriveService(c)
if err != nil {
return nil, err
}
}
return c.drive, nil
}
// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized) or a 421
// (Misdirected Request), it will try to reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
resp, err = c.Session.Request(ctx, opts, request, response)
if err != nil && resp != nil {
// try to reauth
if resp.StatusCode == 401 || resp.StatusCode == 421 {
err = c.Authenticate(ctx)
if err != nil {
return nil, err
}
if c.Session.Requires2FA() {
return nil, errors.New("trust token expired, please reauth")
}
return c.RequestNoReAuth(ctx, opts, request, response)
}
}
return resp, err
}
// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
// Make the request without re-authenticating
resp, err = c.Session.Request(ctx, opts, request, response)
return resp, err
}
// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
if c.Session.Cookies != nil {
if err := c.Session.ValidateSession(ctx); err == nil {
fs.Debugf("icloud", "Valid session, no need to reauth")
return nil
}
c.Session.Cookies = nil
}
fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
err := c.Session.SignIn(ctx, c.appleID, c.password)
if err == nil {
err = c.Session.AuthWithToken(ctx)
if err == nil && c.sessionSaveCallback != nil {
c.sessionSaveCallback(c.Session)
}
}
return err
}
// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
return c.Session.SignIn(ctx, c.appleID, c.password)
}
// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
m, err := json.Marshal(values)
if err != nil {
return nil, err
}
return bytes.NewReader(m), nil
}
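// Hypothetical usage of IntoReader, mirroring how the request builders below
// construct their JSON bodies:
//
//	body, err := IntoReader(map[string]any{"document_id": "123"})
//	if err == nil {
//		opts := rest.Opts{Method: "POST", Body: body} // passed to Request in real calls
//		_ = opts
//	}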
// RequestError holds info on a result state, icloud can return a 200 but the result is unknown
type RequestError struct {
Status string
Text string
}
// Error satisfy the error interface.
func (e *RequestError) Error() string {
return fmt.Sprintf("%s: %s", e.Text, e.Status)
}
func newRequestError(Status string, Text string) *RequestError {
return &RequestError{
Status: strings.ToLower(Status),
Text: Text,
}
}
// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}

View File

@@ -1,913 +0,0 @@
package api
import (
"bytes"
"context"
"io"
"mime"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
const (
defaultZone = "com.apple.CloudDocs"
statusOk = "OK"
statusEtagConflict = "ETAG_CONFLICT"
)
// DriveService represents an iCloud Drive service.
type DriveService struct {
icloud *Client
RootID string
endpoint string
docsEndpoint string
}
// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}
// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
if err != nil {
return nil, resp, err
}
return items[0], resp, err
}
// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
var err error
_items := []map[string]any{}
for _, id := range ids {
_items = append(_items, map[string]any{
"drivewsid": id,
"partialData": false,
"includeHierarchy": false,
})
}
var body *bytes.Reader
var path string
if !includeChildren {
values := []map[string]any{{
"items": _items,
}}
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetails"
} else {
values := _items
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetailsInFolders"
}
opts := rest.Opts{
Method: "POST",
Path: path,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items, resp, err
}
// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "false")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "true")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("document_id", id)
values.Set("unified_format", "false") // important
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + defaultZone + "/list/lookup_by_id",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
var item *Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
opts := rest.Opts{
Method: "GET",
Path: "/v1/item/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var item *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
values := url.Values{}
values.Set("limit", strconv.FormatInt(limit, 10))
opts := rest.Opts{
Method: "GET",
Path: "/v1/enumerate/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
items := struct {
Items []*DriveItemRaw `json:"drive_item"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items.Items, resp, err
}
// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
_, zone, docid := DeconstructDriveID(id)
values := url.Values{}
values.Set("document_id", docid)
if zone == "" {
zone = defaultZone
}
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + zone + "/download/by_id",
Parameters: values,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var filer *FileRequest
resp, err := d.icloud.Request(ctx, opts, nil, &filer)
if err != nil {
return "", resp, err
}
var url string
if filer.DataToken != nil {
url = filer.DataToken.URL
} else {
url = filer.PackageToken.URL
}
return url, resp, err
}
// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
opts := &rest.Opts{
Method: "GET",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: url,
Options: opt,
}
resp, err := d.icloud.srv.Call(ctx, opts)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}
// MoveItemToTrashByID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": drivewsid,
"etag": etag,
"clientId": drivewsid,
}}}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItemsToTrash",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
item := struct {
Items []*DriveItem `json:"items"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
if item.Items[0].Status != statusOk {
// rerun with latest etag
if force && item.Items[0].Status == "ETAG_CONFLICT" {
return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
}
err = newRequestError(item.Items[0].Status, "unknown request status")
}
return item.Items[0], resp, err
}
// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}
// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": drivewsid,
"folders": []map[string]any{{
"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
"name": name,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/createFolders",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var fResp *CreateFoldersResponse
resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
if err != nil {
return nil, resp, err
}
status := fResp.Folders[0].Status
if status != statusOk {
err = newRequestError(status, "unknown request status")
}
return fResp.Folders[0], resp, err
}
// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}
// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": id,
"name": name,
"etag": etag,
// "extension": split[1],
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/renameItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
docSrc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
docDst, resp, err := d.GetDocByItemID(ctx, dstID)
if err != nil {
return nil, resp, err
}
return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}
// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }
// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": dstID,
"items": []map[string]any{{
"drivewsid": id,
"etag": etag,
"clientId": id,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
// putting the name in info doesn't work, but the extension does, so assume this is a bug in the endpoint
values := map[string]any{
"info_to_update": map[string]any{},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/v1/item/copy/" + itemID,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var info *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &info)
if err != nil {
return nil, resp, err
}
return info, resp, err
}
// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
// first we need to request an upload url
values := map[string]any{
"filename": name,
"type": "FILE",
"size": strconv.FormatInt(size, 10),
"content_type": GetContentTypeForFile(name),
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/upload/web",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo []*UploadResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
return responseInfo[0], resp, err
}
// Upload uploads a file to the given url
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
// TODO: implement multipart upload
opts := rest.Opts{
Method: "POST",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: uploadURL,
Body: in,
ContentLength: &size,
ContentType: GetContentTypeForFile(name),
// MultipartContentName: "files",
MultipartFileName: name,
}
var singleFileResponse *SingleFileResponse
resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
if err != nil {
return nil, resp, err
}
return singleFileResponse, resp, err
}
// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
body, err := IntoReader(r)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/update/documents",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo *DocumentUpdateResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
doc := responseInfo.Results[0].Document
item := DriveItem{
Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID,
Docwsid: doc.DocumentID,
Itemid: doc.ItemID,
Etag: doc.Etag,
ParentID: doc.ParentID,
DateModified: time.Unix(r.Mtime, 0),
DateCreated: time.Unix(r.Mtime, 0),
Type: doc.Type,
Name: doc.Name,
Size: doc.Size,
}
return &item, resp, err
}
// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
AllowConflict bool `json:"allow_conflict"`
Btime int64 `json:"btime"`
Command string `json:"command"`
CreateShortGUID bool `json:"create_short_guid"`
Data struct {
Receipt string `json:"receipt,omitempty"`
ReferenceSignature string `json:"reference_signature,omitempty"`
Signature string `json:"signature,omitempty"`
Size int64 `json:"size,omitempty"`
WrappingKey string `json:"wrapping_key,omitempty"`
} `json:"data,omitempty"`
DocumentID string `json:"document_id"`
FileFlags FileFlags `json:"file_flags"`
Mtime int64 `json:"mtime"`
Path struct {
Path string `json:"path"`
StartingDocumentID string `json:"starting_document_id"`
} `json:"path"`
}
// FileFlags defines the file flags for a document.
type FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsHidden bool `json:"is_hidden"`
IsWritable bool `json:"is_writable"`
}
// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
return UpdateFileInfo{
Command: "add_file",
CreateShortGUID: true,
AllowConflict: true,
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: false,
},
}
}
// DriveItemRaw is a raw drive item.
// Not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
type DriveItemRaw struct {
ItemID string `json:"item_id"`
ItemInfo *DriveItemRawInfo `json:"item_info"`
}
// SplitName splits the name of a DriveItemRaw into its name and extension.
//
// It returns the name and extension as separate strings. If the name ends with a dot,
// it means there is no extension, so an empty string is returned for the extension.
// If the name does not contain a dot, there is no extension either, so the full
// name and an empty extension are returned.
func (d *DriveItemRaw) SplitName() (string, string) {
name := d.ItemInfo.Name
// ends with a dot, no extension
if strings.HasSuffix(name, ".") {
return name, ""
}
lastInd := strings.LastIndex(name, ".")
if lastInd == -1 {
return name, ""
}
return name[:lastInd], name[lastInd+1:]
}
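// Illustrative outputs:
//
//	Name "report.txt" -> ("report", "txt")
//	Name "archive."   -> ("archive.", "") (trailing dot kept, no extension)
//	Name "README"     -> ("README", "")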
// ModTime returns the modification time of the DriveItemRaw.
//
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the modification time of the DriveItemRaw.
func (d *DriveItemRaw) ModTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// CreatedTime returns the creation time of the DriveItemRaw.
//
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the creation time of the DriveItemRaw.
func (d *DriveItemRaw) CreatedTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// DriveItemRawInfo is the raw information about a drive item.
type DriveItemRawInfo struct {
Name string `json:"name"`
// Extension is absolutely borked on endpoints so don't use it.
Extension string `json:"extension"`
Size int64 `json:"size,string"`
Type string `json:"type"`
Version string `json:"version"`
ModifiedAt string `json:"modified_at"`
CreatedAt string `json:"created_at"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
//
// It takes no parameters.
// It returns a pointer to a DriveItem.
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
name, extension := d.SplitName()
return &DriveItem{
Itemid: d.ItemID,
Name: name,
Extension: extension,
Type: d.ItemInfo.Type,
Etag: d.ItemInfo.Version,
DateModified: d.ModTime(),
DateCreated: d.CreatedTime(),
Size: d.ItemInfo.Size,
Urls: d.ItemInfo.Urls,
}
}
// DocumentUpdateResponse is the response of a document update request.
type DocumentUpdateResponse struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
Results []struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
OperationID interface{} `json:"operation_id"`
Document *Document `json:"document"`
} `json:"results"`
}
// Document represents a document on iCloud.
type Document struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
Etag string `json:"etag"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Type string `json:"type"`
Deleted bool `json:"deleted"`
Mtime int64 `json:"mtime"`
LastEditorName string `json:"last_editor_name"`
Data DocumentData `json:"data"`
Size int64 `json:"size"`
Btime int64 `json:"btime"`
Zone string `json:"zone"`
FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsWritable bool `json:"is_writable"`
IsHidden bool `json:"is_hidden"`
} `json:"file_flags"`
LastOpenedTime int64 `json:"lastOpenedTime"`
RestorePath interface{} `json:"restorePath"`
HasChainedParent bool `json:"hasChainedParent"`
}
// DriveID returns the drive ID of the Document.
func (d *Document) DriveID() string {
if d.Zone == "" {
d.Zone = defaultZone
}
return d.Type + "::" + d.Zone + "::" + d.DocumentID
}
// DocumentData represents the data of a document.
type DocumentData struct {
Signature string `json:"signature"`
Owner string `json:"owner"`
Size int64 `json:"size"`
ReferenceSignature string `json:"reference_signature"`
WrappingKey string `json:"wrapping_key"`
PcsInfo string `json:"pcsInfo"`
}
// SingleFileResponse is the response of a single file request.
type SingleFileResponse struct {
SingleFile *SingleFileInfo `json:"singleFile"`
}
// SingleFileInfo represents the information of a single file.
type SingleFileInfo struct {
ReferenceSignature string `json:"referenceChecksum"`
Size int64 `json:"size"`
Signature string `json:"fileChecksum"`
WrappingKey string `json:"wrappingKey"`
Receipt string `json:"receipt"`
}
// UploadResponse is the response of an upload request.
type UploadResponse struct {
URL string `json:"url"`
DocumentID string `json:"document_id"`
}
// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
URL string `json:"url"`
Token string `json:"token"`
Signature string `json:"signature"`
WrappingKey string `json:"wrapping_key"`
ReferenceSignature string `json:"reference_signature"`
}
// FileRequest represents the request of a file.
type FileRequest struct {
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
OwnerDsid int64 `json:"owner_dsid"`
DataToken *FileRequestToken `json:"data_token,omitempty"`
PackageToken *FileRequestToken `json:"package_token,omitempty"`
DoubleEtag string `json:"double_etag"`
}
// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
Folders []*DriveItem `json:"folders"`
}
// DriveItem represents an item on iCloud.
type DriveItem struct {
DateCreated time.Time `json:"dateCreated"`
Drivewsid string `json:"drivewsid"`
Docwsid string `json:"docwsid"`
Itemid string `json:"item_id"`
Zone string `json:"zone"`
Name string `json:"name"`
ParentID string `json:"parentId"`
Hierarchy []DriveItem `json:"hierarchy"`
Etag string `json:"etag"`
Type string `json:"type"`
AssetQuota int64 `json:"assetQuota"`
FileCount int64 `json:"fileCount"`
ShareCount int64 `json:"shareCount"`
ShareAliasCount int64 `json:"shareAliasCount"`
DirectChildrenCount int64 `json:"directChildrenCount"`
Items []*DriveItem `json:"items"`
NumberOfItems int64 `json:"numberOfItems"`
Status string `json:"status"`
Extension string `json:"extension,omitempty"`
DateModified time.Time `json:"dateModified,omitempty"`
DateChanged time.Time `json:"dateChanged,omitempty"`
Size int64 `json:"size,omitempty"`
LastOpenTime time.Time `json:"lastOpenTime,omitempty"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}
// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
return d.Urls.URLDownload
}
// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
if d.Extension != "" {
return d.Name + "." + d.Extension
}
return d.Name
}
// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
split := strings.Split(id, "::")
return split[len(split)-1]
}
// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
split := strings.Split(id, "::")
if len(split) < 3 {
return "", "", id
}
return split[0], split[1], split[2]
}
// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
return strings.Join([]string{t, zone, id}, "::")
}
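// Round-trip sketch with an illustrative document ID:
//
//	id := ConstructDriveID("docid", defaultZone, "FILE")
//	// id == "FILE::com.apple.CloudDocs::docid"
//	t, zone, doc := DeconstructDriveID(id)
//	// t == "FILE", zone == "com.apple.CloudDocs", doc == "docid"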
// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
// detect MIME type by looking at the filename only
mimeType := mime.TypeByExtension(filepath.Ext(name))
if mimeType == "" {
// api requires a mime type passed in
mimeType = "text/plain"
}
return strings.Split(mimeType, ";")[0]
}
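// Illustrative results (the exact mime table can vary by platform):
//
//	GetContentTypeForFile("page.html") // "text/html" (charset suffix stripped)
//	GetContentTypeForFile("data.xyz")  // "text/plain" (fallback)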

View File

@@ -1,412 +0,0 @@
package api
import (
"context"
"fmt"
"net/http"
"net/url"
"slices"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
// Session represents an iCloud session
type Session struct {
SessionToken string `json:"session_token"`
Scnt string `json:"scnt"`
SessionID string `json:"session_id"`
AccountCountry string `json:"account_country"`
TrustToken string `json:"trust_token"`
ClientID string `json:"client_id"`
Cookies []*http.Cookie `json:"cookies"`
AccountInfo AccountInfo `json:"account_info"`
srv *rest.Client `json:"-"`
}
// String returns the session as a string
// func (s *Session) String() string {
// jsession, _ := json.Marshal(s)
// return string(jsession)
// }
// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
if err != nil {
return resp, err
}
if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
s.AccountCountry = val
}
if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
s.SessionID = val
}
if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
s.SessionToken = val
}
if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
s.TrustToken = val
}
if val := resp.Header.Get("scnt"); val != "" {
s.Scnt = val
}
return resp, nil
}
// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}
// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
trustTokens := []string{}
if s.TrustToken != "" {
trustTokens = []string{s.TrustToken}
}
values := map[string]any{
"accountName": appleID,
"password": password,
"rememberMe": true,
"trustTokens": trustTokens,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/signin",
Parameters: url.Values{},
ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
RootURL: authEndpoint,
IgnoreStatus: true, // need to handle 409 for hsa2
NoResponse: true,
Body: body,
}
opts.Parameters.Set("isRememberMeEnabled", "true")
_, err = s.Request(ctx, opts, nil, nil)
return err
}
// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
values := map[string]any{
"accountCountryCode": s.AccountCountry,
"dsWebAuthToken": s.SessionToken,
"extended_login": true,
"trustToken": s.TrustToken,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/accountLogin",
ExtraHeaders: GetCommonHeaders(map[string]string{}),
RootURL: setupEndpoint,
Body: body,
}
resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err == nil {
s.Cookies = resp.Cookies()
}
return err
}
// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
body, err := IntoReader(values)
if err != nil {
return err
}
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "POST",
Path: "/verify/trusteddevice/securitycode",
ExtraHeaders: headers,
RootURL: authEndpoint,
Body: body,
NoResponse: true,
}
_, err = s.Request(ctx, opts, nil, nil)
if err == nil {
if err := s.TrustSession(ctx); err != nil {
return err
}
return nil
}
return fmt.Errorf("validate2FACode failed: %w", err)
}
// TrustSession marks the session as trusted, so 2FA is not requested again,
// and then re-authenticates with the new token
func (s *Session) TrustSession(ctx context.Context) error {
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "GET",
Path: "/2sv/trust",
ExtraHeaders: headers,
RootURL: authEndpoint,
NoResponse: true,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, nil)
if err != nil {
return fmt.Errorf("trustSession failed: %w", err)
}
return s.AuthWithToken(ctx)
}
// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "/validate",
ExtraHeaders: s.GetHeaders(map[string]string{}),
RootURL: setupEndpoint,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err != nil {
return fmt.Errorf("validateSession failed: %w", err)
}
return nil
}
// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Accept": "application/json",
"Content-Type": "application/json",
"X-Apple-OAuth-Client-Id": s.ClientID,
"X-Apple-OAuth-Client-Type": "firstPartyAuth",
"X-Apple-OAuth-Redirect-URI": "https://www.icloud.com",
"X-Apple-OAuth-Require-Grant-Code": "true",
"X-Apple-OAuth-Response-Mode": "web_message",
"X-Apple-OAuth-Response-Type": "code",
"X-Apple-OAuth-State": s.ClientID,
"X-Apple-Widget-Key": s.ClientID,
"Origin": homeEndpoint,
"Referer": fmt.Sprintf("%s/", homeEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// GetHeaders returns the common request headers with the session cookies attached
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
headers := GetCommonHeaders(map[string]string{})
headers["Cookie"] = s.GetCookieString()
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
cookieHeader := ""
// we only care about name and value.
for _, cookie := range s.Cookies {
cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
}
return cookieHeader
}
// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Content-Type": "application/json",
"Origin": baseEndpoint,
"Referer": fmt.Sprintf("%s/", baseEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
for k, v := range overwrite {
headers[k] = v
}
return headers
}
// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
var hashes []string
for _, cookie := range right {
hashes = append(hashes, cookie.Raw)
}
for _, cookie := range left {
if !slices.Contains(hashes, cookie.Raw) {
right = append(right, cookie)
}
}
return right, nil
}
// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(u *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
var domainCookies []*http.Cookie
for _, cookie := range cookies {
if strings.HasSuffix(u.Host, cookie.Domain) {
domainCookies = append(domainCookies, cookie)
}
}
return domainCookies, nil
}
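// A hypothetical combination of the two helpers above: keep only the cookies
// that apply to the setup endpoint, then merge them into the cookies already
// stored on the session (resp is an assumed *http.Response):
//
//	u, _ := url.Parse(setupEndpoint)
//	fresh, _ := GetCookiesForDomain(u, resp.Cookies())
//	s.Cookies, _ = MergeCookies(s.Cookies, fresh)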
// NewSession creates a new Session instance with default values.
func NewSession() *Session {
session := &Session{}
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
//session.ClientID = "auth-" + uuid.New().String()
return session
}
// AccountInfo represents the iCloud account information
type AccountInfo struct {
DsInfo *ValidateDataDsInfo `json:"dsInfo"`
HasMinimumDeviceForPhotosWeb bool `json:"hasMinimumDeviceForPhotosWeb"`
ICDPEnabled bool `json:"iCDPEnabled"`
Webservices map[string]*webService `json:"webservices"`
PcsEnabled bool `json:"pcsEnabled"`
TermsUpdateNeeded bool `json:"termsUpdateNeeded"`
ConfigBag struct {
Urls struct {
AccountCreateUI string `json:"accountCreateUI"`
AccountLoginUI string `json:"accountLoginUI"`
AccountLogin string `json:"accountLogin"`
AccountRepairUI string `json:"accountRepairUI"`
DownloadICloudTerms string `json:"downloadICloudTerms"`
RepairDone string `json:"repairDone"`
AccountAuthorizeUI string `json:"accountAuthorizeUI"`
VettingURLForEmail string `json:"vettingUrlForEmail"`
AccountCreate string `json:"accountCreate"`
GetICloudTerms string `json:"getICloudTerms"`
VettingURLForPhone string `json:"vettingUrlForPhone"`
} `json:"urls"`
AccountCreateEnabled bool `json:"accountCreateEnabled"`
} `json:"configBag"`
HsaTrustedBrowser bool `json:"hsaTrustedBrowser"`
AppsOrder []string `json:"appsOrder"`
Version int `json:"version"`
IsExtendedLogin bool `json:"isExtendedLogin"`
PcsServiceIdentitiesIncluded bool `json:"pcsServiceIdentitiesIncluded"`
IsRepairNeeded bool `json:"isRepairNeeded"`
HsaChallengeRequired bool `json:"hsaChallengeRequired"`
RequestInfo struct {
Country string `json:"country"`
TimeZone string `json:"timeZone"`
Region string `json:"region"`
} `json:"requestInfo"`
PcsDeleted bool `json:"pcsDeleted"`
ICloudInfo struct {
SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
} `json:"iCloudInfo"`
Apps map[string]*ValidateDataApp `json:"apps"`
}
// ValidateDataDsInfo represents the account validation info
type ValidateDataDsInfo struct {
HsaVersion int `json:"hsaVersion"`
LastName string `json:"lastName"`
ICDPEnabled bool `json:"iCDPEnabled"`
TantorMigrated bool `json:"tantorMigrated"`
Dsid string `json:"dsid"`
HsaEnabled bool `json:"hsaEnabled"`
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
IroncadeMigrated bool `json:"ironcadeMigrated"`
Locale string `json:"locale"`
BrZoneConsolidated bool `json:"brZoneConsolidated"`
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
IsManagedAppleID bool `json:"isManagedAppleID"`
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
Gilligvited bool `json:"gilligvited"`
AppleIDAliases []interface{} `json:"appleIdAliases"`
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
IsPaidDeveloper bool `json:"isPaidDeveloper"`
CountryCode string `json:"countryCode"`
NotificationID string `json:"notificationId"`
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
ADsID string `json:"aDsID"`
Locked bool `json:"locked"`
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
PrimaryEmail string `json:"primaryEmail"`
AppleIDEntries []struct {
IsPrimary bool `json:"isPrimary"`
Type string `json:"type"`
Value string `json:"value"`
} `json:"appleIdEntries"`
GilliganEnabled bool `json:"gilligan-enabled"`
IsWebAccessAllowed bool `json:"isWebAccessAllowed"`
FullName string `json:"fullName"`
MailFlags struct {
IsThreadingAvailable bool `json:"isThreadingAvailable"`
IsSearchV2Provisioned bool `json:"isSearchV2Provisioned"`
SCKMail bool `json:"sCKMail"`
IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
} `json:"mailFlags"`
LanguageCode string `json:"languageCode"`
AppleID string `json:"appleId"`
HasUnreleasedOS bool `json:"hasUnreleasedOS"`
AnalyticsOptInStatus bool `json:"analyticsOptInStatus"`
FirstName string `json:"firstName"`
ICloudAppleIDAlias string `json:"iCloudAppleIdAlias"`
NotesMigrated bool `json:"notesMigrated"`
BeneficiaryInfo struct {
IsBeneficiary bool `json:"isBeneficiary"`
} `json:"beneficiaryInfo"`
HasPaymentInfo bool `json:"hasPaymentInfo"`
PcsDelet bool `json:"pcsDelet"`
AppleIDAlias string `json:"appleIdAlias"`
BrMigrated bool `json:"brMigrated"`
StatusCode int `json:"statusCode"`
FamilyEligible bool `json:"familyEligible"`
}
// ValidateDataApp represents an app
type ValidateDataApp struct {
CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
IsQualifiedForBeta bool `json:"isQualifiedForBeta"`
}
// webService represents a web service
type webService struct {
PcsRequired bool `json:"pcsRequired"`
URL string `json:"url"`
UploadURL string `json:"uploadUrl"`
Status string `json:"status"`
}
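// Putting the pieces above together, a minimal sketch of the sign-in flow
// (signInSketch is hypothetical, not part of the package API; error handling
// is kept minimal and the 2FA code is assumed to have been read from the user):
func signInSketch(ctx context.Context, appleID, password, code string) (*Session, error) {
	s := NewSession()
	s.ClientID = "auth-example" // hypothetical; the real client ID comes from the backend config
	if err := s.SignIn(ctx, appleID, password); err != nil {
		return nil, err
	}
	if err := s.AuthWithToken(ctx); err != nil {
		return nil, err
	}
	if s.Requires2FA() {
		// Validate2FACode also trusts the session and re-authenticates
		if err := s.Validate2FACode(ctx, code); err != nil {
			return nil, err
		}
	}
	return s, s.ValidateSession(ctx)
}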

File diff suppressed because it is too large

View File

@@ -1,18 +0,0 @@
//go:build !plan9 && !solaris
package iclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/iclouddrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestICloudDrive:",
NilObject: (*iclouddrive.Object)(nil),
})
}
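// A typical invocation, following rclone's integration-test convention and
// assuming a remote named TestICloudDrive is configured:
//
//	go test -v -remote TestICloudDrive: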

View File

@@ -1,7 +0,0 @@
// Build stub for iclouddrive on unsupported platforms, to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris
// Package iclouddrive implements the iCloud Drive backend
package iclouddrive

View File

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
// JobIDResponse represents response struct with JobID for folder operations
// JobIDResponse respresents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}

View File

@@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
var path = strings.Replace(resultURL, endpoint, "", 1)
path += expires
path = path + expires
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
mac.Write([]byte(path))
signature := hex.EncodeToString(mac.Sum(nil))

View File

@@ -151,19 +151,6 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
Default: "https://archive.org",
Advanced: true,
}, {
Name: "item_metadata",
Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
Default: []string{},
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "item_derive",
Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
Default: true,
}, {
Name: "disable_checksum",
Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -214,8 +201,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
FrontEndpoint string `config:"front_endpoint"`
DisableChecksum bool `config:"disable_checksum"`
ItemMetadata []string `config:"item_metadata"`
ItemDerive bool `config:"item_derive"`
WaitArchive fs.Duration `config:"wait_archive"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -805,23 +790,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
"x-amz-filemeta-rclone-update-track": updateTracker,
// we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-meta-mediatype": "data", // mark media type of the uploading file as "data"
"x-archive-queue-derive": "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
}
if size >= 0 {
headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}
// This is IA's ITEM metadata, not file metadata
headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
if err != nil {
return err
}
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {
@@ -884,51 +863,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
metadataCounter := make(map[string]int)
metadataValues := make(map[string][]string)
// First pass: count occurrences and collect values
for _, v := range options.ItemMetadata {
parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 {
return newHeaders, errors.New("item metadata key=value should be in the form key=value")
}
key, value := parts[0], parts[1]
metadataCounter[key]++
metadataValues[key] = append(metadataValues[key], value)
}
// Second pass: add headers with appropriate prefixes
for key, count := range metadataCounter {
if count == 1 {
// Only one occurrence, use x-archive-meta-
headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
} else {
// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
for i, value := range metadataValues[key] {
headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
}
}
}
if o.fs.opt.ItemDerive {
headers["x-archive-queue-derive"] = "1"
} else {
headers["x-archive-queue-derive"] = "0"
}
fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
for k, v := range headers {
if strings.HasPrefix(k, "x-archive-meta") {
fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
}
}
return headers, nil
}
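// As a sketch of the mapping above, a hypothetical setting of
// item_metadata = ["title=My Item", "subject=maps", "subject=history"]
// would produce:
//
//	x-archive-meta-title: My Item
//	x-archive-meta01-subject: maps
//	x-archive-meta02-subject: history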
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()

View File

@@ -277,9 +277,11 @@ machines.`)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -290,9 +292,11 @@ machines.`)
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -303,9 +307,11 @@ machines.`)
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -316,9 +322,11 @@ machines.`)
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauthutil.Config{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = defaultClientID
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.TokenURL = tokenURL
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.AuthURL = tokenURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
clientID, ok := m.Get(configClientID)
@@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.TokenURL = legacyTokenURL
oauthConfig.AuthURL = legacyTokenURL
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
@@ -1545,7 +1555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
if err == nil && meta != nil {
if err != nil && meta != nil {
createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
if !createTimeMeta {
createTime = srcObj.createTime

View File

@@ -5,18 +5,18 @@ package local
import (
"context"
"fmt"
"syscall"
"unsafe"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/windows"
)
var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
root, e := windows.UTF16PtrFromString(f.root)
root, e := syscall.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != windows.Errno(0) {
if e1 != syscall.Errno(0) {
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{

View File

@@ -1,93 +0,0 @@
//go:build darwin && cgo
// Package local provides a filesystem interface
package local
import (
"context"
"fmt"
"path/filepath"
"runtime"
"github.com/go-darwin/apfs"
"github.com/rclone/rclone/fs"
)
// Copy src to this remote using server-side copy operations.
//
// # This is stored with the remote path given
//
// # It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
if runtime.GOOS != "darwin" || f.opt.NoClone {
return nil, fs.ErrorCantCopy
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't clone - not same remote type")
return nil, fs.ErrorCantCopy
}
if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
return nil, fs.ErrorCantCopy
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if err != nil {
return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
}
// Create destination
dstObj := f.newObject(remote)
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
srcPath := srcObj.path
if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
srcPath, err = filepath.EvalSymlinks(srcPath)
if err != nil {
return nil, err
}
}
err = Clone(srcPath, f.localPath(remote))
if err != nil {
return nil, err
}
// Set metadata if --metadata is in use
if meta != nil {
err = dstObj.writeMetadata(meta)
if err != nil {
return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
}
}
return f.NewObject(ctx, remote)
}
// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
func Clone(src, dst string) error {
state := apfs.CopyFileStateAlloc()
defer func() {
if err := apfs.CopyFileStateFree(state); err != nil {
fs.Errorf(dst, "free state error: %v", err)
}
}()
cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
return err
}
// Check the interfaces are satisfied
var (
_ fs.Copier = &Fs{}
)

View File

@@ -1,16 +0,0 @@
//go:build windows || plan9 || js || linux
package local
import "os"
const haveLChmod = false
// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
// Can't do this safely on this OS - chmoding a symlink always
// changes the destination.
return nil
}

View File

@@ -1,41 +0,0 @@
//go:build !windows && !plan9 && !js && !linux
package local
import (
"os"
"syscall"
"golang.org/x/sys/unix"
)
const haveLChmod = true
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= syscall.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= syscall.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= syscall.S_ISVTX
}
return o
}
// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
// and returns ENOTSUP if you try, so we don't support this on linux
if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lChmod", Path: name, Err: e}
}
return nil
}

View File

@@ -1,4 +1,4 @@
//go:build plan9 || js
//go:build windows || plan9 || js
package local

View File

@@ -1,19 +0,0 @@
//go:build windows
package local
import (
"time"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
return setTimes(name, atime, mtime, time.Time{}, true)
}

View File

@@ -32,10 +32,9 @@ import (
)
// Constants
const (
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]
@@ -79,44 +78,41 @@ supported by all file systems) under the "user.*" prefix.
Metadata is supported on files and directories.
`,
},
Options: []fs.Option{
{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names.",
}},
},
{
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
},
{
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
Default: false,
Advanced: true,
},
{
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names.",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
},
{
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
@@ -126,12 +122,11 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu
So rclone now always reads the link.
`,
Default: false,
Advanced: true,
},
{
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames.
Default: false,
Advanced: true,
}, {
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames.
This flag can be used to normalize file names into unicode NFC form
that are read from the local filesystem.
@@ -145,12 +140,11 @@ some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
Default: false,
Advanced: true,
},
{
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload.
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -
@@ -181,96 +175,68 @@ directory listing (where the initial stat value comes from on Windows)
and when stat is called on them directly. Other copy tools always use
the direct stat value and setting this flag will disable that.
`,
Default: false,
Advanced: true,
},
{
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
},
{
Name: "case_sensitive",
Help: `Force the filesystem to report itself as case sensitive.
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}, {
Name: "case_sensitive",
Help: `Force the filesystem to report itself as case sensitive.
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
},
{
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive.
Default: false,
Advanced: true,
}, {
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive.
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
},
{
Name: "no_clone",
Help: `Disable reflink cloning for server-side copies.
Normally, for local-to-local transfers, rclone will "clone" the file when
possible, and fall back to "copying" only when cloning is not supported.
Cloning creates a shallow copy (or "reflink") which initially shares blocks with
the original file. Unlike a "hardlink", the two files are independent and
neither will affect the other if subsequently modified.
Cloning is usually preferable to copying, as it is much faster and is
deduplicated by default (i.e. having two identical files does not consume more
storage than having just one.) However, for use cases where data redundancy is
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.
Currently, cloning is only supported when using APFS on macOS (support for other
platforms may be added in the future.)`,
Default: false,
Advanced: true,
},
{
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files.
Default: false,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files.
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
},
{
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads.
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads.
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
},
{
Name: "no_set_modtime",
Help: `Disable setting modtime.
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime.
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
},
{
Name: "time_type",
Help: `Set what kind of time is returned.
Default: false,
Advanced: true,
}, {
Name: "time_type",
Help: `Set what kind of time is returned.
Normally rclone does all operations on the mtime or Modification time.
@@ -289,29 +255,27 @@ will silently replace it with the modification time which all OSes support.
Note that setting the time will still set the modified time so this is
only useful for reading.
`,
Default: mTime,
Advanced: true,
Examples: []fs.OptionExample{{
Value: mTime.String(),
Help: "The last modification time.",
}, {
Value: aTime.String(),
Help: "The last access time.",
}, {
Value: bTime.String(),
Help: "The creation time.",
}, {
Value: cTime.String(),
Help: "The last status change time.",
}},
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.OS,
},
},
Default: mTime,
Advanced: true,
Examples: []fs.OptionExample{{
Value: mTime.String(),
Help: "The last modification time.",
}, {
Value: aTime.String(),
Help: "The last access time.",
}, {
Value: bTime.String(),
Help: "The creation time.",
}, {
Value: cTime.String(),
Help: "The last status change time.",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.OS,
}},
}
fs.Register(fsi)
}
@@ -332,7 +296,6 @@ type Options struct {
NoSetModTime bool `config:"no_set_modtime"`
TimeType timeType `config:"time_type"`
Enc encoder.MultiEncoder `config:"encoding"`
NoClone bool `config:"no_clone"`
}
// Fs represents a local filesystem rooted at root
@@ -376,22 +339,17 @@ type Directory struct {
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Override --local-links with --links if set
if ci.Links {
opt.TranslateSymlinks = true
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
@@ -426,10 +384,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.FollowSymlinks {
f.lstat = os.Stat
}
if opt.NoClone {
// Disable server-side copy when --local-no-clone is set
f.features.Copy = nil
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
@@ -437,9 +391,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -510,8 +464,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
}
@@ -694,7 +648,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above

View File

@@ -73,6 +73,7 @@ func TestUpdatingCheck(t *testing.T) {
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}
// Test corrupted on transfer
@@ -110,7 +111,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +140,7 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +156,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
// Check that NewObject doesn't see the non suffixed version
@@ -165,7 +166,7 @@ func TestSymlink(t *testing.T) {
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
@@ -223,7 +224,7 @@ func TestHashOnUpdate(t *testing.T) {
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
b := bytes.NewBufferString("CONTENT")
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
@@ -268,66 +269,22 @@ func TestMetadata(t *testing.T) {
r := fstest.NewRun(t)
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
whenRFC := when.Format(time.RFC3339Nano)
r.WriteFile(filePath, "metadata file contents", when)
f := r.Flocal.(*Fs)
// Set fs into "-l" / "--links" mode
f.opt.TranslateSymlinks = true
// Write a symlink to the file
symlinkPath := "metafile-link.txt"
osSymlinkPath := filepath.Join(f.root, symlinkPath)
symlinkPath += fs.LinkSuffix
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
// Get the object
obj, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
o := obj.(*Object)
// Get the symlink object
symlinkObj, err := f.NewObject(ctx, symlinkPath)
require.NoError(t, err)
symlinkO := symlinkObj.(*Object)
// Record metadata for o
oMeta, err := o.Metadata(ctx)
require.NoError(t, err)
// Test symlink first to check it doesn't mess up file
t.Run("Symlink", func(t *testing.T) {
testMetadata(t, r, symlinkO, symlinkModTime)
})
// Read it again
oMetaNew, err := o.Metadata(ctx)
require.NoError(t, err)
// Check that operating on the symlink didn't change the file it was pointing to
// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
// Now run the same tests on the file
t.Run("File", func(t *testing.T) {
testMetadata(t, r, o, when)
})
}
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)
features := f.Features()
var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
var hasXID, hasAtime, hasBtime bool
switch runtime.GOOS {
case "darwin", "freebsd", "netbsd", "linux":
hasXID, hasAtime, hasBtime = true, true, true
canSetXattrOnLinks = runtime.GOOS != "linux"
case "openbsd", "solaris":
hasXID, hasAtime = true, true
case "windows":
@@ -350,10 +307,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
require.NoError(t, err)
assert.Nil(t, m)
if !canSetXattrOnLinks && o.translatedLink {
t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
}
inM := fs.Metadata{
"potato": "chips",
"cabbage": "soup",
@@ -368,21 +321,18 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
})
checkTime := func(m fs.Metadata, key string, when time.Time) {
t.Helper()
mt, ok := o.parseMetadataTime(m, key)
assert.True(t, ok)
dt := mt.Sub(when)
precision := time.Second
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
}
checkInt := func(m fs.Metadata, key string, base int) int {
t.Helper()
value, ok := o.parseMetadataInt(m, key, base)
assert.True(t, ok)
return value
}
t.Run("Read", func(t *testing.T) {
m, err := o.Metadata(ctx)
require.NoError(t, err)
@@ -392,12 +342,13 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
checkInt(m, "mode", 8)
checkTime(m, "mtime", when)
assert.Equal(t, len(whenRFC), len(m["mtime"]))
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
if hasAtime && !o.translatedLink { // symlinks generally don't record atime
if hasAtime {
checkTime(m, "atime", when)
}
if hasBtime && !o.translatedLink { // symlinks generally don't record btime
if hasBtime {
checkTime(m, "btime", when)
}
if hasXID {
@@ -421,10 +372,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
"mode": "0767",
"potato": "wedges",
}
if !canSetXattrOnLinks && o.translatedLink {
// Don't change xattr if not supported on symlinks
delete(newM, "potato")
}
err := o.writeMetadata(newM)
require.NoError(t, err)
@@ -434,11 +381,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
mode := checkInt(m, "mode", 8)
if runtime.GOOS != "windows" {
expectedMode := 0767
if o.translatedLink && runtime.GOOS == "linux" {
expectedMode = 0777 // perms of symlinks always read as 0777 on linux
}
assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
}
checkTime(m, "mtime", newMtime)
@@ -448,10 +391,11 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
if haveSetBTime {
checkTime(m, "btime", newBtime)
}
if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
if xattrSupported {
assert.Equal(t, "wedges", m["potato"])
}
})
}
func TestFilter(t *testing.T) {
@@ -628,35 +572,4 @@ func TestCopySymlink(t *testing.T) {
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
// Set fs into "-L/--copy-links" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst2"))
// Do copy from src into dst
src, err = f.NewObject(ctx, "src/link.txt")
require.NoError(t, err)
require.NotNil(t, src)
dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a NON-symlink and it has the right contents
dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
fi, err := os.Lstat(dstPath)
require.NoError(t, err)
assert.True(t, fi.Mode()&os.ModeSymlink == 0)
want := fstest.NewItem("dst2/link.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
// Test that copying a normal file also works
dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
require.NoError(t, err)
require.NotNil(t, dst)
want = fstest.NewItem("dst2/file.txt", "hello world", when)
fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
}

View File

@@ -2,7 +2,6 @@ package local
import (
"fmt"
"math"
"os"
"runtime"
"strconv"
@@ -73,12 +72,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
value, ok := m[key]
if ok {
var err error
parsed, err := strconv.ParseInt(value, base, 0)
result64, err := strconv.ParseInt(value, base, 64)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
ok = false
}
result = int(parsed)
result = int(result64)
}
return result, ok
}
@@ -105,11 +104,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
if haveSetBTime {
if btimeOK {
if o.translatedLink {
err = lsetBTime(o.path, btime)
} else {
err = setBTime(o.path, btime)
}
err = setBTime(o.path, btime)
if err != nil {
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
}
@@ -125,11 +120,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
} else {
if o.translatedLink {
err = os.Lchown(o.path, uid, gid)
} else {
err = os.Chown(o.path, uid, gid)
}
err = os.Chown(o.path, uid, gid)
if err != nil {
outErr = fmt.Errorf("failed to change ownership: %w", err)
}
@@ -137,23 +128,9 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
}
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
if hasMode {
if mode >= 0 {
umode := uint(mode)
if umode <= math.MaxUint32 {
if o.translatedLink {
if haveLChmod {
err = lChmod(o.path, os.FileMode(umode))
} else {
fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
err = nil
}
} else {
err = os.Chmod(o.path, os.FileMode(umode))
}
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
err = os.Chmod(o.path, os.FileMode(mode))
if err != nil {
outErr = fmt.Errorf("failed to change permissions: %w", err)
}
}
// FIXME not parsing rdev yet

View File

@@ -13,9 +13,3 @@ func setBTime(name string, btime time.Time) error {
// Does nothing
return nil
}
// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
// Does nothing
return nil
}

View File

@@ -9,20 +9,15 @@ import (
const haveSetBTime = true
// setTimes sets any of atime, mtime or btime
// if link is set it sets a link rather than the target
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
pathp, err := syscall.UTF16PtrFromString(name)
if err != nil {
return err
}
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
if link {
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
}
h, err := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, fileFlag, 0)
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return err
}
@@ -32,28 +27,6 @@ func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error)
err = closeErr
}
}()
var patime, pmtime, pbtime *syscall.Filetime
if !atime.IsZero() {
t := syscall.NsecToFiletime(atime.UnixNano())
patime = &t
}
if !mtime.IsZero() {
t := syscall.NsecToFiletime(mtime.UnixNano())
pmtime = &t
}
if !btime.IsZero() {
t := syscall.NsecToFiletime(btime.UnixNano())
pbtime = &t
}
return syscall.SetFileTime(h, pbtime, patime, pmtime)
}
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
return setTimes(name, time.Time{}, time.Time{}, btime, false)
}
// lsetBTime changes the birth time of the link passed in
func lsetBTime(name string, btime time.Time) error {
return setTimes(name, time.Time{}, time.Time{}, btime, true)
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
return syscall.SetFileTime(h, &bFileTime, nil, nil)
}

View File

@@ -68,12 +68,14 @@ var (
)
// Description of how to authorize
var oauthConfig = &oauthutil.Config{
var oauthConfig = &oauth2.Config{
ClientID: api.OAuthClientID,
ClientSecret: "",
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
Endpoint: oauth2.Endpoint{
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
},
}
// Register with Fs
@@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(ctx, f.cli)
oauth2Conf := oauthConfig.MakeOauth2Config()
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
err = errors.New("invalid token")

View File

@@ -923,7 +923,9 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
entrywanted := (directory && files[i].Type == "dir") ||
(!directory && files[i].Type != "dir")
if entrywanted {
files[0], files[i] = files[i], files[0]
filestamp := files[0]
files[0] = files[i]
files[i] = filestamp
}
}
return files, nil

View File

@@ -202,14 +202,9 @@ type SharingLinkType struct {
type LinkType string
const (
// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
ViewLinkType LinkType = "view"
// EditLinkType (role: write) An edit sharing link, allowing read-write access.
EditLinkType LinkType = "edit"
// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
// content into a host webpage. Embed links are not available for OneDrive for
// Business or SharePoint.
EmbedLinkType LinkType = "embed"
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
)
// LinkScope represents the scope of the link represented by this permission.
@@ -217,12 +212,9 @@ const (
type LinkScope string
const (
// AnonymousScope = Anyone with the link has access, without needing to sign in.
// This may include people outside of your organization.
AnonymousScope LinkScope = "anonymous"
// OrganizationScope = Anyone signed into your organization (tenant) can use the
// link to get access. Only available in OneDrive for Business and SharePoint.
OrganizationScope LinkScope = "organization"
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
)
// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
@@ -244,14 +236,10 @@ type PermissionsType struct {
type Role string
const (
// ReadRole provides the ability to read the metadata and contents of the item.
ReadRole Role = "read"
// WriteRole provides the ability to read and modify the metadata and contents of the item.
WriteRole Role = "write"
// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
OwnerRole Role = "owner"
// MemberRole represents the member role for SharePoint and OneDrive for Business.
MemberRole Role = "member"
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
)
// PermissionsResponse is the response to the list permissions method

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"net/http"
"slices"
"strings"
"time"
@@ -15,6 +14,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)
const (

View File

@@ -40,6 +40,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -64,21 +65,14 @@ const (
// Globals
var (
// Define the paths used for token operations
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
authPath = "/oauth2/v2.0/authorize"
tokenPath = "/oauth2/v2.0/token"
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// When using client credential OAuth flow, scope of .default is required in order
// to use the permissions configured for the application within the tenant
scopeAccessClientCred = fs.SpaceSepList{".default"}
// Base config for how to auth
oauthConfig = &oauthutil.Config{
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: scopeAccess,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -131,7 +125,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
Help: "Microsoft Cloud Germany",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",
@@ -189,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
},
},
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.
Set this if using
- Client Credential flow
`,
Sensitive: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
@@ -541,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
})
}
// Make the oauth config for the backend
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
// Copy the default oauthConfig
oauthConfig := *oauthConfig
// Set the scopes
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
// Construct the auth URLs
prefix := commonPathPrefix
if opt.Tenant != "" {
prefix = "/" + opt.Tenant
}
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
// Check to see if we are using client credentials flow
if opt.ClientCredentials {
// Override scope to .default
oauthConfig.Scopes = scopeAccessClientCred
if opt.Tenant == "" {
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
}
}
return &oauthConfig, nil
}
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
_, graphURL := getRegionURL(m)
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
// Check to see if this is the start of the state machine execution
if conf.State == "" {
conf, err := makeOauthConfig(ctx, opt)
if config.State == "" {
var accessScopes fs.SpaceSepList
accessScopesString, _ := m.Get("access_scopes")
err := accessScopes.Set(accessScopesString)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
}
oauthConfig.Scopes = []string(accessScopes)
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: conf,
OAuth2Config: oauthConfig,
})
}
@@ -596,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
if err != nil {
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
// Create a REST client, build on the OAuth client created above
srv := rest.NewClient(oAuthClient)
switch conf.State {
switch config.State {
case "choose_type":
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
@@ -626,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(conf.Result)
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
@@ -644,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
},
})
case "driveid":
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
if err != nil {
return out, err
}
// Default the drive_id to the previous version in the config
out.Option.Default, _ = m.Get("drive_id")
return out, nil
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: conf.Result,
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -670,7 +622,7 @@ Examples:
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := conf.Result
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
@@ -685,12 +637,12 @@ Examples:
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: conf.Result,
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := conf.Result
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
@@ -712,10 +664,10 @@ Examples:
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "driveid_final":
finalDriveID := conf.Result
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
@@ -734,12 +686,12 @@ Examples:
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if conf.Result == "true" {
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", conf.State)
return nil, fmt.Errorf("unknown state %q", config.State)
}
// Options defines the configuration for this backend
@@ -750,9 +702,7 @@ type Options struct {
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ClientCredentials bool `config:"client_credentials"`
AccessScopes fs.SpaceSepList `config:"access_scopes"`
Tenant string `config:"tenant"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@@ -877,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry = true
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
}
case 429, 503: // Too Many Requests, Server Too Busy
case 429: // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
retryAfter, parseErr := strconv.Atoi(values[0])
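// Sketch (an assumption, not shown in this hunk) of the usual way the parsed
// Retry-After value is turned into a delayed retry via rclone's pacer package:
//
//	if parseErr == nil {
//		fs.Debugf(nil, "Retry-After: %d seconds", retryAfter)
//		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
//	}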
@@ -992,8 +942,7 @@ func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
// Redirects have no body so don't report an error
if err != nil && resp.Header.Get("Location") == "" {
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {
@@ -1040,10 +989,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
oauthConfig, err := makeOauthConfig(ctx, opt)
if err != nil {
return nil, err
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
}
client := fshttp.NewClient(ctx)
@@ -1592,12 +1544,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
// While this is true for some OneDrive personal accounts, it
// isn't true for all of them. See #8101 for details
//
// if f.driveType == driveTypePersonal {
// return time.Millisecond
// }
if f.driveType == driveTypePersonal {
return time.Millisecond
}
return time.Second
}
@@ -1656,7 +1605,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -1671,18 +1620,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
@@ -1985,7 +1927,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shareURL, nil
}
const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
directURL := ""
segments := strings.Split(shareURL, "/")
switch f.driveType {
@@ -2610,11 +2552,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("can't upload content to a OneNote file")
}
// Only start the renewer if we have a valid one
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"slices"
"testing"
"time"
@@ -17,6 +16,7 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
compareDirMeta(expectedMeta, actualMeta, false)
// modtime
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
// try changing it and re-check it
newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
assert.NoError(t, err)
fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
// ensure that f.DirSetModTime also works
err = f.DirSetModTime(ctx, "subdir", t3)
assert.NoError(t, err)
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
assert.NoError(t, err)
entries.ForDir(func(dir fs.Directory) {
if dir.Remote() == "subdir" {
fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
}
})

View File

@@ -26,10 +26,7 @@ package quickxorhash
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
import (
"crypto/subtle"
"hash"
)
import "hash"
const (
// BlockSize is the preferred size for hashing
@@ -51,11 +48,6 @@ func New() hash.Hash {
return &quickXorHash{}
}
// xor dst with src
func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//

View File

@@ -0,0 +1,20 @@
//go:build !go1.20
package quickxorhash
func xorBytes(dst, src []byte) int {
n := len(dst)
if len(src) < n {
n = len(src)
}
if n == 0 {
return 0
}
dst = dst[:n]
//src = src[:n]
src = src[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] ^= src[i]
}
return n
}
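// Illustrative sketch (not part of the diff) of the behaviour shared by both
// xorBytes implementations: dst is XORed with src in place and the number of
// bytes processed is min(len(dst), len(src)).
//
//	dst := []byte{0x0f, 0xf0, 0xff}
//	src := []byte{0xff, 0xff}
//	n := xorBytes(dst, src) // n == 2, dst is now {0xf0, 0x0f, 0xff}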

View File

@@ -0,0 +1,9 @@
//go:build go1.20
package quickxorhash
import "crypto/subtle"
func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}

View File

@@ -404,32 +404,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var uInfo usersInfoResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/users/info.json/" + f.session.SessionID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
usage = &fs.Usage{
Used: fs.NewUsageValue(uInfo.StorageUsed),
Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
}
return usage, nil
}
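// Worked example (illustrative): the API reports MaxStorage in MB and
// StorageUsed in bytes, so with MaxStorage = 5120 and StorageUsed = 1073741824
// the usage above comes out as
//
//	Total: 5120 * 1024 * 1024      = 5368709120 bytes (5 GiB)
//	Free:  5368709120 - 1073741824 = 4294967296 bytes (4 GiB)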
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
@@ -1173,7 +1147,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)

View File

@@ -231,10 +231,3 @@ type permissions struct {
type uploadFileChunkReply struct {
TotalWritten int64 `json:"TotalWritten"`
}
// usersInfoResponse describes OpenDrive users/info.json response
type usersInfoResponse struct {
// This response contains many other values but these are the only ones currently in use
StorageUsed int64 `json:"StorageUsed,string"`
MaxStorage int64 `json:"MaxStorage,string"`
}

View File

@@ -58,10 +58,12 @@ func populateSSECustomerKeys(opt *Options) error {
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
if opt.SSECustomerKeySha256 == "" {
opt.SSECustomerKeySha256 = sha256Checksum
} else if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
} else {
if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
}
}
if opt.SSECustomerAlgorithm == "" {
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
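// Minimal sketch (an assumption, not part of this change) of how a matching
// sse_customer_key_sha256 value can be derived from the raw 256-bit key,
// mirroring the check above:
//
//	key := make([]byte, 32) // the decoded AES-256 customer key
//	sum := sha256.Sum256(key)
//	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))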

View File

@@ -22,7 +22,6 @@ import (
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
@@ -149,7 +148,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
@@ -184,9 +183,6 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
if fserrors.ContextError(ctx, &err) {
return false, err
}
// retry all chunks once have done the first few
return true, err
}
@@ -283,7 +279,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary))
copy(w.md5s[start:end], (*md5binary)[:])
}
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {

View File

@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
Sensitive: true,
}, {
Name: "compartment",
Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: false,
Required: true,
Sensitive: true,
}, {
Name: "region",

View File

@@ -109,37 +109,6 @@ type Hashes struct {
SHA256 string `json:"sha256"`
}
// FileTruncateResponse is the response from /file_truncate
type FileTruncateResponse struct {
Error
}
// FileCloseResponse is the response from /file_close
type FileCloseResponse struct {
Error
}
// FileOpenResponse is the response from /file_open
type FileOpenResponse struct {
Error
Fileid int64 `json:"fileid"`
FileDescriptor int64 `json:"fd"`
}
// FileChecksumResponse is the response from /file_checksum
type FileChecksumResponse struct {
Error
MD5 string `json:"md5"`
SHA1 string `json:"sha1"`
SHA256 string `json:"sha256"`
}
// FilePWriteResponse is the response from /file_pwrite
type FilePWriteResponse struct {
Error
Bytes int64 `json:"bytes"`
}
// UploadFileResponse is the response from /uploadfile
type UploadFileResponse struct {
Error

View File

@@ -14,7 +14,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
@@ -48,10 +47,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -59,8 +60,8 @@ var (
)
// Update the TokenURL with the actual hostname
func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
}
// Register with Fs
@@ -77,7 +78,7 @@ func init() {
fs.Errorf(nil, "Failed to read config: %v", err)
}
updateTokenURL(oauthConfig, optc.Hostname)
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("form not found in response")
}
@@ -145,8 +146,7 @@ we have to rely on user password authentication for it.`,
Help: "Your pcloud password.",
IsPassword: true,
Advanced: true,
},
}...),
}}...),
})
}
@@ -161,16 +161,15 @@ type Options struct {
// Fs represents a remote pcloud
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
ts *oauthutil.TokenSource // the token source, used to create new clients
srv *rest.Client // the connection to the server
cleanupSrv *rest.Client // the connection used for the cleanup method
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
cleanupSrv *rest.Client // the connection used for the cleanup method
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
// Object describes a pcloud object
@@ -318,7 +317,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
ts: ts,
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
@@ -328,7 +326,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CaseInsensitive: false,
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
if !canCleanup {
f.features.CleanUp = nil
@@ -336,7 +333,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetErrorHandler(errorHandler)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
@@ -378,57 +375,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, nil
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
client, err := f.newSingleConnClient(ctx)
if err != nil {
return nil, fmt.Errorf("create client: %w", err)
}
// init an empty file
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, fmt.Errorf("resolve src: %w", err)
}
openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
return nil, fmt.Errorf("close file: %w", err)
}
writer := &writerAt{
ctx: ctx,
fs: f,
size: size,
remote: remote,
fileID: openResult.Fileid,
}
return writer, nil
}
// Create a new http client, accepting keep-alive headers, limited to a single connection.
// Necessary for the pcloud fileops API, as it binds the session to the underlying TCP connection.
// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
baseClient := fshttp.NewClient(ctx)
baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
t.MaxConnsPerHost = 1
t.DisableKeepAlives = false
})
// Set our own http client in the context
ctx = oauthutil.Context(ctx, baseClient)
// create a new oauth client, reuse the token source
oAuthClient := oauth2.NewClient(ctx, f.ts)
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -1148,42 +1094,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
if err != nil {
return err
}
fileID := fileIDtoNumber(o.id)
filename = o.fs.opt.Enc.FromStandardName(filename)
opts := rest.Opts{
Method: "PUT",
Path: "/copyfile",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fileid", fileID)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("toname", filename)
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))
result := &api.ItemResult{}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("update mtime: copyfile: %w", err)
}
if err := o.setMetaData(&result.Metadata); err != nil {
return err
}
return nil
// Pcloud doesn't have a way of doing this so returning this
// error will cause the file to be re-uploaded to set the time.
return fs.ErrorCantSetModTime
}
// Storable returns a boolean showing whether this object is storable

View File

@@ -1,261 +0,0 @@
package pcloud
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"fmt"
"net/url"
"strconv"
"time"
"github.com/rclone/rclone/backend/pcloud/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
ctx context.Context
fs *Fs
size int64
remote string
fileID int64
}
// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
// Avoiding race conditions: depending on the TCP connection, there might be
// caching issues when checking the size immediately after a write.
// Hence we try to avoid them by checking the resulting size on a different connection.
if c.size < 0 {
// Without knowing the size, we cannot do size checks.
// Fall back to a 1s sleep in the hope that the write has settled.
time.Sleep(1 * time.Second)
return nil
}
sizeOk := false
sizeLastSeen := int64(0)
for retry := 0; retry < 5; retry++ {
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
obj, err := c.fs.NewObject(c.ctx, c.remote)
if err != nil {
return fmt.Errorf("get uploaded obj: %w", err)
}
sizeLastSeen = obj.Size()
if obj.Size() == c.size {
sizeOk = true
break
}
time.Sleep(1 * time.Second)
}
if !sizeOk {
return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
}
return nil
}
// WriteAt implements fs.WriteAt.
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
contentLength := len(buffer)
inSHA1Bytes := sha1.Sum(buffer)
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
client, err := c.fs.newSingleConnClient(c.ctx)
if err != nil {
return 0, fmt.Errorf("create client: %w", err)
}
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
if err != nil {
return 0, fmt.Errorf("open file: %w", err)
}
// get target hash
outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
if err != nil {
return 0, err
}
outSHA1 := outChecksum.SHA1
if outSHA1 == "" || inSHA1 == "" {
return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1)
}
// check hash of buffer, skip if fits
if inSHA1 == outSHA1 {
return contentLength, nil
}
// upload buffer with offset if necessary
if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
return 0, err
}
// close fd
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
return contentLength, fmt.Errorf("close fd: %w", err)
}
return contentLength, nil
}
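// Usage sketch (illustrative, names as defined above): random access writes
// through the OpenWriterAt feature.
//
//	w, err := f.OpenWriterAt(ctx, "dir/file.bin", int64(len(data)))
//	if err == nil {
//		_, _ = w.WriteAt(data, 0) // skipped if the range already matches by SHA1
//		_ = w.Close()             // verifies the final size with retries
//	}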
// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_open",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
filename = srcFs.opt.Enc.FromStandardName(filename)
opts.Parameters.Set("name", filename)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE
result := &api.FileOpenResponse{}
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("open new file descriptor: %w", err)
}
return result, nil
}
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_open",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
opts.Parameters.Set("flags", "0x0002") // O_WRITE
result := &api.FileOpenResponse{}
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("open new file descriptor: %w", err)
}
return result, nil
}
// Call pcloud file_checksum, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func fileChecksum(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd, offset, count int64,
) (*api.FileChecksumResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_checksum",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
result := &api.FileChecksumResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
}
return result, nil
}
// Call pcloud file_pwrite, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func filePWrite(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
offset int64,
buf []byte,
) (*api.FilePWriteResponse, error) {
contentLength := int64(len(buf))
opts := rest.Opts{
Method: "PUT",
Path: "/file_pwrite",
Body: bytes.NewReader(buf),
ContentLength: &contentLength,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Close: false,
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
result := &api.FilePWriteResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
}
return result, nil
}
// Call pcloud file_close, see [API Doc].
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func fileClose(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
) (*api.FileCloseResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_close",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Close: true,
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
result := &api.FileCloseResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("close file descriptor: %w", err)
}
return result, nil
}

View File

@@ -513,72 +513,6 @@ type RequestDecompress struct {
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------ authorization
// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
CaptchaToken string `json:"captcha_token"`
ExpiresIn int64 `json:"expires_in"` // currently 300s
// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
Expiry time.Time `json:"expiry,omitempty"`
URL string `json:"url,omitempty"` // a link for users to solve captcha
}
// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
if t.Expiry.IsZero() {
return false
}
expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
return t != nil && t.CaptchaToken != "" && !t.expired()
}
// CaptchaTokenRequest is to request for captcha token
type CaptchaTokenRequest struct {
Action string `json:"action,omitempty"`
CaptchaToken string `json:"captcha_token,omitempty"`
ClientID string `json:"client_id,omitempty"`
DeviceID string `json:"device_id,omitempty"`
Meta *CaptchaTokenMeta `json:"meta,omitempty"`
}
// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
CaptchaSign string `json:"captcha_sign,omitempty"`
ClientVersion string `json:"client_version,omitempty"`
PackageName string `json:"package_name,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
UserID string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
UserName string `json:"username,omitempty"`
Email string `json:"email,omitempty"`
PhoneNumber string `json:"phone_number,omitempty"`
}
// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
TokenType string `json:"token_type"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
Sub string `json:"sub"`
}
// Expiry returns expiry from expires in, so it should be called on retrieval
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
// ------------------------------------------------------------
// NOT implemented YET

View File

@@ -3,10 +3,8 @@ package pikpak
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@@ -16,13 +14,10 @@ import (
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
@@ -267,20 +262,15 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
if err != nil {
return
}
if src.Size() == 0 {
// If src is zero-length, the API will return
// Error "cid and file_size is required" (400)
// In this case, we can simply return cid == gcid
return cid, nil
}
params := url.Values{}
params.Set("cid", cid)
params.Set("file_size", strconv.FormatInt(src.Size(), 10))
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
}
info := struct {
@@ -357,7 +347,7 @@ func calcGcid(r io.Reader, size int64) (string, error) {
calcBlockSize := func(j int64) int64 {
var psize int64 = 0x40000
for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
psize <<= 1
psize = psize << 1
}
return psize
}
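// Worked example (illustrative): psize starts at 0x40000 (256 KiB) and doubles
// while j/psize > 0x200 (512), capped at 0x200000 (2 MiB). So for a 1 GiB file
// calcBlockSize returns 2 MiB, while for a 10 MiB file the ratio is only 40
// and it stays at 256 KiB.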
@@ -378,23 +368,11 @@ func calcGcid(r io.Reader, size int64) (string, error) {
return hex.EncodeToString(totalHash.Sum(nil)), nil
}
// unWrapObjectInfo returns the underlying Object unwrapped as much as
// possible, or nil, even if it is an OverrideRemote
func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
if o, ok := oi.(fs.Object); ok {
return fs.UnWrapObject(o)
} else if do, ok := oi.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
return do.UnWrap()
}
return nil
}
// calcCid calculates Cid from source
//
// Cid is a simplified version of Gcid
func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
srcObj := unWrapObjectInfo(src)
srcObj := fs.UnWrapObjectInfo(src)
if srcObj == nil {
return "", fmt.Errorf("failed to unwrap object from src: %s", src)
}
@@ -430,8 +408,6 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
return
}
// ------------------------------------------------------------ authorization
// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation
@@ -452,206 +428,3 @@ func genDeviceID() string {
}
return string(base)
}
var md5Salt = []string{
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
func calcCaptchaSign(deviceID string) (timestamp, sign string) {
timestamp = fmt.Sprint(time.Now().UnixMilli())
str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
for _, salt := range md5Salt {
str = md5Sum(str + salt)
}
sign = "1." + str
return
}
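// Illustrative sketch: the resulting sign is "1." followed by the 32 hex
// digits of the chained md5, e.g.
//
//	timestamp, sign := calcCaptchaSign("0123456789abcdef0123456789abcdef")
//	// timestamp is a millisecond Unix time such as "1721297958000"
//	// sign looks like "1.5f4dcc3b5aa765d61d8327deb882cf99" (value here is made up)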
func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
req = &api.CaptchaTokenRequest{
Action: action,
CaptchaToken: oldToken, // can be empty initially
ClientID: clientID,
DeviceID: opt.DeviceID,
Meta: new(api.CaptchaTokenMeta),
}
switch action {
case "POST:/v1/auth/signin":
req.Meta.UserName = opt.Username
default:
timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
req.Meta.CaptchaSign = captchaSign
req.Meta.Timestamp = timestamp
req.Meta.ClientVersion = clientVersion
req.Meta.PackageName = packageName
req.Meta.UserID = opt.UserID
}
return
}
// CaptchaTokenSource stores updated captcha tokens in the config file
type CaptchaTokenSource struct {
mu sync.Mutex
m configmap.Mapper
opt *Options
token *api.CaptchaToken
ctx context.Context
rst *pikpakClient
}
// initialize CaptchaTokenSource from rclone.conf if possible
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
token := new(api.CaptchaToken)
tokenString, ok := m.Get("captcha_token")
if !ok || tokenString == "" {
fs.Debugf(nil, "failed to read captcha token out of config file")
} else {
if err := json.Unmarshal([]byte(tokenString), token); err != nil {
fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
}
}
return &CaptchaTokenSource{
m: m,
opt: opt,
token: token,
ctx: ctx,
rst: newPikpakClient(getClient(ctx, opt), opt),
}
}
// requestToken retrieves a captcha token from the API
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
}
var info *api.CaptchaToken
_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
if err == nil && info.ExpiresIn != 0 {
// populate to Expiry
info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
cts.token = info // update with a new one
}
return
}
func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
oldToken := ""
if cts.token != nil {
oldToken = cts.token.CaptchaToken
}
action := "GET:/drive/v1/about"
if opts.RootURL == "" && opts.Path != "" {
action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
} else if u, err := url.Parse(opts.RootURL); err == nil {
action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
}
req := newCaptchaTokenRequest(action, oldToken, cts.opt)
if err := cts.requestToken(cts.ctx, req); err != nil {
return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
}
// put it into rclone.conf
tokenBytes, err := json.Marshal(cts.token)
if err != nil {
return "", fmt.Errorf("failed to marshal captcha token: %w", err)
}
cts.m.Set("captcha_token", string(tokenBytes))
return cts.token.CaptchaToken, nil
}
// Invalidate resets existing captcha token for a forced refresh
func (cts *CaptchaTokenSource) Invalidate() {
cts.mu.Lock()
cts.token.CaptchaToken = ""
cts.mu.Unlock()
}
// Token returns a valid captcha token
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
cts.mu.Lock()
defer cts.mu.Unlock()
if cts.token.Valid() {
return cts.token.CaptchaToken, nil
}
return cts.refreshToken(opts)
}
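// Usage sketch (an assumption, not part of this change): obtaining a captcha
// token for a request and attaching it as a header, as CallJSON below does
// automatically.
//
//	cts := newCaptchaTokenSource(ctx, opt, m)
//	token, err := cts.Token(&rest.Opts{Method: "GET", Path: "/drive/v1/about"})
//	if err == nil {
//		opts.ExtraHeaders = map[string]string{"x-captcha-token": token}
//	}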
// pikpakClient wraps rest.Client with captcha token handling
type pikpakClient struct {
opt *Options
client *rest.Client
captcha *CaptchaTokenSource
}
// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
// * error handler
// * root url
// * default headers
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
for key, val := range map[string]string{
"Referer": "https://mypikpak.com/",
"x-client-id": clientID,
"x-client-version": clientVersion,
"x-device-id": opt.DeviceID,
// "x-device-model": "firefox%2F129.0",
// "x-device-name": "PC-Firefox",
// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
// "x-net-work-type": "NONE",
// "x-os-version": "Win32",
// "x-platform-version": "1",
// "x-protocol-version": "301",
// "x-provider-name": "NONE",
// "x-sdk-version": "8.0.3",
} {
client.SetHeader(key, val)
}
return &pikpakClient{
client: client,
opt: opt,
}
}
// This should be called right after the pikpakClient is initialized
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
return c
}
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {
return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
}
if opts.ExtraHeaders == nil {
opts.ExtraHeaders = make(map[string]string)
}
opts.ExtraHeaders["x-captcha-token"] = token
}
return c.client.CallJSON(ctx, opts, request, response)
}
func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
return c.client.Call(ctx, opts)
}

View File

@@ -23,7 +23,6 @@ package pikpak
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -38,11 +37,10 @@ import (
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -52,7 +50,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
@@ -66,74 +63,64 @@ import (
// Constants
const (
clientID = "YUMx5nI8ZU8Ap8pm"
clientVersion = "2.0.0"
packageName = "mypikpak.com"
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
rcloneClientID = "YNxT9w7GMdWvEOKa"
rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadConcurrency = s3manager.DefaultUploadConcurrency
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
ClientID: clientID,
RedirectURL: oauthutil.RedirectURL,
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Returns OAuthOptions modified for pikpak
func pikpakOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Advanced = true
} else if opt.Name == config.ConfigClientSecret {
opt.Advanced = true
}
opts = append(opts, opt)
}
return opts
}
// pikpakAutorize retrieves OAuth token using user/pass and save it to rclone.conf
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
if opt.Username == "" {
return errors.New("no username")
// override default client id/secret
if id, ok := m.Get("client_id"); ok && id != "" {
oauthConfig.ClientID = id
}
if secret, ok := m.Get("client_secret"); ok && secret != "" {
oauthConfig.ClientSecret = secret
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
}
// new device id if necessary
if len(opt.DeviceID) != 32 {
opt.DeviceID = genDeviceID()
m.Set("device_id", opt.DeviceID)
fs.Infof(nil, "Using new device id %q", opt.DeviceID)
}
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/auth/signin",
}
req := map[string]string{
"username": opt.Username,
"password": pass,
"client_id": clientID,
}
var token api.Token
rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
_, err = rst.CallJSON(ctx, &opts, req, &token)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
rst.captcha.Invalidate()
_, err = rst.CallJSON(ctx, &opts, req, &token)
}
}
t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
if err != nil {
return fmt.Errorf("failed to retrieve token using username/password: %w", err)
}
t := &oauth2.Token{
AccessToken: token.AccessToken,
TokenType: token.TokenType,
RefreshToken: token.RefreshToken,
Expiry: token.Expiry(),
}
return oauthutil.PutToken(name, m, t, false)
}
@@ -172,7 +159,7 @@ func init() {
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: []fs.Option{{
Options: append(pikpakOAuthOptions(), []fs.Option{{
Name: "user",
Help: "Pikpak username.",
Required: true,
@@ -182,18 +169,6 @@ func init() {
Help: "Pikpak password.",
Required: true,
IsPassword: true,
}, {
Name: "device_id",
Help: "Device ID used for authorization.",
Advanced: true,
Sensitive: true,
}, {
Name: "user_agent",
Default: defaultUserAgent,
Advanced: true,
Help: fmt.Sprintf(`HTTP user agent for pikpak.
Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
@@ -213,11 +188,6 @@ Fill in for rclone to use a non root folder as its starting point.
Default: false,
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "no_media_link",
Default: false,
Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
Advanced: true,
}, {
Name: "hash_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
@@ -277,7 +247,7 @@ this may help to speed up the transfers.`,
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -285,13 +255,9 @@ this may help to speed up the transfers.`,
type Options struct {
Username string `config:"user"`
Password string `config:"pass"`
UserID string `config:"user_id"` // only available during runtime
DeviceID string `config:"device_id"`
UserAgent string `config:"user_agent"`
RootFolderID string `config:"root_folder_id"`
UseTrash bool `config:"use_trash"`
TrashedOnly bool `config:"trashed_only"`
NoMediaLink bool `config:"no_media_link"`
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
@@ -304,10 +270,11 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
rst *pikpakClient // the connection to the server
rst *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
rootFolderID string // the id of the root folder
deviceID string // device id used for api requests
client *http.Client // authorized client
m configmap.Mapper
tokenMu *sync.Mutex // when renewing tokens
@@ -461,12 +428,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
} else if apiErr.Reason == "file_space_not_enough" {
// "file_space_not_enough" (8): Storage space is not enough
return false, fserrors.FatalError(err)
} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
// "captcha_invalid" (9): Verification code is invalid
// This error occurred on the POST:/drive/v1/files endpoint
// when a zero-byte file was uploaded with an invalid captcha token
f.rst.captcha.Invalidate()
return true, err
}
}
@@ -490,36 +451,13 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
// Override few config settings and create a client
newCtx, ci := fs.AddConfig(ctx)
ci.UserAgent = opt.UserAgent
return fshttp.NewClient(newCtx)
}
// newClientWithPacer sets a new http/rest client with a pacer to Fs
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
var ts *oauthutil.TokenSource
f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
if err != nil {
return fmt.Errorf("failed to create oauth client: %w", err)
}
token, err := ts.Token()
if err != nil {
return err
}
// parse user_id from oauth access token for later use
if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
info := struct {
UserID string `json:"sub,omitempty"`
}{}
if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
f.opt.UserID = info.UserID
}
}
f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
return nil
}
@@ -552,19 +490,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
CanHaveEmptyDirectories: true, // can have empty directories
NoMultiThreading: true, // can't have multiple threads downloading
}).Fill(ctx, f)
// new device id if necessary
if len(f.opt.DeviceID) != 32 {
f.opt.DeviceID = genDeviceID()
m.Set("device_id", f.opt.DeviceID)
fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
}
f.deviceID = genDeviceID()
if err := f.newClientWithPacer(ctx); err != nil {
// re-authorize if necessary
if strings.Contains(err.Error(), "invalid_grant") {
return f, f.reAuthorize(ctx)
}
return nil, err
}
@@ -1262,33 +1190,32 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
p := resumable.Params
endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"
// Create a credentials provider
creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)
cfg, err := awsconfig.LoadDefaultConfig(ctx,
awsconfig.WithCredentialsProvider(creds),
awsconfig.WithRegion("pikpak"))
cfg := &aws.Config{
Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
Region: aws.String("pikpak"),
Endpoint: &endpoint,
}
sess, err := session.NewSession(cfg)
if err != nil {
return
}
partSize := chunksize.Calculator(name, size, s3manager.MaxUploadParts, f.opt.ChunkSize)
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = aws.String("https://mypikpak.com/")
})
partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)
// Create an uploader with custom options
uploader := manager.NewUploader(client, func(u *manager.Uploader) {
// Create an uploader with the session and custom options
uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
u.PartSize = int64(partSize)
u.Concurrency = f.opt.UploadConcurrency
})
// Perform an upload
_, err = uploader.Upload(ctx, &s3.PutObjectInput{
// Upload input parameters
uParams := &s3manager.UploadInput{
Bucket: &p.Bucket,
Key: &p.Key,
Body: in,
})
}
// Perform an upload
_, err = uploader.UploadWithContext(ctx, uParams)
return
}
@@ -1321,12 +1248,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
return nil, fmt.Errorf("invalid response: %+v", new)
} else if new.File.Phase == api.PhaseTypeComplete {
// early return; in case of zero-byte objects
if acc, ok := in.(*accounting.Account); ok && acc != nil {
// if `in io.Reader` is still of type `*accounting.Account` (meaning it is unused)
// it is considered a server-side copy as no incoming/outgoing traffic occurs at all
acc.ServerSideTransferStart()
acc.ServerSideCopyEnd(size)
}
return new.File, nil
}
@@ -1581,14 +1502,15 @@ func (o *Object) setMetaData(info *api.File) (err error) {
o.md5sum = info.Md5Checksum
if info.Links.ApplicationOctetStream != nil {
o.link = info.Links.ApplicationOctetStream
if !o.fs.opt.NoMediaLink {
if fid := parseFileID(o.link.URL); fid != "" {
for _, media := range info.Medias {
if media.Link != nil && parseFileID(media.Link.URL) == fid {
fs.Debugf(o, "Using a media link")
o.link = media.Link
break
}
if fid := parseFileID(o.link.URL); fid != "" {
for mid, media := range info.Medias {
if media.Link == nil {
continue
}
if mfid := parseFileID(media.Link.URL); fid == mfid {
fs.Debugf(o, "Using a media link from Medias[%d]", mid)
o.link = media.Link
break
}
}
}
@@ -1777,7 +1699,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
gcid, err := o.fs.getGcid(ctx, src)
if err != nil || gcid == "" {
fs.Debugf(o, "calculating gcid: %v", err)
if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
// No buffering; directly calculate gcid from source
rc, err := srcObj.Open(ctx)
if err != nil {
@@ -1789,12 +1711,18 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
return fmt.Errorf("failed to calculate gcid: %w", err)
}
} else {
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
var wrap accounting.WrapFn
in, wrap = accounting.UnWrap(in)
var cleanup func()
gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold))
defer cleanup()
if err != nil {
return fmt.Errorf("failed to calculate gcid: %w", err)
}
// Wrap the accounting back onto the stream
in = wrap(in)
}
}
fs.Debugf(o, "gcid = %s", gcid)

View File

@@ -1,397 +0,0 @@
package pixeldrain
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
Path []FilesystemNode `json:"path"`
BaseIndex int `json:"base_index"`
Children []FilesystemNode `json:"children"`
}
// Base returns the base node of the path. This is the node that the path points
// to
func (fsp *FilesystemPath) Base() FilesystemNode {
return fsp.Path[fsp.BaseIndex]
}
// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as response from update
// commands, if requested
type FilesystemNode struct {
Type string `json:"type"`
Path string `json:"path"`
Name string `json:"name"`
Created time.Time `json:"created"`
Modified time.Time `json:"modified"`
ModeOctal string `json:"mode_octal"`
// File params
FileSize int64 `json:"file_size"`
FileType string `json:"file_type"`
SHA256Sum string `json:"sha256_sum"`
// ID is only filled in when the file/directory is publicly shared
ID string `json:"id,omitempty"`
}
// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are in chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry
// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred, the path relative to the requested
// directory and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
Time time.Time `json:"time"`
Path string `json:"path"`
PathNew string `json:"path_new"`
Action string `json:"action"`
Type string `json:"type"`
}
// UserInfo contains information about the logged in user
type UserInfo struct {
Username string `json:"username"`
Subscription SubscriptionType `json:"subscription"`
StorageSpaceUsed int64 `json:"storage_space_used"`
}
// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription, like the
// perks and cost
type SubscriptionType struct {
Name string `json:"name"`
StorageSpace int64 `json:"storage_space"`
}
// APIError is the error type returned by the pixeldrain API
type APIError struct {
StatusCode string `json:"value"`
Message string `json:"message"`
}
func (e APIError) Error() string { return e.StatusCode }
// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
errNotFound = errors.New("pd api: path not found")
errExists = errors.New("pd api: node already exists")
errAuthenticationFailed = errors.New("pd api: authentication failed")
)
func apiErrorHandler(resp *http.Response) (err error) {
var e APIError
if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
return fmt.Errorf("failed to parse error json: %w", err)
}
// We close the body here so that the API handlers can be sure that the
// response body is not still open when an error was returned
if err = resp.Body.Close(); err != nil {
return fmt.Errorf("failed to close resp body: %w", err)
}
switch e.StatusCode {
case "path_not_found":
return errNotFound
case "directory_not_empty":
return fs.ErrorDirectoryNotEmpty
case "node_already_exists":
return errExists
case "authentication_failed":
return errAuthenticationFailed
case "permission_denied":
return fs.ErrorPermissionDenied
}
return e
}
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
}
// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
// can understand.
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
params = make(url.Values)
if modified, ok := meta["mtime"]; ok {
params.Set("modified", modified)
}
if created, ok := meta["btime"]; ok {
params.Set("created", created)
}
if mode, ok := meta["mode"]; ok {
params.Set("mode", mode)
}
if shared, ok := meta["shared"]; ok {
params.Set("shared", shared)
}
if loggingEnabled, ok := meta["logging_enabled"]; ok {
params.Set("logging_enabled", loggingEnabled)
}
return params
}
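// Illustrative example (not part of the original file): an input of
// fs.Metadata{"mtime": "2024-07-18T11:19:18Z", "mode": "644"} produces
// url.Values setting modified=2024-07-18T11:19:18Z and mode=644. Note that
// the rclone metadata key "mtime" maps to the API parameter "modified".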
// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
// Trim the path prefix. The path prefix is hidden from rclone during all
// operations. Saving it here would confuse rclone a lot. So instead we
// strip it here and add it back for every API request we need to perform
node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
return &Object{fs: f, base: node}
}
func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}
func (f *Fs) escapePath(p string) (out string) {
// Add the path prefix, encode all the parts and combine them together
var parts = strings.Split(f.pathPrefix+p, "/")
for i := range parts {
parts[i] = url.PathEscape(parts[i])
}
return strings.Join(parts, "/")
}
func (f *Fs) put(
ctx context.Context,
path string,
body io.Reader,
meta fs.Metadata,
options []fs.OpenOption,
) (node FilesystemNode, err error) {
var params = paramsFromMetadata(meta)
// Tell the server to automatically create parent directories if they don't
// exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "PUT",
Path: f.escapePath(path),
Body: body,
Parameters: params,
Options: options,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &rest.Opts{
Method: "GET",
Path: f.escapePath(path),
Options: options,
})
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
return fsp, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(path),
// To receive node info from the pixeldrain API you need to add the
// ?stat query. Without it pixeldrain will return the file contents
// if the URL points to a file
Parameters: url.Values{"stat": []string{""}},
},
nil,
&fsp,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
return changeLog, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
Path: f.escapePath(""),
Parameters: url.Values{
"change_log": []string{""},
"start": []string{start.Format(time.RFC3339Nano)},
"end": []string{end.Format(time.RFC3339Nano)},
},
},
nil,
&changeLog,
)
return shouldRetry(ctx, resp, err)
})
}
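// A minimal usage sketch (an illustration, not part of the original file),
// assuming an initialised *Fs: fetch everything logged in the last 24 hours,
// the maximum window the API keeps changes for:
//
// clog, err := f.changeLog(ctx, time.Now().Add(-24*time.Hour), time.Now())
// if err != nil {
// return err
// }
// for _, e := range clog {
// fmt.Printf("%s: %s -> %s\n", e.Action, e.Path, e.PathNew)
// }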
func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
var params = paramsFromMetadata(fields)
params.Set("action", "update")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(path),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
Path: f.escapePath(dir),
MultipartParams: url.Values{"action": []string{"mkdirall"}},
NoResponse: true,
},
nil,
nil,
)
return shouldRetry(ctx, resp, err)
})
}
var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")
// Renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
srcFs, ok := src.(*Fs)
if !ok {
// This is not a pixeldrain FS, can't move
return node, errIncompatibleSourceFS
} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
// Path is not in the same root dir, can't move
return node, errIncompatibleSourceFS
}
var params = paramsFromMetadata(meta)
params.Set("action", "rename")
// The target is always in our own filesystem so here we use our
// own pathPrefix
params.Set("target", f.pathPrefix+to)
// Create parent directories if the parent directory of the file
// does not exist yet
params.Set("make_parents", "true")
return node, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "POST",
// Important: We use the source FS path prefix here
Path: srcFs.escapePath(from),
MultipartParams: params,
},
nil,
&node,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
var params url.Values
if recursive {
// Tell the server to recursively delete all child files
params = url.Values{"recursive": []string{"true"}}
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "DELETE",
Path: f.escapePath(path),
Parameters: params,
NoResponse: true,
},
nil, nil,
)
return shouldRetry(ctx, resp, err)
})
}
func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
return user, f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(
ctx,
&rest.Opts{
Method: "GET",
// The default RootURL points at the filesystem endpoint. We can't
// use that to request user information. So here we override it to
// the user endpoint
RootURL: f.opt.APIURL + "/user",
},
nil,
&user,
)
return shouldRetry(ctx, resp, err)
})
}

View File

@@ -1,567 +0,0 @@
// Package pixeldrain provides an interface to the Pixeldrain object storage
// system.
package pixeldrain
import (
"context"
"errors"
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
timeFormat = time.RFC3339Nano
minSleep = pacer.MinSleep(10 * time.Millisecond)
maxSleep = pacer.MaxSleep(1 * time.Second)
decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "pixeldrain",
Description: "Pixeldrain Filesystem",
NewFs: NewFs,
Config: nil,
Options: []fs.Option{{
Name: "api_key",
Help: "API key for your pixeldrain account.\n" +
"Found on https://pixeldrain.com/user/api_keys.",
Sensitive: true,
}, {
Name: "root_folder_id",
Help: "Root of the filesystem to use.\n\n" +
"Set to 'me' to use your personal filesystem. " +
"Set to a shared directory ID to use a shared directory.",
Default: "me",
}, {
Name: "api_url",
Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
"this at default. It is only intended to be changed for testing purposes.",
Default: "https://pixeldrain.com/api",
Advanced: true,
Required: true,
}},
MetadataInfo: &fs.MetadataInfo{
System: map[string]fs.MetadataHelp{
"mode": {
Help: "File mode",
Type: "octal, unix style",
Example: "755",
},
"mtime": {
Help: "Time of last modification",
Type: "RFC 3339",
Example: timeFormat,
},
"btime": {
Help: "Time of file birth (creation)",
Type: "RFC 3339",
Example: timeFormat,
},
},
Help: "Pixeldrain supports file modes and creation times.",
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
RootFolderID string `config:"root_folder_id"`
APIURL string `config:"api_url"`
}
// Fs represents a remote pixeldrain filesystem
type Fs struct {
name string // name of this remote, as given to NewFS
root string // the path we are working on, as given to NewFS
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
pacer *fs.Pacer
loggedIn bool // if the user is authenticated
// pathPrefix is the directory we're working in. The pathPrefix is stripped
// from every API response containing a path. The pathPrefix always begins
// and ends with a slash for concatenation convenience
pathPrefix string
}
// Object describes a pixeldrain file
type Object struct {
fs *Fs // what this object is part of
base FilesystemNode // the node this object references
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
}
f.features = (&fs.Features{
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
}).Fill(ctx, f)
// Set the path prefix. This is the path to the root directory on the
// server. We add it to each request and strip it from each response because
// rclone does not want to see it
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
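// Example (illustrative): root_folder_id "me" with root "backup/photos"
// yields a pathPrefix of "/me/backup/photos/"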
// The root URL equates to https://pixeldrain.com/api/filesystem during
// normal operation. API handlers need to manually add the pathPrefix to
// each request
f.srv.SetRoot(opt.APIURL + "/filesystem")
// If using an APIKey, set the Authorization header
if len(opt.APIKey) > 0 {
f.srv.SetUserPass("", opt.APIKey)
// Check if credentials are correct
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get user data: %w", err)
}
f.loggedIn = true
fs.Infof(f,
"Logged in as '%s', subscription '%s', storage limit %d",
user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
)
}
if !f.loggedIn && opt.RootFolderID == "me" {
return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
}
// Satisfy TestFsIsFile. This test expects that we throw an error if the
// filesystem root is a file
fsp, err := f.stat(ctx, "")
if err != errNotFound && err != nil {
// It doesn't matter if the root directory does not exist, as long as it
// is not a file. This is what the test dictates
return f, err
} else if err == nil && fsp.Base().Type == "file" {
// The filesystem root is a file; rclone wants us to set the root to the
// parent directory
f.root = path.Dir(f.root)
f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fsp, err := f.stat(ctx, dir)
if err == errNotFound {
return nil, fs.ErrorDirNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "file" {
return nil, fs.ErrorIsFile
}
entries = make(fs.DirEntries, len(fsp.Children))
for i := range fsp.Children {
if fsp.Children[i].Type == "dir" {
entries[i] = f.nodeToDirectory(fsp.Children[i])
} else {
entries[i] = f.nodeToObject(fsp.Children[i])
}
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fsp, err := f.stat(ctx, remote)
if err == errNotFound {
return nil, fs.ErrorObjectNotFound
} else if err != nil {
return nil, err
} else if fsp.Base().Type == "dir" {
return nil, fs.ErrorIsDir
}
return f.nodeToObject(fsp.Base()), nil
}
// Put the object
//
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("failed to get object metadata: %w", err)
}
// Overwrite the mtime if it was not already set in the metadata
if _, ok := meta["mtime"]; !ok {
if meta == nil {
meta = make(fs.Metadata)
}
meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
}
node, err := f.put(ctx, src.Remote(), in, meta, options)
if err != nil {
return nil, fmt.Errorf("failed to put object: %w", err)
}
return f.nodeToObject(node), nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
err = f.mkdir(ctx, dir)
if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
// Spec says we do not return an error if the directory already exists
return nil
}
return err
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, false)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }
// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
err = f.delete(ctx, dir, true)
if err == errNotFound {
return fs.ErrorDirNotFound
}
return err
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
// This is not a pixeldrain object. Can't move
return nil, fs.ErrorCantMove
}
node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
if err == errIncompatibleSourceFS {
return nil, fs.ErrorCantMove
} else if err == errNotFound {
return nil, fs.ErrorObjectNotFound
}
return f.nodeToObject(node), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
if err == errIncompatibleSourceFS {
return fs.ErrorCantDirMove
} else if err == errNotFound {
return fs.ErrorDirNotFound
} else if err == errExists {
return fs.ErrorDirExists
}
return err
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
// If the root folder ID is not /me we need to explicitly enable change
// logging for this directory or file
if f.pathPrefix != "/me/" {
_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
if err != nil {
fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
}
}
go f.changeNotify(ctx, notify, newInterval)
}
func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
var ticker = time.NewTicker(<-newInterval)
var lastPoll = time.Now()
for {
select {
case dur, ok := <-newInterval:
if !ok {
ticker.Stop()
return
}
fs.Debugf(f, "Polling changes at an interval of %s", dur)
ticker.Reset(dur)
case t := <-ticker.C:
clog, err := f.changeLog(ctx, lastPoll, t)
if err != nil {
fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
continue
}
for i := range clog {
fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)
if clog[i].Type == "dir" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
} else if clog[i].Type == "file" {
notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
}
}
lastPoll = t
}
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Put already supports streaming so we just use that
return f.Put(ctx, in, src, options...)
}
// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
return err
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
if err != nil {
return "", err
}
if fsn.ID != "" {
return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
}
return "", nil
}
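// For illustration: with the default api_url this yields links of the form
// https://pixeldrain.com/d/<id>, since "/api" is replaced by "/d/"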
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
user, err := f.userInfo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}
if user.Subscription.StorageSpace > -1 {
usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
}
return usage, nil
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
if err == nil {
o.base.Modified = modTime
}
return err
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
return o.fs.read(ctx, o.base.Path, options)
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// Copy the parameters and update the object
o.base.Modified = src.ModTime(ctx)
o.base.FileSize = src.Size()
o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
_, err = o.fs.Put(ctx, in, o, options...)
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.base.Path, false)
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the SHA-256 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA256 {
return "", hash.ErrUnsupported
}
return o.base.SHA256Sum, nil
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.base.Path
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.base.Path
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.base.Modified
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.base.FileSize
}
// MimeType returns the content type of the Object if known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
return o.base.FileType
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return fs.Metadata{
"mode": o.base.ModeOctal,
"mtime": o.base.Modified.Format(timeFormat),
"btime": o.base.Created.Format(timeFormat),
}, nil
}
// Verify that all the interfaces are implemented correctly
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Info = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.DirEntry = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)

View File

@@ -1,18 +0,0 @@
// Test pixeldrain filesystem interface
package pixeldrain_test
import (
"testing"
"github.com/rclone/rclone/backend/pixeldrain"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPixeldrain:",
NilObject: (*pixeldrain.Object)(nil),
SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
})
}

View File

@@ -43,6 +43,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -58,10 +59,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,

View File

@@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// No root so return old f
return f, nil
}
_, err := tempF.newObject(ctx, remote)
_, err := tempF.newObjectWithLink(ctx, remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
// ErrorIsDir if possible without doing any extra work,
// otherwise ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObject(ctx, remote)
return f.newObjectWithLink(ctx, remote, nil)
}
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
@@ -516,27 +516,35 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
return link, nil
}
// readMetaDataForLink reads the metadata from the remote
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
// readMetaDataForRemote reads the metadata from the remote
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
link, err := f.getObjectLink(ctx, remote)
if err != nil {
return nil, nil, err
}
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
var err error
if err = f.pacer.Call(func() (bool, error) {
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
return shouldRetry(ctx, err)
}); err != nil {
return nil, err
return nil, nil, err
}
return fileSystemAttrs, nil
return link, fileSystemAttrs, nil
}
// Return an Object from a path and link
// readMetaData gets the metadata if it hasn't already been fetched
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
// it also sets the info
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
if o.link != nil {
return nil
}
link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
if err != nil {
return err
}
o.id = link.LinkID
@@ -546,10 +554,6 @@ func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.
o.mimetype = link.MIMEType
o.link = link
fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
if err != nil {
return nil, err
}
if fileSystemAttrs != nil {
o.modTime = fileSystemAttrs.ModificationTime
o.originalSize = &fileSystemAttrs.Size
@@ -557,18 +561,23 @@ func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.
o.digests = &fileSystemAttrs.Digests
}
return o, nil
return nil
}
// Return an Object from a path only
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
link, err := f.getObjectLink(ctx, remote)
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.readMetaData(ctx, link)
if err != nil {
return nil, err
}
return f.newObjectWithLink(ctx, remote, link)
return o, nil
}
// List the objects and directories in dir into entries. The
@@ -874,7 +883,9 @@ func (o *Object) Storable() bool {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, *o.originalSize)
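// Illustrative note: originalSize may be nil when the remote records no
// size for a file, so dereferencing it unconditionally can panic; hence
// the nil guard below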
if o.originalSize != nil {
fs.FixRangeOption(options, *o.originalSize)
}
var offset, limit int64 = 0, -1
for _, option := range options { // if the caller passes in nil for options, it will become an array containing nil
switch x := option.(type) {

View File

@@ -572,17 +572,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}
// We have successfully copied the file to a random name
// Check to see if the file already exists first and delete it if so
existingObj, err := f.NewObject(ctx, remote)
if err == nil {
err = existingObj.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
}
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))

View File

@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
/*
@@ -40,10 +41,12 @@ const (
var (
// Description of how to auth for this app
putioConfig = &oauthutil.Config{
Scopes: []string{},
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
putioConfig = &oauth2.Config{
Scopes: []string{},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,

View File

@@ -13,8 +13,7 @@ import (
"reflect"
"strings"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go/service/s3"
)
// flags
@@ -83,18 +82,15 @@ func main() {
package s3
import (
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
import "github.com/aws/aws-sdk-go/service/s3"
`)
genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
genSetFrom(new(types.Object), new(types.ObjectVersion))
genSetFrom(new(s3.Object), new(s3.ObjectVersion))
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
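The genSetFrom calls above drive a code generator: for each pair of types it emits a static SetFrom helper that copies every identically named, identically typed field from the second struct to the first. A reflection-based sketch of the same idea (an approximation of the generated code's behaviour, not a copy of it; assumes import "reflect" and pointer arguments):
func setFrom(dst, src interface{}) {
d := reflect.ValueOf(dst).Elem()
s := reflect.ValueOf(src).Elem()
for i := 0; i < s.NumField(); i++ {
field := s.Type().Field(i)
// Copy only when dst has a settable field with the same name and type
if f := d.FieldByName(field.Name); f.IsValid() && f.CanSet() && f.Type() == field.Type {
f.Set(s.Field(i))
}
}
}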

Some files were not shown because too many files have changed in this diff.