Mirror of https://github.com/rclone/rclone.git, synced 2025-12-24 04:04:37 +00:00

Compare commits: v1.60.1...fix-union- (1 commit: 1d1d847f18)
.github/workflows/build.yml (vendored): 64 changed lines
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true

@@ -41,14 +41,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.18.x'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
             os: macos-11
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true

@@ -57,14 +57,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-11
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '1.19.x'
+            go: '1.18.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'

@@ -74,20 +74,20 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.18.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.17
+          - job_name: go1.16
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.16.x'
             quicktest: true
             racequicktest: true
 
-          - job_name: go1.18
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.17.x'
             quicktest: true
             racequicktest: true

@@ -97,12 +97,12 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
 
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
         with:
           stable: 'false'
           go-version: ${{ matrix.go }}

@@ -162,7 +162,7 @@ jobs:
           env
 
       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

@@ -226,10 +226,10 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
 
       - name: Code quality test
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v2
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest

@@ -242,18 +242,22 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
 
-      # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v1
         with:
-          go-version: 1.19.x
+          go-version: 1.18.x
 
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
+
       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

@@ -274,29 +278,27 @@ jobs:
           go install golang.org/x/mobile/cmd/gobind@latest
           go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
-          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
 
       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
 
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV

@@ -304,12 +306,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
 
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV

@@ -317,12 +319,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
 
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV

@@ -330,7 +332,7 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
 
       - name: Upload artifacts
         run: |
@@ -12,7 +12,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish image
@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Get actual patch version

@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
      - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
@@ -5,7 +5,7 @@ linters:
     - deadcode
     - errcheck
     - goimports
-    - revive
+    #- revive
     - ineffassign
     - structcheck
     - varcheck

@@ -20,7 +20,7 @@ issues:
   exclude-use-default: false
 
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-issues-per-linter: 0
+  max-per-linter: 0
 
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0
@@ -77,7 +77,7 @@ Make sure you
 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
 
-When you are done with that push your changes to GitHub:
+When you are done with that push your changes to Github:
 
     git push -u origin my-new-feature
 

@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
 
 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
 
-## Using Git and GitHub ##
+## Using Git and Github ##
 
 ### Committing your changes ###
MANUAL.html (generated): 2470 changed lines, file diff suppressed because it is too large

MANUAL.txt (generated): 2813 changed lines, file diff suppressed because it is too large
@@ -45,10 +45,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
-  * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+  * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
   * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-  * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
   * Koofr [:page_facing_up:](https://rclone.org/koofr/)
   * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
   * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)

@@ -62,20 +62,17 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
   * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
   * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
-  * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
   * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
-  * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
   * Seafile [:page_facing_up:](https://rclone.org/seafile/)
   * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
   * SFTP [:page_facing_up:](https://rclone.org/sftp/)
-  * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
   * Storj [:page_facing_up:](https://rclone.org/storj/)
   * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.
 
-## Tidy beta
-
-At some point after the release run
-
-    bin/tidy-beta v1.55
-
-where the version number is that of a couple ago to remove old beta binaries.
-
 ## Making a point release
 
 If rclone needs a point release due to some horrendous bug:
@@ -1,4 +1,3 @@
-// Package alias implements a virtual provider to rename existing remotes.
 package alias
 
 import (

@@ -1,4 +1,3 @@
-// Package all imports all the backends
 package all
 
 import (

@@ -24,6 +23,7 @@ import (
     _ "github.com/rclone/rclone/backend/hdfs"
     _ "github.com/rclone/rclone/backend/hidrive"
     _ "github.com/rclone/rclone/backend/http"
+    _ "github.com/rclone/rclone/backend/hubic"
     _ "github.com/rclone/rclone/backend/internetarchive"
     _ "github.com/rclone/rclone/backend/jottacloud"
     _ "github.com/rclone/rclone/backend/koofr"

@@ -34,7 +34,6 @@ import (
     _ "github.com/rclone/rclone/backend/netstorage"
     _ "github.com/rclone/rclone/backend/onedrive"
     _ "github.com/rclone/rclone/backend/opendrive"
-    _ "github.com/rclone/rclone/backend/oracleobjectstorage"
     _ "github.com/rclone/rclone/backend/pcloud"
     _ "github.com/rclone/rclone/backend/premiumizeme"
     _ "github.com/rclone/rclone/backend/putio"

@@ -44,7 +43,6 @@ import (
     _ "github.com/rclone/rclone/backend/sftp"
     _ "github.com/rclone/rclone/backend/sharefile"
     _ "github.com/rclone/rclone/backend/sia"
-    _ "github.com/rclone/rclone/backend/smb"
     _ "github.com/rclone/rclone/backend/storj"
     _ "github.com/rclone/rclone/backend/sugarsync"
     _ "github.com/rclone/rclone/backend/swift"
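The import churn above is rclone's backend registry at work: `backend/all` exists only to pull every backend into the binary, and each blank import matters because a backend registers itself from an init function. A minimal sketch of the pattern, using the local backend as the example:

    // Package all imports the backends so their init functions run.
    package all

    import (
        // A blank import is for side effects only: the backend's init()
        // calls fs.Register, which is what makes it usable at runtime.
        _ "github.com/rclone/rclone/backend/local"
    )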
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // This is a workaround for Amazon sometimes returning
 //
-//   - 408 REQUEST_TIMEOUT
-//   - 504 GATEWAY_TIMEOUT
-//   - 500 Internal server error
+// * 408 REQUEST_TIMEOUT
+// * 504 GATEWAY_TIMEOUT
+// * 500 Internal server error
 //
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
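Most of the comment-only churn in this diff is mechanical: Go 1.19's gofmt reformats doc comments, rewriting `*` bullets as `-` bullets so godoc renders them as real lists. A minimal sketch of the two styles on a hypothetical function (not rclone code):

    // Package docdemo shows gofmt's Go 1.19 doc-comment list style.
    package docdemo

    // ShouldRetry reports whether an HTTP status is worth retrying.
    //
    // Go 1.19 gofmt writes list items like this:
    //   - 408 REQUEST_TIMEOUT
    //   - 504 GATEWAY_TIMEOUT
    //   - 500 Internal server error
    //
    // Pre-1.19 code often used "* item" bullets, which the new gofmt
    // rewrites into the dashed form above.
    func ShouldRetry(status int) bool {
        return status == 408 || status == 504 || status == 500
    }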
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 
 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1002,6 +1002,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1,7 +1,8 @@
-// Package azureblob provides an interface to the Microsoft Azure blob object storage system
-
 //go:build !plan9 && !solaris && !js
 // +build !plan9,!solaris,!js
 
+// Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob
 
 import (
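The reshuffle above mixes two conventions: `//go:build` is the Go 1.17+ constraint syntax, and the legacy `// +build` line (where commas mean AND) is kept next to it so older toolchains apply the same rule; the v1.60.1 side additionally hoists the `// Package ...` doc comment above the constraints. A minimal sketch of the paired constraint lines on a hypothetical package:

    // Package constraintdemo is a sketch of paired build constraints.

    //go:build !plan9 && !solaris && !js
    // +build !plan9,!solaris,!js

    package constraintdemo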
@@ -1114,7 +1115,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
 
 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -1246,9 +1247,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1355,12 +1356,11 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
 // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.md5
-//    o.meta
+// o.id
+// o.modTime
+// o.size
+// o.md5
+// o.meta
 func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
     metadata := info.NewMetadata()
     size := info.ContentLength()

@@ -1443,11 +1443,10 @@ func (o *Object) clearMetaData() {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.md5
+// o.id
+// o.modTime
+// o.size
+// o.md5
 func (o *Object) readMetaData() (err error) {
     container, _ := o.split()
     if !o.fs.containerOK(container) {

@@ -1677,14 +1676,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         }
     }
 
-    uploadParts := maxUploadParts
+    uploadParts := int64(maxUploadParts)
     if uploadParts < 1 {
         uploadParts = 1
     } else if uploadParts > maxUploadParts {
         uploadParts = maxUploadParts
     }
     // calculate size of parts/blocks
-    partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)
+    partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
 
     putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
         BufferSize: int(partSize),
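Note the `chunksize.Calculator` call gaining a size argument on the v1.60.1 side: knowing the source size and the maximum number of parts lets the chunk size be scaled up for very large objects instead of hitting the part limit. A simplified sketch of that scaling idea (assumed behaviour, not rclone's exact implementation):

    // Package chunkdemo sketches part-limit-aware chunk sizing.
    package chunkdemo

    // calcChunkSize doubles the chunk size until the object fits in
    // maxParts chunks, so uploads near the provider's part limit still
    // succeed while small uploads keep the configured default.
    func calcChunkSize(size int64, maxParts int, defaultChunkSize int64) int64 {
        chunkSize := defaultChunkSize
        for size/chunkSize >= int64(maxParts) {
            chunkSize *= 2
        }
        return chunkSize
    }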
@@ -1,4 +1,3 @@
-// Package api provides types used by the Backblaze B2 API.
 package api
 
 import (

@@ -239,7 +238,7 @@ type GetFileInfoRequest struct {
 // If the original source of the file being uploaded has a last
 // modified time concept, Backblaze recommends using
 // src_last_modified_millis as the name, and a string holding the base
-// 10 number of milliseconds since midnight, January 1, 1970
+// 10 number number of milliseconds since midnight, January 1, 1970
 // UTC. This fits in a 64 bit integer such as the type "long" in the
 // programming language Java. It is intended to be compatible with
 // Java's time long. For example, it can be passed directly into the

@@ -1,4 +1,4 @@
-// Package b2 provides an interface to the Backblaze B2 object storage system.
+// Package b2 provides an interface to the Backblaze B2 object storage system
 package b2
 
 // FIXME should we remove sha1 checks from here as rclone now supports
@@ -656,15 +656,15 @@ var errEndList = errors.New("end list")
 //
 // (bucket, directory) is the starting directory
 //
-// If prefix is set then it is removed from all file names.
+// If prefix is set then it is removed from all file names
 //
 // If addBucket is set then it adds the bucket to the start of the
-// remotes generated.
+// remotes generated
 //
-// If recurse is set the function will recursively list.
+// If recurse is set the function will recursively list
 //
 // If limit is > 0 then it limits to that many files (must be less
-// than 1000).
+// than 1000)
 //
 // If hidden is set then it will list the hidden (deleted) files too.
 //

@@ -1025,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {
 
 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -1334,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1478,7 +1478,7 @@ func (o *Object) Size() int64 {
 
 // Clean the SHA1
 //
-// Make sure it is lower case.
+// Make sure it is lower case
 //
 // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this

@@ -1490,11 +1490,10 @@ func cleanSHA1(sha1 string) string {
 // decodeMetaDataRaw sets the metadata from the data passed in
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
     o.id = ID
     o.sha1 = SHA1

@@ -1513,11 +1512,10 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 // decodeMetaData sets the metadata in the object from an api.File
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaData(info *api.File) (err error) {
     return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
 }

@@ -1525,11 +1523,10 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
 // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
     return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
 }

@@ -1587,11 +1584,10 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-//
-//    o.id
-//    o.modTime
-//    o.size
-//    o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) readMetaData(ctx context.Context) (err error) {
     if o.id != "" {
         return nil
@@ -97,7 +97,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
     if size == -1 {
         fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
     } else {
-        chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
+        chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
         parts = size / int64(chunkSize)
         if size%int64(chunkSize) != 0 {
             parts++
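The Debugf line above spells out the hard ceiling for streamed (unknown-size) uploads: maxParts multiplied by the chunk size. A quick sanity check with illustrative values (B2's documented 10,000-part limit and a 96 MiB chunk size; these numbers are assumptions, not read from this diff):

    package main

    import "fmt"

    func main() {
        const maxParts = 10000             // B2's documented part limit
        const chunkSize = 96 * 1024 * 1024 // illustrative --b2-chunk-size of 96 MiB
        limit := int64(maxParts) * chunkSize
        fmt.Printf("max streamed upload: %.2f TiB\n", float64(limit)/(1<<40))
        // Prints: max streamed upload: 0.92 TiB
    }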
@@ -14,7 +14,7 @@ const (
     timeFormat = `"` + time.RFC3339 + `"`
 )
 
-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time
@@ -266,7 +266,7 @@ type Fs struct {
     root         string             // the path we are working on
     opt          Options            // parsed options
     features     *fs.Features       // optional features
-    srv          *rest.Client       // the connection to the server
+    srv          *rest.Client       // the connection to the one drive server
     dirCache     *dircache.DirCache // Map of directory path to directory id
     pacer        *fs.Pacer          // pacer for API calls
     tokenRenewer *oauthutil.Renew   // renew the token on expiry

@@ -692,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
-// Returns the object, leaf, directoryID and error.
+// Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
 func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {

@@ -752,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -792,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 
 // PutUnchecked the object into the container
 //
-// This will produce an error if the object already exists.
+// This will produce an error if the object already exists
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -877,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -995,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1235,6 +1235,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -1345,9 +1346,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
-// If existing is set then it updates the object rather than creating a new one.
+// If existing is set then it updates the object rather than creating a new one
 //
-// The new object may have been created if an error is returned.
+// The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
     if o.fs.tokenRenewer != nil {
         o.fs.tokenRenewer.Start()
backend/cache/cache.go (vendored): 1 changed line

@@ -1,7 +1,6 @@
 //go:build !plan9 && !js
 // +build !plan9,!js
 
-// Package cache implements a virtual provider to cache existing remotes.
 package cache
 
 import (
@@ -32,6 +32,7 @@ import (
     "github.com/rclone/rclone/fs/operations"
 )
 
+//
 // Chunker's composite files have one or more chunks
 // and optional metadata object. If it's present,
 // meta object is named after the original file.

@@ -64,7 +65,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transaction identifier
+// keep their temporary chunk names (with the transacion identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.

@@ -78,6 +79,7 @@ import (
 // Metadata format v1 does not define any control chunk types,
 // they are currently ignored aka reserved.
 // In future they can be used to implement resumable uploads etc.
+//
 const (
     ctrlTypeRegStr   = `[a-z][a-z0-9]{2,6}`
     tempSuffixFormat = `_%04s`

@@ -540,6 +542,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
 //
 // xactID is a transaction identifier. Empty xactID denotes active chunk,
 // otherwise temporary chunk name is produced.
+//
 func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
     dir, parentName := path.Split(filePath)
     var name, tempSuffix string

@@ -705,6 +708,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 // directory together with dead chunks.
 // In future a flag named like `--chunker-list-hidden` may be added to
 // rclone that will tell List to reveal hidden chunks.
+//
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
     entries, err = f.base.List(ctx, dir)
     if err != nil {

@@ -864,6 +868,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 // Note that chunker prefers analyzing file names rather than reading
 // the content of meta object assuming that directory scans are fast
 // but opening even a small file can be slow on some backends.
+//
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     return f.scanObject(ctx, remote, false)
 }

@@ -1079,7 +1084,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 
 // readXactID returns the transaction ID stored in the passed metadata object
 func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
-    // if xactID has already been read and cached return it now
+    // if xactID has already been read and cahced return it now
     if o.xIDCached {
         return o.xactID, nil
     }

@@ -1581,6 +1586,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // This command will chain to `purge` from wrapped remote.
 // As a result it removes not only composite chunker files with their
 // active chunks but also all hidden temporary chunks in the directory.
+//
 func (f *Fs) Purge(ctx context.Context, dir string) error {
     do := f.base.Features().Purge
     if do == nil {

@@ -1622,6 +1628,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // Unsupported control chunks will get re-picked by a more recent
 // rclone version with unexpected results. This can be helped by
 // the `delete hidden` flag above or at least the user has been warned.
+//
 func (o *Object) Remove(ctx context.Context) (err error) {
     if err := o.f.forbidChunk(o, o.Remote()); err != nil {
         // operations.Move can still call Remove if chunker's Move refuses

@@ -1797,9 +1804,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1818,9 +1825,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -2118,6 +2125,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 // file, then tries to read it from metadata. This in theory
 // handles the unusual case when a small file has been tampered
 // on the level of wrapped remote but chunker is unaware of that.
+//
 func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
     if err := o.readMetadata(ctx); err != nil {
         return "", err // valid metadata is required to get hash, abort

@@ -2406,6 +2414,7 @@ type metaSimpleJSON struct {
 // - for files larger than chunk size
 // - if file contents can be mistaken as meta object
 // - if consistent hashing is On but wrapped remote can't provide given hash
+//
 func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
     version := metadataVersion
     if xactID == "" && version == 2 {

@@ -2438,6 +2447,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
 // New format will have a higher version number and cannot be correctly
 // handled by current implementation.
 // The version check below will then explicitly ask user to upgrade rclone.
+//
 func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
     // Be strict about JSON format
     // to reduce possibility that a random small file resembles metadata.
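As context for the chunker comment hunks above: composite files are stored as numbered data chunks named after the original file, and in-flight chunks carry an extra transaction suffix (the surviving context shows tempSuffixFormat = `_%04s`). A rough sketch of that naming scheme, with the chunk-suffix format being my assumption rather than something read from this diff:

    // Package chunknamedemo sketches chunker-style chunk naming.
    package chunknamedemo

    import "fmt"

    // chunkName appends a 3-digit chunk number to the file name; a
    // non-empty transaction ID marks a temporary chunk that will be
    // renamed to the active name when the transaction commits.
    func chunkName(file string, chunkNo int, xactID string) string {
        name := fmt.Sprintf("%s.rclone_chunk.%03d", file, chunkNo+1)
        if xactID != "" {
            name += fmt.Sprintf("_%04s", xactID) // temporary chunk
        }
        return name
    }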
@@ -1,4 +1,4 @@
-// Package combine implents a backend to combine multiple remotes in a directory tree
+// Package combine implents a backend to combine multipe remotes in a directory tree
 package combine
 
 /*

@@ -145,7 +145,6 @@ func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, er
         dir:            dir,
         pathAdjustment: newAdjustment(f.root, dir),
     }
-    cache.PinUntilFinalized(u.f, u)
     return u, nil
 }
 

@@ -457,9 +456,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -491,9 +490,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -29,7 +29,6 @@ import (
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fs/operations"
 )

@@ -91,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
 Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffman encoding only. Only use if you know what you
+Level -2 uses Huffmann encoding only. Only use if you know what you
 are doing.
 Level 0 turns off compression.`,
             Default: sgzip.DefaultCompression,

@@ -131,7 +130,7 @@ type Fs struct {
     features *fs.Features // optional features
 }
 
-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
     opt := new(Options)

@@ -368,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     if err != nil {
         return nil, err
     }
-    meta, err := readMetadata(ctx, mo)
-    if err != nil {
-        return nil, fmt.Errorf("error decoding metadata: %w", err)
+    meta := readMetadata(ctx, mo)
+    if meta == nil {
+        return nil, errors.New("error decoding metadata")
     }
     // Create our Object
     o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
     if err != nil {
         return nil, err
     }
-    return f.newObject(o, mo, meta), nil
+    return f.newObject(o, mo, meta), err
 }
 
 // checkCompressAndType checks if an object is compressible and determines it's mime type
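Alongside the nil-return rewrite, the v1.60.1 side wraps the decode failure with `%w`, which keeps the underlying error inspectable by callers. A minimal sketch of the pattern with hypothetical names (not the rclone code):

    // Package wrapdemo sketches error wrapping with %w.
    package wrapdemo

    import (
        "encoding/json"
        "fmt"
    )

    // Meta is a stand-in for the compress backend's metadata struct.
    type Meta struct {
        Size int64 `json:"size"`
    }

    // decode adds context to a failure while keeping the original
    // error reachable through errors.Is and errors.As.
    func decode(raw []byte) (*Meta, error) {
        var m Meta
        if err := json.Unmarshal(raw, &m); err != nil {
            return nil, fmt.Errorf("error decoding metadata: %w", err)
        }
        return &m, nil
    }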
@@ -455,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
         return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
     }
 
-    // Need to include what we already read
+    // Need to include what we allready read
     in = &ReadCloserWrapper{
         Reader: io.MultiReader(bytes.NewReader(buf), in),
         Closer: in,

@@ -681,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
         }
         return nil, err
     }
-    return f.newObject(dataObject, mo, meta), nil
+    return f.newObject(dataObject, mo, meta), err
 }
 
 // Put in to the remote path with the modTime given of the given size

@@ -735,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
     }
 
     // If our new object is compressed we have to rename it with the correct size.
-    // Uncompressed objects don't store the size in the name so we they'll already have the correct name.
+    // Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
     if compressible {
         wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
         if err != nil {

@@ -746,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
     return newObj, nil
 }
 
-// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right no I can't think of a way to make this work.
 
 // PutUnchecked uploads the object

@@ -789,9 +785,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -839,9 +835,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1044,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
 }
 
 // This function will read the metadata from a metadata object.
-func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
     // Open our meradata object
     rc, err := mo.Open(ctx)
     if err != nil {
-        return nil, err
+        return nil
     }
-    defer fs.CheckClose(rc, &err)
+    defer func() {
+        err := rc.Close()
+        if err != nil {
+            fs.Errorf(mo, "Error closing object: %v", err)
+        }
+    }()
     jr := json.NewDecoder(rc)
     meta = new(ObjectMetadata)
     if err = jr.Decode(meta); err != nil {
-        return nil, err
+        return nil
     }
-    return meta, nil
+    return meta
 }
 
 // Remove removes this object
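The readMetadata rewrite above swaps a hand-rolled deferred close, which only logged the error, for `defer fs.CheckClose(rc, &err)`, which folds a Close failure into the function's named error return. A minimal sketch of how such a helper works, assuming the semantics the diff implies:

    // Package closedemo sketches the deferred-close error pattern.
    package closedemo

    import "io"

    // checkClose closes c and, if the caller hasn't already failed,
    // stores the Close error in the caller's named return value.
    func checkClose(c io.Closer, err *error) {
        cerr := c.Close()
        if *err == nil {
            *err = cerr
        }
    }

    // readAll shows the call shape: deferring with a pointer to the
    // named error return means a failed Close is not silently dropped.
    func readAll(rc io.ReadCloser) (data []byte, err error) {
        defer checkClose(rc, &err)
        return io.ReadAll(rc)
    }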
@@ -1101,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     origName := o.Remote()
     if o.meta.Mode != Uncompressed || compressible {
         newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
-        if err != nil {
-            return err
-        }
         if newObject.Object.Remote() != o.Object.Remote() {
             if removeErr := o.Object.Remove(ctx); removeErr != nil {
                 return removeErr

@@ -1117,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         }
         // If we are, just update the object and metadata
         newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
-        if err != nil {
-            return err
-        }
     }
+    if err != nil {
+        return err
+    }
     // Update object metadata and return
     o.Object = newObject.Object

@@ -1130,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
-    if o == nil {
-        log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
-    }
     return &Object{
         Object: o,
         f:      f,

@@ -1145,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
 
 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
-    if o == nil {
-        log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
-    }
     return &Object{
         Object: o,
         f:      f,

@@ -1175,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
         return err
     }
     if o.meta == nil {
-        o.meta, err = readMetadata(ctx, o.mo)
+        o.meta = readMetadata(ctx, o.mo)
     }
     return err
 }
@@ -127,11 +127,11 @@ type fileNameEncoding interface {
 // RFC4648
 //
 // The standard encoding is modified in two ways
-//   - it becomes lower case (no-one likes upper case filenames!)
-//   - we strip the padding character `=`
+// * it becomes lower case (no-one likes upper case filenames!)
+// * we strip the padding character `=`
 type caseInsensitiveBase32Encoding struct{}
 
-// EncodeToString encodes a string using the modified version of
+// EncodeToString encodes a strign using the modified version of
 // base32 encoding.
 func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
     encoded := base32.HexEncoding.EncodeToString(src)

@@ -244,7 +244,7 @@ func (c *Cipher) putBlock(buf []byte) {
 
 // encryptSegment encrypts a path segment
 //
-// This uses EME with AES.
+// This uses EME with AES
 //
 // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and

@@ -254,8 +254,8 @@ func (c *Cipher) putBlock(buf []byte) {
 // same filename must encrypt to the same thing.
 //
 // This means that
-//   - filenames with the same name will encrypt the same
-//   - filenames which start the same won't have a common prefix
+// * filenames with the same name will encrypt the same
+// * filenames which start the same won't have a common prefix
 func (c *Cipher) encryptSegment(plaintext string) string {
     if plaintext == "" {
         return ""

@@ -1085,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 
 // DecryptDataSeek decrypts the data stream from offset
 //
-// The open function must return a ReadCloser opened to the offset supplied.
+// The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
 func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
@@ -125,7 +125,7 @@ names, or for debugging purposes.`,
 
 This option could help with shortening the encrypted filename. The
 suitable option would depend on the way your remote count the filename
-length and if it's case sensitive.`,
+length and if it's case sensitve.`,
             Default: "base32",
             Examples: []fs.OptionExample{
                 {

@@ -507,9 +507,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -532,9 +532,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1210,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
         ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-        FilterAware:             true,
     }).Fill(ctx, f)
 
     // Create a new authorized Drive client.

@@ -2181,7 +2180,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -2415,9 +2414,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -2650,9 +2649,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -3306,7 +3305,7 @@ drives found and a combined drive.
     upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
-be accessible with the aliases shown. Any illegal characters will be
+be accessible with the aliases shown. Any illegal charactes will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
 drives combined into one directory tree.

@@ -3570,6 +3569,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
 
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *baseObject) ModTime(ctx context.Context) time.Time {

@@ -3826,7 +3826,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
 
 // Update the already existing object
 //
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -518,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 
 // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
 func (f *Fs) InternalTestAgeQuery(t *testing.T) {
-    // Check set up for filtering
-    assert.True(t, f.Features().FilterAware)
-
     opt := &filter.Opt{}
     err := opt.MaxAge.Set("1h")
     assert.NoError(t, err)
@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
     }
     b.shutOnce.Do(func() {
         atexit.Unregister(b.atexit)
-        fs.Infof(b.f, "Committing uploads - please wait...")
+        fs.Infof(b.f, "Commiting uploads - please wait...")
         // show that batcher is shutting down
         close(b.closed)
         // quit the commitLoop by sending a quitRequest message

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
             Advanced: true,
         }, {
             Name:     "batch_commit_timeout",
-            Help:     `Max time to wait for a batch to finish committing`,
+            Help:     `Max time to wait for a batch to finish comitting`,
             Default:  fs.Duration(10 * time.Minute),
             Advanced: true,
         }, {

@@ -925,7 +925,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 // Put the object
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -1044,9 +1044,9 @@ func (f *Fs) Precision() time.Duration {
 
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1105,9 +1105,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
 
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //

@@ -1435,7 +1435,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
         }
 
         if entryPath != "" {
-            notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
+            notifyFunc(entryPath, entryType)
         }
     }
     if !changeList.HasMore {
@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
|
||||
delta := int64(correctOffset) - int64(cursor.Offset)
|
||||
skip += delta
|
||||
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
|
||||
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
|
||||
if skip < 0 {
|
||||
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
|
||||
} else if skip == chunkSize {
|
||||
@@ -1697,9 +1697,6 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
|
||||
if size > 0 {
|
||||
// if size is known, check if next chunk is final
|
||||
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
|
||||
if in.BytesRead() > uint64(size) {
|
||||
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
|
||||
}
|
||||
} else {
|
||||
// if size is unknown, upload as long as we can read full chunks from the reader
|
||||
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
|
||||
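The incorrect-offset handling above re-synchronises the upload cursor with the offset the server reports; the arithmetic in isolation (hedged sketch with a hypothetical helper name):

// recoverSkip returns how many input bytes must be discarded so the
// next append starts at the offset the server expects.
func recoverSkip(sent, want uint64, skip int64) (int64, error) {
    skip += int64(want) - int64(sent)
    if skip < 0 {
        return 0, fmt.Errorf("can't seek backwards: sent %d, need %d, skip %d", sent, want, skip)
    }
    return skip, nil
}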
@@ -1763,7 +1760,7 @@ func checkPathLength(name string) (err error) {

// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -1,4 +1,3 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the CopyFileResponse
// FileCopy is used in the the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)

// Time represents date and time information for the
// Time represents represents date and time information for the
// filefabric API
type Time time.Time

@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}

// Status satisfies the error interface
// Status statisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

@@ -150,7 +150,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -373,7 +373,7 @@ type params map[string]interface{}

// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls.
// This is an entry point to all the method calls
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -678,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -697,7 +697,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -783,9 +783,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}

// Wait for the background task to complete if necessary
// Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -956,9 +956,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1135,6 +1135,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1200,7 +1201,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -15,7 +15,7 @@ import (
"sync"
"time"

"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -81,22 +81,8 @@ security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.

If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.

So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0,
Advanced: true,
}, {
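The removed help text encodes a simple deadlock-avoidance rule; a small illustration of that arithmetic (the flags are real rclone flags, the helper itself is hypothetical):

// minFTPConcurrency returns the smallest safe --ftp-concurrency value
// per the help text above: checkers+transfers+1 normally, or
// max(checkers, transfers)+1 when --check-first is used.
func minFTPConcurrency(checkers, transfers int, checkFirst bool) int {
    if checkFirst {
        if checkers > transfers {
            return checkers + 1
        }
        return transfers + 1
    }
    return checkers + transfers + 1
}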
@@ -124,11 +110,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -210,7 +191,6 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -336,44 +316,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")

// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil {
return nil, err
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
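A condensed sketch of the dial behaviour shown above, where the first connection stays cleartext for explicit TLS (it is upgraded later via AUTH TLS) while implicit TLS wraps every connection (simplified types; not the backend's exact code):

func makeDial(tlsConf *tls.Config, explicitTLS bool) func(network, address string) (net.Conn, error) {
    first := true
    return func(network, address string) (net.Conn, error) {
        conn, err := net.Dial(network, address)
        if err != nil || tlsConf == nil {
            return conn, err
        }
        if explicitTLS && first {
            first = false // upgraded later at the protocol level
            return conn, nil
        }
        return tls.Client(conn, tlsConf), nil
    }
}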
@@ -381,6 +331,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -397,9 +353,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -816,12 +769,11 @@ func (f *Fs) Hashes() hash.Set {

// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
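Reading the capability flags, the precision decision plausibly reduces to this (a sketch under assumed semantics; fs.ModTimeNotSupported is rclone's marker for unsupported modification times):

func precision(fGetTime, fLstTime, fSetTime bool) time.Duration {
    if (fGetTime || fLstTime) && fSetTime {
        return time.Second // FTP timestamps are whole seconds
    }
    return fs.ModTimeNotSupported
}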
@@ -1197,7 +1149,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read

// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)

if testing.Short() {

@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
set. Normally rclone will download these files files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
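What the decompress flag does can be illustrated with a minimal reader wrapper (a sketch, not the backend's implementation):

// maybeDecompress transparently gunzips a body that was stored with
// Content-Encoding: gzip; the size and hash of the stored object no
// longer match the bytes read afterwards.
func maybeDecompress(rc io.Reader, contentEncoding string) (io.Reader, error) {
    if contentEncoding != "gzip" {
        return rc, nil
    }
    return gzip.NewReader(rc)
}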
@@ -319,10 +319,6 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -347,7 +343,6 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -528,11 +523,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// Create a new authorized Drive client.
f.client = oAuthClient
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -589,7 +580,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories.
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -807,7 +798,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -909,9 +900,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -1,4 +1,3 @@
// Package api provides types used by the Google Photos API.
package api

import (

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S

// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature.
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -1,7 +1,6 @@
//go:build !plan9
// +build !plan9

// Package hdfs provides an interface to the HDFS storage system.
package hdfs

import (

@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}

// Do not allow the root-prefix to be nonexistent nor a directory,
// Do not allow the root-prefix to be non-existent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arise while finding the source or
// If any errors arrise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, either way.
// Try to check if object was updated, eitherway.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {

@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
// The input-data is constructed by concatinating the results in order.
pattern []int64
out []byte
name string

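The pattern encoding described in the comment above could be expanded like this (a hypothetical helper matching the comment's semantics):

// buildInput repeats data n times for entries at even indices and
// appends m null-bytes for entries at odd indices, concatenating
// the results in order.
func buildInput(data []byte, pattern []int64) []byte {
    var out []byte
    for i, n := range pattern {
        if i%2 == 0 {
            out = append(out, bytes.Repeat(data, int(n))...)
        } else {
            out = append(out, make([]byte, n)...)
        }
    }
    return out
}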
@@ -1,4 +1,3 @@
// Package internal provides utilities for HiDrive.
package internal

import (

62 backend/hubic/auth.go Normal file
@@ -0,0 +1,62 @@
package hubic

import (
"context"
"net/http"
"time"

"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)

// auth is an authenticator for swift
type auth struct {
f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}

// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}

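The bounded retry in Request above sleeps a fixed 100ms between attempts; generalised with context cancellation it might look like this (a sketch, not part of the backend):

func retry(ctx context.Context, attempts int, delay time.Duration, f func() error) (err error) {
    for i := 0; i < attempts; i++ {
        if err = f(); err == nil {
            return nil
        }
        select {
        case <-time.After(delay):
        case <-ctx.Done():
            return ctx.Err()
        }
    }
    return err
}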
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
200 backend/hubic/hubic.go Normal file
@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic

// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.

import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"

swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)

const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)

// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}

// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}

// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}

// Object describes a swift object
type Object struct {
*swift.Object
}

// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}

// ------------------------------------------------------------

// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}

// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}

f := &Fs{
client: client,
}

// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}

// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}

// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)
19 backend/hubic/hubic_test.go Normal file
@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test

import (
"testing"

"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}
@@ -227,7 +227,7 @@ type Object struct {
rawData json.RawMessage
}

// IAFile represents a subset of object in MetadataResponse.Files
// IAFile reprensents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
@@ -243,7 +243,7 @@ type IAFile struct {
rawData json.RawMessage
}

// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
@@ -572,14 +572,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -760,7 +760,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}

// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}

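quotePath is truncated in the hunk above; a plausible completion in the spirit of its comment (a sketch, not necessarily the repository's exact code) escapes each path segment while leaving the separators alone:

func quotePath(s string) string {
    seg := strings.Split(s, "/")
    newValues := []string{}
    for _, v := range seg {
        newValues = append(newValues, url.PathEscape(v))
    }
    return strings.Join(newValues, "/")
}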
@@ -1,4 +1,3 @@
// Package api provides types used by the Jottacloud API.
package api

import (

@@ -1,4 +1,3 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud

import (
@@ -153,7 +152,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
m.Set(configClientSecret, "")

srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
token, tokenEndpoint, username, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
@@ -162,6 +161,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
if err != nil {
return nil, fmt.Errorf("error while saving token: %w", err)
}
m.Set(configUsername, username)
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
@@ -272,21 +272,30 @@ sync or the backup section, for example, you must choose yes.`)
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
}
username, userOk := m.Get(configUsername)
if userOk && config.Result != "true" {
return fs.ConfigGoto("end")
}
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
if !userOk {
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
username = cust.Username
m.Set(configUsername, username)
if config.Result != "true" {
return fs.ConfigGoto("end")
}
}

acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
acc, err := getDriveInfo(ctx, jfsSrv, username)
if err != nil {
return nil, err
}
@@ -317,14 +326,10 @@ a new by entering a unique name.`, defaultDevice)
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
username, _ := m.Get(configUsername)

acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
acc, err := getDriveInfo(ctx, jfsSrv, username)
if err != nil {
return nil, err
}
@@ -339,7 +344,7 @@ a new by entering a unique name.`, defaultDevice)
var dev *api.JottaDevice
if isNew {
fs.Debugf(nil, "Creating new device: %s", device)
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err = createDevice(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -347,7 +352,7 @@ a new by entering a unique name.`, defaultDevice)
m.Set(configDevice, device)

if !isNew {
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -377,16 +382,11 @@ You may create a new by entering a unique name.`, device)
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}

username, _ := m.Get(configUsername)
device, _ := m.Get(configDevice)

dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -404,7 +404,7 @@ You may create a new by entering a unique name.`, device)
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
_, err := createMountPoint(ctx, jfsSrv, path.Join(username, device, mountpoint))
if err != nil {
return nil, err
}
@@ -591,10 +591,10 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
}

// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, username string, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, "", err
return token, "", "", err
}

// decode login token
@@ -602,7 +602,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, "", err
return token, "", "", err
}

// retrieve endpoint urls
@@ -613,7 +613,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
var wellKnown api.WellKnown
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, "", err
return token, "", "", err
}

// prepare out token request with username and password
@@ -635,14 +635,14 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
var jsonToken api.TokenJSON
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, "", err
return token, "", "", err
}

token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, wellKnown.TokenEndpoint, err
return token, wellKnown.TokenEndpoint, loginToken.Username, err
}

// getCustomerInfo queries general information about the account
@@ -944,11 +944,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return err
})

cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
user, userOk := m.Get(configUsername)
if userOk {
f.user = user
} else {
fs.Infof(nil, "Username not found in config and must be looked up, reconfigure to avoid the extra request")
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
}
f.user = cust.Username
f.setEndpoints()

if root != "" && !rootIsDir {
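The NewFs change above avoids one API round-trip by reading the username from the config first; the lookup-or-fetch pattern in isolation (a hypothetical helper, with an assumed config key):

// cachedUsername prefers the value stored in the config and only
// falls back to the API fetch when it is missing.
func cachedUsername(m configmap.Mapper, fetch func() (string, error)) (string, error) {
    if user, ok := m.Get("username"); ok && user != "" {
        return user, nil
    }
    return fetch() // pays the extra request
}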
@@ -1248,7 +1254,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1398,9 +1404,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1418,7 +1424,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1434,9 +1440,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1830,7 +1836,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -1,4 +1,3 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr

import (
@@ -668,7 +667,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my experiments
// I am not sure about meaning of "path" parameter; in my expriments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

@@ -22,7 +22,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -235,16 +234,15 @@ type Options struct {

// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -288,9 +286,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
@@ -300,7 +295,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -445,8 +439,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)

fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -497,13 +489,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
@@ -537,11 +522,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -715,9 +695,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1400,27 +1380,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)

if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}

@@ -9,13 +9,11 @@ import (
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
@@ -192,7 +190,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

// Reupload it with different contents but same size and timestamp
// Reupload it with diferent contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
@@ -368,36 +366,3 @@ func TestMetadata(t *testing.T) {
})

}

func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)

// Check set up for filtering
assert.True(t, f.Features().FilterAware)

// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))

// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))

// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)

// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}

@@ -5,41 +5,19 @@ package local

import (
"fmt"
"sync"
"time"

"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)

var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}

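The once-guarded probe above picks statx or fstatat exactly once per process; the same pattern stripped to its core (a sketch with hypothetical names):

var (
    pickOnce sync.Once
    statImpl func(path string) error
)

func statFile(path string) error {
    pickOnce.Do(func() {
        var stat unix.Statx_t
        // ENOSYS means the running kernel (< 4.11) lacks statx.
        if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
            statImpl = func(p string) error {
                var st unix.Statx_t
                return unix.Statx(unix.AT_FDCWD, p, 0, unix.STATX_ALL, &st)
            }
        } else {
            statImpl = func(p string) error {
                var st unix.Stat_t
                return unix.Fstatat(unix.AT_FDCWD, p, &st, 0)
            }
        }
    })
    return statImpl(path)
}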
// Read the metadata from the file into metadata where possible
|
||||
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
|
||||
flags := unix.AT_SYMLINK_NOFOLLOW
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
flags = 0
|
||||
}
|
||||
var stat unix.Statx_t
|
||||
// statx() was added to Linux in kernel 4.11
|
||||
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
|
||||
unix.STATX_TYPE | // Want stx_mode & S_IFMT
|
||||
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
|
||||
@@ -67,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
|
||||
setTime("btime", stat.Btime)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the metadata from the file into metadata where possible
|
||||
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
|
||||
flags := unix.AT_SYMLINK_NOFOLLOW
|
||||
if o.fs.opt.FollowSymlinks {
|
||||
flags = 0
|
||||
}
|
||||
var stat unix.Stat_t
|
||||
// fstatat() was added to Linux in kernel 2.6.16
|
||||
// Go only supports 2.6.32 or later
|
||||
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
|
||||
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
|
||||
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
|
||||
if stat.Rdev != 0 {
|
||||
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
|
||||
}
|
||||
setTime := func(key string, t unix.Timespec) {
|
||||
// The types of t.Sec and t.Nsec vary from int32 to int64 on
|
||||
// different Linux architectures so we need to cast them to
|
||||
// int64 here and hence need to quiet the linter about
|
||||
// unecessary casts.
|
||||
//
|
||||
// nolint: unconvert
|
||||
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
|
||||
}
|
||||
setTime("atime", stat.Atim)
|
||||
setTime("mtime", stat.Mtim)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (

const haveSetBTime = false

// setBTime changes the birth time of the file passed in
// setBTime changes the the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
    // Does nothing
    return nil
@@ -5,13 +5,13 @@ package local

import (
    "os"
    "syscall"
    "time"
    "syscall"
)

const haveSetBTime = true

// setBTime sets the birth time of the file passed in
// setBTime sets the the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
    h, err := syscall.Open(name, os.O_RDWR, 0755)
    if err != nil {
@@ -6,8 +6,6 @@ package local
import (
    "fmt"
    "strings"
    "sync/atomic"
    "syscall"

    "github.com/pkg/xattr"
    "github.com/rclone/rclone/fs"
@@ -18,30 +16,12 @@ const (
    xattrSupported = xattr.XATTR_SUPPORTED
)

// Check to see if the error supplied is a not supported error, and if
// so, disable xattrs
func (f *Fs) xattrIsNotSupported(err error) bool {
    xattrErr, ok := err.(*xattr.Error)
    if !ok {
        return false
    }
    // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
    if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
        // Show xattrs not supported
        if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
            fs.Errorf(f, "xattrs not supported - disabling: %v", err)
        }
        return true
    }
    return false
}

// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
    if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
    if !xattrSupported {
        return nil, nil
    }
    var list []string
@@ -51,9 +31,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
        list, err = xattr.LList(o.path)
    }
    if err != nil {
        if o.fs.xattrIsNotSupported(err) {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to read xattr: %w", err)
    }
    if len(list) == 0 {
@@ -68,9 +45,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
        v, err = xattr.LGet(o.path, k)
    }
    if err != nil {
        if o.fs.xattrIsNotSupported(err) {
            return nil, nil
        }
        return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
    }
    k = strings.ToLower(k)
@@ -90,7 +64,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
    if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
    if !xattrSupported {
        return nil
    }
    for k, value := range metadata {
@@ -106,9 +80,6 @@ func (o *Object) setXattr(metadata fs.Metadata) (err error) {
        err = xattr.LSet(o.path, k, v)
    }
    if err != nil {
        if o.fs.xattrIsNotSupported(err) {
            return nil
        }
        return fmt.Errorf("failed to set xattr key %q: %w", k, err)
    }
}
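The xattr hunks above combine two techniques: classifying a handful of errno values as "xattrs unsupported", and using an atomic compare-and-swap so the feature is disabled and logged exactly once per Fs. A minimal sketch of the same idea, assuming a Linux-style libc (errno names and the println are illustrative; this is not the rclone code):

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
        "syscall"
    )

    var xattrSupported int32 = 1 // 1 = assume supported until proven otherwise

    // disableIfUnsupported classifies err and flips the flag exactly once,
    // so the warning is printed only the first time.
    func disableIfUnsupported(err error) bool {
        if !errors.Is(err, syscall.ENOTSUP) && !errors.Is(err, syscall.EINVAL) {
            return false
        }
        if atomic.CompareAndSwapInt32(&xattrSupported, 1, 0) {
            fmt.Println("xattrs not supported - disabling:", err)
        }
        return true
    }

    func main() {
        disableIfUnsupported(syscall.ENOTSUP) // prints once
        disableIfUnsupported(syscall.ENOTSUP) // silent
    }

The CompareAndSwap is what makes the log line race-free: many goroutines may hit the error simultaneously, but only one wins the 1-to-0 transition.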
@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteP64 writes an signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
    buf := []byte(str)
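Both writers above serialize a 64-bit value with encoding/binary's unsigned varint, reinterpreting signed input as uint64 rather than zig-zag encoding it. A self-contained sketch of that exact call pattern (writeUvarint is an illustrative name, not the mailru BinWriter API):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // writeUvarint appends val to b using the same plain uint64
    // reinterpretation (not zig-zag) that WritePu64/WriteP64 use above.
    func writeUvarint(b *bytes.Buffer, val int64) {
        var a [binary.MaxVarintLen64]byte
        b.Write(a[:binary.PutUvarint(a[:], uint64(val))])
    }

    func main() {
        var b bytes.Buffer
        writeUvarint(&b, 300)
        fmt.Printf("%x\n", b.Bytes()) // ac02
    }

Note the consequence of the plain reinterpretation: a negative int64 becomes a very large uint64 and encodes to the maximum ten bytes.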
@@ -1,4 +1,3 @@
// Package api provides types used by the Mail.ru API.
package api

import (

@@ -1,4 +1,3 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru

import (
@@ -91,13 +90,8 @@ func init() {
    Help: "User name (usually email).",
    Required: true,
}, {
    Name: "pass",
    Help: `Password.

This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
    Name: "pass",
    Help: "Password.",
    Required: true,
    IsPassword: true,
}, {
@@ -636,17 +630,21 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt

// itemToEntry converts API item to rclone directory entry
// The dirSize return value is:
//
//  <0 - for a file or in case of error
//  =0 - for an empty directory
//  >0 - for a non-empty directory
// <0 - for a file or in case of error
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
    remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
    if err != nil {
        return nil, -1, err
    }

    modTime := time.Unix(int64(item.Mtime), 0)
    mTime := int64(item.Mtime)
    if mTime < 0 {
        fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
        mTime = 0
    }
    modTime := time.Unix(mTime, 0)

    isDir, err := f.isDir(item.Kind, remote)
    if err != nil {
@@ -2058,7 +2056,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
    req.WritePu16(0) // revision
    req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
    req.WritePu64(o.size)
    req.WriteP64(o.modTime.Unix())
    req.WritePu64(o.modTime.Unix())
    req.WritePu32(0)
    req.Write(o.mrHash)
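The itemToDirEntry change above clamps negative server timestamps before converting them, since time.Unix would otherwise produce dates before 1970. The clamp can be shown on its own as a tiny runnable sketch (clampMtime is an illustrative helper name):

    package main

    import (
        "fmt"
        "time"
    )

    // clampMtime mirrors the fix above: mail.ru can return negative
    // timestamps, so they are clamped to zero before conversion.
    func clampMtime(mtime int64) time.Time {
        if mtime < 0 {
            mtime = 0
        }
        return time.Unix(mtime, 0)
    }

    func main() {
        fmt.Println(clampMtime(-1).UTC()) // 1970-01-01 00:00:00 +0000 UTC
    }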
@@ -118,7 +118,7 @@ type Fs struct {

// Object describes a mega object
//
// Will definitely have info but maybe not meta.
// Will definitely have info but maybe not meta
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
        }
    }
    if err != nil {
        return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
        return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
    }
    // i is number of directories to create (may be 0)
    // node is directory to create them from
@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
        return f._rootNode, nil
    }

    // Check for preexisting root
    // Check for pre-existing root
    absRoot := f.srv.FS.GetRoot()
    node, err := f.findDir(absRoot, f.root)
    //log.Printf("findRoot findDir %p %v", node, err)
@@ -536,7 +536,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error.
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -554,7 +554,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -576,7 +576,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// PutUnchecked the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -749,9 +749,9 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -979,6 +979,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1114,7 +1115,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -418,7 +418,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -463,9 +463,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -118,7 +118,7 @@ type Fs struct {
    filetype string // dir, file or symlink
    dirscreated map[string]bool // if implicit dir has been created already
    dirscreatedMutex sync.Mutex // mutex to protect dirscreated
    statcache map[string][]File // cache successful stat requests
    statcache map[string][]File // cache successfull stat requests
    statcacheMutex sync.RWMutex // RWMutex to protect statcache
}

@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    if f.filetype == "" {
        // This happens in two scenarios.
        // 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
        // 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
        // 2. List/ListR is called from the context of test_all and not the regular rclone binary.
        err := f.initFs(ctx, dir)
        if err != nil {
@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    if f.filetype == "" {
        // This happens in two scenarios.
        // 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
        // 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
        // 2. List/ListR is called from the context of test_all and not the regular rclone binary.
        err := f.initFs(ctx, dir)
        if err != nil {
@@ -1,4 +1,5 @@
// Package api provides types used by the OneDrive API.
// Types passed and returned to and from the API

package api

import (
@@ -13,7 +14,7 @@ const (
    PackageTypeOneNote = "oneNote"
)

// Error is returned from OneDrive when things go wrong
// Error is returned from one drive when things go wrong
type Error struct {
    ErrorInfo struct {
        Code string `json:"code"`
@@ -70,7 +71,7 @@ type Drive struct {
    Quota Quota `json:"quota"`
}

// Timestamp represents date and time information for the
// Timestamp represents represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time

@@ -249,8 +250,8 @@ type MoveItemRequest struct {
    FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}

// CreateShareLinkRequest is the request to create a sharing link
// Always Type:view and Scope:anonymous for public sharing
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
    Type string `json:"type"` // Link type in View, Edit or Embed
    Scope string `json:"scope,omitempty"` // Scope in anonymous, organization
@@ -258,7 +259,7 @@ type CreateShareLinkRequest struct {
    Expiry *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
}

// CreateShareLinkResponse is the response from CreateShareLinkRequest
//CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
    ID string `json:"id"`
    Roles []string `json:"roles"`

@@ -600,14 +600,14 @@ type Options struct {
    Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote OneDrive
// Fs represents a remote one drive
type Fs struct {
    name string // name of this remote
    root string // the path we are working on
    opt Options // parsed options
    ci *fs.ConfigInfo // global config
    features *fs.Features // optional features
    srv *rest.Client // the connection to the OneDrive server
    srv *rest.Client // the connection to the one drive server
    dirCache *dircache.DirCache // Map of directory path to directory id
    pacer *fs.Pacer // pacer for API calls
    tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -615,7 +615,7 @@ type Fs struct {
    driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}

// Object describes a OneDrive object
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -645,7 +645,7 @@ func (f *Fs) Root() string {

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("OneDrive root '%s'", f.root)
    return fmt.Sprintf("One drive root '%s'", f.root)
}

// Features returns the optional features of this Fs
@@ -653,7 +653,7 @@ func (f *Fs) Features() *fs.Features {
    return f.features
}

// parsePath parses a OneDrive 'url'
// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
    return
@@ -727,7 +727,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
    opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")

@@ -891,12 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }).Fill(ctx, f)
    f.srv.SetErrorHandler(errorHandler)

    // Disable change polling in China region
    // See: https://github.com/rclone/rclone/issues/6444
    if f.opt.Region == regionCN {
        f.features.ChangeNotify = nil
    }

    // Renew the token in the background
    f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
        _, _, err := f.readMetaDataForPath(ctx, "")
@@ -1143,7 +1137,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -1162,7 +1156,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object into the container
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1286,9 +1280,9 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1393,9 +1387,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1849,6 +1843,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1,4 +1,3 @@
// Package opendrive provides an interface to the OpenDrive storage system.
package opendrive

import (
@@ -341,9 +340,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -405,9 +404,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -563,7 +562,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -600,7 +599,7 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -830,6 +829,7 @@ func (o *Object) Size() int64 {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1,158 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "crypto/rsa"
    "errors"
    "net/http"
    "os"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/common/auth"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
)

func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
    switch opt.Provider {
    case instancePrincipal:
        return auth.InstancePrincipalConfigurationProvider()
    case userPrincipal:
        if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
        }
        return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
    case resourcePrincipal:
        return auth.ResourcePrincipalConfigurationProvider()
    case noAuth:
        fs.Infof("client", "using no auth provider")
        return getNoAuthConfiguration()
    default:
    }
    return common.DefaultConfigProvider(), nil
}

func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
    p, err := getConfigurationProvider(opt)
    if err != nil {
        return nil, err
    }
    client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
    if err != nil {
        fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
        return nil, err
    }
    if opt.Region != "" {
        client.SetRegion(opt.Region)
    }
    modifyClient(ctx, opt, &client.BaseClient)
    return &client, err
}

func fileExists(filePath string) bool {
    if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
        return false
    }
    return true
}

func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
    client.HTTPClient = getHTTPClient(ctx)
    if opt.Provider == noAuth {
        client.Signer = getNoAuthSigner()
    }
}

// getClient makes http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
    return fshttp.NewClient(ctx)
}

var retryErrorCodes = []int{
    408, // Request Timeout
    429, // Rate exceeded.
    500, // Get occasional 500 Internal Server Error
    503, // Service Unavailable
    504, // Gateway Time-out
}

func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    // If this is an ocierr object, try and extract more useful information to determine if we should retry
    if ociError, ok := err.(common.ServiceError); ok {
        // Simple case, check the original embedded error in case it's generically retryable
        if fserrors.ShouldRetry(err) {
            return true, err
        }
        // If it is a timeout then we want to retry that
        if ociError.GetCode() == "RequestTimeout" {
            return true, err
        }
    }
    // Ok, not an oci error, check for generic failure conditions
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
    return &noAuthConfigurator{}, nil
}

func getNoAuthSigner() common.HTTPRequestSigner {
    return &noAuthSigner{}
}

type noAuthConfigurator struct {
}

type noAuthSigner struct {
}

func (n *noAuthSigner) Sign(*http.Request) error {
    return nil
}

func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
    return nil, nil
}

func (n *noAuthConfigurator) KeyID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) TenancyOCID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) UserOCID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) Region() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
    return common.AuthConfig{
        AuthType: common.UnknownAuthenticationType,
        IsFromConfigFile: false,
        OboToken: nil,
    }, nil
}

// Check the interfaces are satisfied
var (
    _ common.ConfigurationProvider = &noAuthConfigurator{}
    _ common.HTTPRequestSigner = &noAuthSigner{}
)
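The shouldRetry function in the deleted file above layers three checks: context cancellation, OCI-specific service errors, and a generic list of retryable HTTP status codes. A deliberately simplified, self-contained sketch of just the status-code layer (shouldRetryHTTP and retryStatusCodes are illustrative names; the real function also consults rclone's fserrors package):

    package main

    import (
        "fmt"
        "net/http"
    )

    var retryStatusCodes = map[int]bool{408: true, 429: true, 500: true, 503: true, 504: true}

    // shouldRetryHTTP is a much-simplified stand-in for shouldRetry above.
    func shouldRetryHTTP(resp *http.Response, err error) bool {
        if err != nil {
            return true // transport errors are usually worth another try
        }
        return resp != nil && retryStatusCodes[resp.StatusCode]
    }

    func main() {
        fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 429}, nil)) // true
        fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 404}, nil)) // false
    }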
@@ -1,228 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
)

// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------

const (
    operationRename = "rename"
    operationListMultiPart = "list-multipart-uploads"
    operationCleanup = "cleanup"
)

var commandHelp = []fs.CommandHelp{{
    Name: operationRename,
    Short: "change the name of an object",
    Long: `This command can be used to rename a object.

Usage Examples:

    rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
    Opts: nil,
}, {
    Name: operationListMultiPart,
    Short: "List the unfinished multipart uploads",
    Long: `This command lists the unfinished multipart uploads in JSON format.

    rclone backend list-multipart-uploads oos:bucket/path/to/object

It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.

You can call it with no bucket in which case it lists all bucket, with
a bucket or with a bucket and path.

    {
      "test-bucket": [
        {
          "namespace": "test-namespace",
          "bucket": "test-bucket",
          "object": "600m.bin",
          "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
          "timeCreated": "2022-07-29T06:21:16.595Z",
          "storageTier": "Standard"
        }
      ]
    }
`,
}, {
    Name: operationCleanup,
    Short: "Remove unfinished multipart uploads.",
    Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup oos:bucket/path/to/object
    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
    Opts: map[string]string{
        "max-age": "Max age of upload to delete",
    },
},
}

/*
Command the backend to run a named command

The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from

The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
    opt map[string]string) (result interface{}, err error) {
    // fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
    switch commandName {
    case operationRename:
        if len(args) < 2 {
            return nil, fmt.Errorf("path to object or its new name to rename is empty")
        }
        remote := args[0]
        newName := args[1]
        return f.rename(ctx, remote, newName)
    case operationListMultiPart:
        return f.listMultipartUploadsAll(ctx)
    case operationCleanup:
        maxAge := 24 * time.Hour
        if opt["max-age"] != "" {
            maxAge, err = fs.ParseDuration(opt["max-age"])
            if err != nil {
                return nil, fmt.Errorf("bad max-age: %w", err)
            }
        }
        return nil, f.cleanUp(ctx, maxAge)
    default:
        return nil, fs.ErrorCommandNotFound
    }
}

func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
    if remote == "" {
        return nil, fmt.Errorf("path to object file cannot be empty")
    }
    if newName == "" {
        return nil, fmt.Errorf("the object's new name cannot be empty")
    }
    o := &Object{
        fs: f,
        remote: remote,
    }
    bucketName, objectPath := o.split()
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
        if strings.HasPrefix(objectPath, bucketName) {
            fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
                objectPath, bucketName)
        }
        return nil, fs.ErrorNotAFile
    }
    details := objectstorage.RenameObjectDetails{
        SourceName: common.String(objectPath),
        NewName: common.String(newName),
    }
    request := objectstorage.RenameObjectRequest{
        NamespaceName: common.String(f.opt.Namespace),
        BucketName: common.String(bucketName),
        RenameObjectDetails: details,
        OpcClientRequestId: nil,
        RequestMetadata: common.RequestMetadata{},
    }
    var response objectstorage.RenameObjectResponse
    err = f.pacer.Call(func() (bool, error) {
        response, err = f.srv.RenameObject(ctx, request)
        return shouldRetry(ctx, response.HTTPResponse(), err)
    })
    if err != nil {
        return nil, err
    }
    fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
    return "renamed successfully", nil
}

func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
    err error) {
    uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
    bucket, directory := f.split("")
    if bucket != "" {
        uploads, err := f.listMultipartUploads(ctx, bucket, directory)
        if err != nil {
            return uploadsMap, err
        }
        uploadsMap[bucket] = uploads
        return uploadsMap, nil
    }
    entries, err := f.listBuckets(ctx)
    if err != nil {
        return uploadsMap, err
    }
    for _, entry := range entries {
        bucket := entry.Remote()
        uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
        if listErr != nil {
            err = listErr
            fs.Errorf(f, "%v", err)
        }
        uploadsMap[bucket] = uploads
    }
    return uploadsMap, err
}

// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
    uploads []*objectstorage.MultipartUpload, err error) {

    uploads = []*objectstorage.MultipartUpload{}
    req := objectstorage.ListMultipartUploadsRequest{
        NamespaceName: common.String(f.opt.Namespace),
        BucketName: common.String(bucketName),
    }

    var response objectstorage.ListMultipartUploadsResponse
    for {
        err = f.pacer.Call(func() (bool, error) {
            response, err = f.srv.ListMultipartUploads(ctx, req)
            return shouldRetry(ctx, response.HTTPResponse(), err)
        })
        if err != nil {
            // fs.Debugf(f, "failed to list multi part uploads %v", err)
            return uploads, err
        }
        for index, item := range response.Items {
            if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
                continue
            }
            uploads = append(uploads, &response.Items[index])
        }
        if response.OpcNextPage == nil {
            break
        }
        req.Page = response.OpcNextPage
    }
    return uploads, nil
}
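The cleanup branch of Command above defaults max-age to 24 hours and only overrides it when an option is supplied. A standalone sketch of that default-then-override parsing, assuming the standard library (parseMaxAge is an illustrative helper; rclone's fs.ParseDuration additionally accepts suffixes like d and w, which time.ParseDuration does not):

    package main

    import (
        "fmt"
        "time"
    )

    // parseMaxAge mirrors the cleanup branch: default 24h, override if set.
    func parseMaxAge(opt map[string]string) (time.Duration, error) {
        maxAge := 24 * time.Hour
        if v := opt["max-age"]; v != "" {
            d, err := time.ParseDuration(v)
            if err != nil {
                return 0, fmt.Errorf("bad max-age: %w", err)
            }
            maxAge = d
        }
        return maxAge, nil
    }

    func main() {
        d, _ := parseMaxAge(map[string]string{"max-age": "168h"})
        fmt.Println(d) // 168h0m0s
    }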
@@ -1,155 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
)

// ------------------------------------------------------------
// Implement Copier is an optional interfaces for Fs
//------------------------------------------------------------

// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    // fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
    srcObj, ok := src.(*Object)
    if !ok {
        // fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    // Temporary Object under construction
    dstObj := &Object{
        fs: f,
        remote: remote,
    }
    err := f.copy(ctx, dstObj, srcObj)
    if err != nil {
        return nil, err
    }
    return f.NewObject(ctx, remote)
}

// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
    srcBucket, srcPath := srcObj.split()
    dstBucket, dstPath := dstObj.split()
    if dstBucket != srcBucket {
        exists, err := f.bucketExists(ctx, dstBucket)
        if err != nil {
            return err
        }
        if !exists {
            err = f.makeBucket(ctx, dstBucket)
            if err != nil {
                return err
            }
        }
    }
    copyObjectDetails := objectstorage.CopyObjectDetails{
        SourceObjectName: common.String(srcPath),
        DestinationRegion: common.String(dstObj.fs.opt.Region),
        DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
        DestinationBucket: common.String(dstBucket),
        DestinationObjectName: common.String(dstPath),
        DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
    }
    req := objectstorage.CopyObjectRequest{
        NamespaceName: common.String(srcObj.fs.opt.Namespace),
        BucketName: common.String(srcBucket),
        CopyObjectDetails: copyObjectDetails,
    }
    var resp objectstorage.CopyObjectResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CopyObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return err
    }
    workRequestID := resp.OpcWorkRequestId
    timeout := time.Duration(f.opt.CopyTimeout)
    dstName := dstObj.String()
    // https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
    // To enable server side copy object, customers will have to
    // grant policy to objectstorage service to manage object-family
    // Allow service objectstorage-<region_identifier> to manage object-family in tenancy
    // Another option to avoid the policy is to download and reupload the file.
    // This download upload will work for maximum file size limit of 5GB
    err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
    if err != nil {
        return err
    }
    return err
}

func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
    client *objectstorage.ObjectStorageClient) error {

    stateConf := &StateChangeConf{
        Pending: []string{
            string(objectstorage.WorkRequestStatusAccepted),
            string(objectstorage.WorkRequestStatusInProgress),
            string(objectstorage.WorkRequestStatusCanceling),
        },
        Target: []string{
            string(objectstorage.WorkRequestSummaryStatusCompleted),
            string(objectstorage.WorkRequestSummaryStatusCanceled),
            string(objectstorage.WorkRequestStatusFailed),
        },
        Refresh: func() (interface{}, string, error) {
            getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
            getWorkRequestRequest.WorkRequestId = wID
            workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
            wr := &workRequestResponse.WorkRequest
            return workRequestResponse, string(wr.Status), err
        },
        Timeout: timeout,
    }

    wrr, e := stateConf.WaitForStateContext(ctx, entityType)
    if e != nil {
        return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
    }

    wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
    if wr.Status == objectstorage.WorkRequestStatusFailed {
        errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
        return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
    }

    return nil
}

func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
    req := objectstorage.ListWorkRequestErrorsRequest{}
    req.WorkRequestId = workRequestID
    res, err := client.ListWorkRequestErrors(ctx, req)

    if err != nil {
        return "", err
    }

    allErrs := make([]string, 0)
    for _, errs := range res.Items {
        allErrs = append(allErrs, *errs.Message)
    }

    errorMessage := strings.Join(allErrs, "\n")
    return errorMessage, nil
}
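Server-side copy in OCI is asynchronous: CopyObject returns a work-request ID, and copyObjectWaitForWorkRequest above polls it until it reaches a terminal state. The polling skeleton, stripped of the OCI types, could look like the following sketch (waitForTerminal and the state strings are illustrative assumptions, not the StateChangeConf API used above):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // waitForTerminal calls refresh until a terminal state is reached or
    // the context deadline expires.
    func waitForTerminal(ctx context.Context, refresh func() (string, error), terminal map[string]bool) (string, error) {
        ticker := time.NewTicker(100 * time.Millisecond)
        defer ticker.Stop()
        for {
            state, err := refresh()
            if err != nil {
                return "", err
            }
            if terminal[state] {
                return state, nil
            }
            select {
            case <-ctx.Done():
                return "", errors.New("timed out waiting for work request")
            case <-ticker.C:
            }
        }
    }

    func main() {
        n := 0
        refresh := func() (string, error) {
            if n++; n >= 3 {
                return "COMPLETED", nil
            }
            return "IN_PROGRESS", nil
        }
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        state, err := waitForTerminal(ctx, refresh, map[string]bool{"COMPLETED": true, "FAILED": true})
        fmt.Println(state, err) // COMPLETED <nil>
    }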
@@ -1,621 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/ncw/swift/v2"
    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
)

// ------------------------------------------------------------
// Object Interface Implementation
// ------------------------------------------------------------

const (
    metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
    metaMD5Hash = "md5chksum" // the meta key to store md5hash in
    // StandardTier object storage tier
    ociMetaPrefix = "opc-meta-"
)

var archive = "archive"
var infrequentAccess = "infrequentaccess"
var standard = "standard"

var storageTierMap = map[string]*string{
    archive: &archive,
    infrequentAccess: &infrequentAccess,
    standard: &standard,
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Object describes a oci bucket object
type Object struct {
    fs *Fs // what this object is part of
    remote string // The remote path
    md5 string // MD5 hash if known
    bytes int64 // Size of the object
    lastModified time.Time // The modified time of the object if known
    meta map[string]string // The object metadata if known - may be nil
    mimeType string // Content-Type of the object

    // Metadata as pointers to strings as they often won't be present
    storageTier *string // e.g. Standard
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
    return o.fs.split(o.remote)
}

// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
    fs.Debugf(o, "trying to read metadata %v", o.remote)
    if o.meta != nil {
        return nil
    }
    info, err := o.headObject(ctx)
    if err != nil {
        return err
    }
    return o.decodeMetaDataHead(info)
}

// headObject gets the metadata from the object unconditionally
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
    bucketName, objectPath := o.split()
    req := objectstorage.HeadObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName: common.String(bucketName),
        ObjectName: common.String(objectPath),
    }
    var response objectstorage.HeadObjectResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        var err error
        response, err = o.fs.srv.HeadObject(ctx, req)
        return shouldRetry(ctx, response.HTTPResponse(), err)
    })
    if err != nil {
        if svcErr, ok := err.(common.ServiceError); ok {
            if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
                return nil, fs.ErrorObjectNotFound
            }
        }
        return nil, err
    }
    o.fs.cache.MarkOK(bucketName)
    return &response, err
}

func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
    return o.setMetaData(
        info.ContentLength,
        info.ContentMd5,
        info.ContentType,
        info.LastModified,
        info.StorageTier,
        info.OpcMeta)
}

func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
    return o.setMetaData(
        info.ContentLength,
        info.ContentMd5,
        info.ContentType,
        info.LastModified,
        info.StorageTier,
        info.OpcMeta)
}

func (o *Object) setMetaData(
    contentLength *int64,
    contentMd5 *string,
    contentType *string,
    lastModified *common.SDKTime,
    storageTier interface{},
    meta map[string]string) error {

    if contentLength != nil {
        o.bytes = *contentLength
    }
    if contentMd5 != nil {
        md5, err := o.base64ToMd5(*contentMd5)
        if err == nil {
            o.md5 = md5
        }
    }
    o.meta = meta
    if o.meta == nil {
        o.meta = map[string]string{}
    }
    // Read MD5 from metadata if present
    if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
        md5, err := o.base64ToMd5(md5sumBase64)
        if err != nil {
            o.md5 = md5
        }
    }
    if lastModified == nil {
        o.lastModified = time.Now()
        fs.Logf(o, "Failed to read last modified")
    } else {
        o.lastModified = lastModified.Time
    }
    if contentType != nil {
        o.mimeType = *contentType
    }
    if storageTier == nil || storageTier == "" {
        o.storageTier = storageTierMap[standard]
    } else {
        tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
        o.storageTier = storageTierMap[tier]
    }
    return nil
}

func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
    md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
    if err != nil {
        fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
        return "", err
    } else if len(md5sumBytes) != 16 {
        fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
        return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
    }
    return hex.EncodeToString(md5sumBytes), nil
}
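base64ToMd5 above bridges two representations: OCI returns Content-MD5 as base64-encoded raw bytes, while rclone hashes are lowercase hex strings. The conversion on its own, as a runnable sketch (base64ToHexMd5 is an illustrative name):

    package main

    import (
        "encoding/base64"
        "encoding/hex"
        "fmt"
    )

    // base64ToHexMd5 decodes a base64 Content-MD5 and re-encodes the 16
    // raw bytes as the lowercase hex form rclone uses.
    func base64ToHexMd5(b64 string) (string, error) {
        raw, err := base64.StdEncoding.DecodeString(b64)
        if err != nil {
            return "", err
        }
        if len(raw) != 16 {
            return "", fmt.Errorf("md5 %q: wrong length %d", b64, len(raw))
        }
        return hex.EncodeToString(raw), nil
    }

    func main() {
        // base64 of the MD5 of the empty string
        s, err := base64ToHexMd5("1B2M2Y8AsgTpgAmY7PhCfg==")
        fmt.Println(s, err) // d41d8cd98f00b204e9800998ecf8427e <nil>
    }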
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.bytes
}

// GetTier returns storage class as string
func (o *Object) GetTier() string {
    if o.storageTier == nil || *o.storageTier == "" {
        return standard
    }
    return *o.storageTier
}

// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
    ctx := context.TODO()
    tier = strings.ToLower(tier)
    bucketName, bucketPath := o.split()
    tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
    if !ok {
        return fmt.Errorf("not a valid storage tier %v ", tier)
    }

    req := objectstorage.UpdateObjectStorageTierRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName: common.String(bucketName),
        UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
            ObjectName: common.String(bucketPath),
            StorageTier: tierEnum,
        },
    }
    _, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
    if err != nil {
        return err
    }
    o.storageTier = storageTierMap[tier]
    return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Logf(o, "Failed to read metadata: %v", err)
        return ""
    }
    return o.mimeType
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }
    // Convert base64 encoded md5 into lower case hex
    if o.md5 == "" {
        err := o.readMetaData(ctx)
        if err != nil {
            return "", err
        }
    }
    return o.md5, nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned to the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
    if o.fs.ci.UseServerModTime {
        return o.lastModified
    }
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Logf(o, "Failed to read metadata: %v", err)
        return time.Now()
    }
    // read mtime out of metadata if available
    d, ok := o.meta[metaMtime]
    if !ok || d == "" {
        return o.lastModified
    }
    modTime, err := swift.FloatStringToTime(d)
    if err != nil {
        fs.Logf(o, "Failed to read mtime from object: %v", err)
        return o.lastModified
    }
    return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    err := o.readMetaData(ctx)
    if err != nil {
        return err
    }
    o.meta[metaMtime] = swift.TimeToFloatString(modTime)
    _, err = o.fs.Copy(ctx, o, o.remote)
    return err
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
    return true
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    bucketName, bucketPath := o.split()
    req := objectstorage.DeleteObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName: common.String(bucketName),
        ObjectName: common.String(bucketPath),
    }
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.DeleteObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    return err
}

// Open object file
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    bucketName, bucketPath := o.split()
    req := objectstorage.GetObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName: common.String(bucketName),
        ObjectName: common.String(bucketPath),
    }
    o.applyGetObjectOptions(&req, options...)

    var resp objectstorage.GetObjectResponse
    err := o.fs.pacer.Call(func() (bool, error) {
        var err error
        resp, err = o.fs.srv.GetObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return nil, err
    }
    // read size from ContentLength or ContentRange
    bytes := resp.ContentLength
    if resp.ContentRange != nil {
        var contentRange = *resp.ContentRange
        slash := strings.IndexRune(contentRange, '/')
        if slash >= 0 {
            i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
            if err == nil {
                bytes = &i
            } else {
                fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
            }
        } else {
            fs.Debugf(o, "Failed to find length in %q", contentRange)
        }
    }
    err = o.decodeMetaDataObject(&resp)
    if err != nil {
        return nil, err
    }
    o.bytes = *bytes
    return resp.HTTPResponse().Body, nil
}
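When Open serves a ranged read, ContentLength only covers the requested slice, so the code above recovers the full object size from the Content-Range header instead. The parsing step in isolation (totalFromContentRange is an illustrative name):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // totalFromContentRange extracts the total size from a header such as
    // "bytes 0-1023/4096", as the Open method above does.
    func totalFromContentRange(contentRange string) (int64, bool) {
        slash := strings.IndexRune(contentRange, '/')
        if slash < 0 {
            return 0, false
        }
        n, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
        if err != nil {
            return 0, false
        }
        return n, true
    }

    func main() {
        fmt.Println(totalFromContentRange("bytes 0-1023/4096")) // 4096 true
    }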
// Update an object if it has changed
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine if we like upload single or multipart.
|
||||
size := src.Size()
|
||||
multipart := size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
metadata := map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
|
||||
if multipart {
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
uploadRequest := transfer.UploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PartSize: common.Int64(chunkSize),
|
||||
AllowMultipartUploads: common.Bool(true),
|
||||
AllowParrallelUploads: common.Bool(true),
|
||||
ObjectStorageClient: o.fs.srv,
|
||||
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
|
||||
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
|
||||
Metadata: metadataWithOpcPrefix(metadata),
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
}
|
||||
uploadMgr := transfer.NewUploadManager()
|
||||
var uploadID = ""
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
if uploadID == "" {
|
||||
return
|
||||
}
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := o.fs.abortMultiPartUpload(
|
||||
context.Background(),
|
||||
bucketName,
|
||||
bucketPath,
|
||||
uploadID)
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
|
||||
var httpResponse *http.Response
|
||||
if err == nil {
|
||||
if uploadResponse.Type == transfer.MultipartUpload {
|
||||
if uploadResponse.MultipartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
|
||||
}
|
||||
} else {
|
||||
if uploadResponse.SinglepartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
uploadID := ""
|
||||
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
|
||||
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
|
||||
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
|
||||
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, httpResponse, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "multipart streaming upload failed %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PutObjectBody: io.NopCloser(in),
|
||||
OpcMeta: metadata,
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "put object failed %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
return o.readMetaData(ctx)
|
||||
}
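
// A hedged sketch of the md5sum handling in Update above: rclone hashes are
// hex encoded, while the ContentMD5 header and the metaMD5Hash metadata want
// the base64 encoding of the raw 16 bytes. Values below are illustrative.
//
//	md5sumHex := "d41d8cd98f00b204e9800998ecf8427e" // MD5 of the empty string
//	hashBytes, err := hex.DecodeString(md5sumHex)
//	if err == nil {
//		md5sumBase64 := base64.StdEncoding.EncodeToString(hashBytes)
//		// md5sumBase64 == "1B2M2Y8AsgTpgAmY7PhCfg=="
//		_ = md5sumBase64
//	}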

func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
	// Apply upload options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "cache-control":
			req.CacheControl = common.String(value)
		case "content-disposition":
			req.ContentDisposition = common.String(value)
		case "content-encoding":
			req.ContentEncoding = common.String(value)
		case "content-language":
			req.ContentLanguage = common.String(value)
		case "content-type":
			req.ContentType = common.String(value)
		default:
			if strings.HasPrefix(lowerKey, ociMetaPrefix) {
				req.OpcMeta[lowerKey] = value
			} else {
				fs.Errorf(o, "Don't know how to set key %q on upload", key)
			}
		}
	}
}
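
// A hedged sketch of where these options come from: rclone passes
// fs.OpenOption values (for example an fs.HTTPOption) down to Put/Update, and
// applyPutOptions above maps each option's Header() onto the OCI request.
// Illustrative only; fs.HTTPOption is part of the rclone fs package.
//
//	opt := fs.HTTPOption{Key: "Cache-Control", Value: "no-cache"}
//	key, value := opt.Header() // "Cache-Control", "no-cache"
//	_, _ = key, value          // applyPutOptions lowercases the key and sets req.CacheControl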

func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
	fs.FixRangeOption(options, o.bytes)
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	// Apply download options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "cache-control":
			req.HttpResponseCacheControl = common.String(value)
		case "content-disposition":
			req.HttpResponseContentDisposition = common.String(value)
		case "content-encoding":
			req.HttpResponseContentEncoding = common.String(value)
		case "content-language":
			req.HttpResponseContentLanguage = common.String(value)
		case "content-type":
			req.HttpResponseContentType = common.String(value)
		case "range":
			// do nothing
		default:
			fs.Errorf(o, "Don't know how to set key %q on download", key)
		}
	}
}

func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
	// Apply upload options
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "":
			// ignore
		case "content-encoding":
			req.ContentEncoding = common.String(value)
		case "content-language":
			req.ContentLanguage = common.String(value)
		case "content-type":
			req.ContentType = common.String(value)
		default:
			if strings.HasPrefix(lowerKey, ociMetaPrefix) {
				req.Metadata[lowerKey] = value
			} else {
				fs.Errorf(o, "Don't know how to set key %q on upload", key)
			}
		}
	}
}

func metadataWithOpcPrefix(src map[string]string) map[string]string {
	dst := make(map[string]string)
	for lowerKey, value := range src {
		if strings.HasPrefix(lowerKey, ociMetaPrefix) {
			// already prefixed - keep it rather than dropping it
			dst[lowerKey] = value
		} else {
			dst[ociMetaPrefix+lowerKey] = value
		}
	}
	return dst
}
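// A small sketch of what metadataWithOpcPrefix above produces, assuming
// ociMetaPrefix is "opc-meta-" (its definition lives elsewhere in this
// backend). Illustrative only.
//
//	src := map[string]string{"mtime": "1660000000.0"}
//	dst := metadataWithOpcPrefix(src)
//	// dst == map[string]string{"opc-meta-mtime": "1660000000.0"}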
@@ -1,242 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/encoder"
)

const (
	maxSizeForCopy             = 4768 * 1024 * 1024
	minChunkSize               = fs.SizeSuffix(1024 * 1024 * 5)
	defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
	defaultUploadConcurrency   = 10
	maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
	minSleep                   = 100 * time.Millisecond
	maxSleep                   = 5 * time.Minute
	decayConstant              = 1 // bigger for slower decay, exponential
	defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)

const (
	userPrincipal     = "user_principal_auth"
	instancePrincipal = "instance_principal_auth"
	resourcePrincipal = "resource_principal_auth"
	environmentAuth   = "env_auth"
	noAuth            = "no_auth"

	userPrincipalHelpText = `use an OCI user and an API key for authentication.
you'll need to put your tenancy OCID, user OCID, region, and the path and fingerprint of an API key in a config file.
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`

	instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`

	resourcePrincipalHelpText = `use resource principals to make API calls`

	environmentAuthHelpText = `automatically pick up the credentials from the runtime (environment), the first one that provides auth wins`

	noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
)

// Options defines the configuration for this backend
type Options struct {
	Provider          string               `config:"provider"`
	Compartment       string               `config:"compartment"`
	Namespace         string               `config:"namespace"`
	Region            string               `config:"region"`
	Endpoint          string               `config:"endpoint"`
	Enc               encoder.MultiEncoder `config:"encoding"`
	ConfigFile        string               `config:"config_file"`
	ConfigProfile     string               `config:"config_profile"`
	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
	UploadConcurrency int                  `config:"upload_concurrency"`
	DisableChecksum   bool                 `config:"disable_checksum"`
	CopyCutoff        fs.SizeSuffix        `config:"copy_cutoff"`
	CopyTimeout       fs.Duration          `config:"copy_timeout"`
	StorageTier       string               `config:"storage_tier"`
	LeavePartsOnError bool                 `config:"leave_parts_on_error"`
	NoCheckBucket     bool                 `config:"no_check_bucket"`
}

func newOptions() []fs.Option {
	return []fs.Option{{
		Name:     fs.ConfigProvider,
		Help:     "Choose your Auth Provider",
		Required: true,
		Default:  environmentAuth,
		Examples: []fs.OptionExample{{
			Value: environmentAuth,
			Help:  environmentAuthHelpText,
		}, {
			Value: userPrincipal,
			Help:  userPrincipalHelpText,
		}, {
			Value: instancePrincipal,
			Help:  instancePrincipalHelpText,
		}, {
			Value: resourcePrincipal,
			Help:  resourcePrincipalHelpText,
		}, {
			Value: noAuth,
			Help:  noAuthHelpText,
		}},
	}, {
		Name:     "namespace",
		Help:     "Object storage namespace",
		Required: true,
	}, {
		Name:     "compartment",
		Help:     "Object storage compartment OCID",
		Provider: "!no_auth",
		Required: true,
	}, {
		Name:     "region",
		Help:     "Object storage Region",
		Required: true,
	}, {
		Name:     "endpoint",
		Help:     "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
		Required: false,
	}, {
		Name:     "config_file",
		Help:     "Path to OCI config file",
		Provider: userPrincipal,
		Default:  "~/.oci/config",
		Examples: []fs.OptionExample{{
			Value: "~/.oci/config",
			Help:  "oci configuration file location",
		}},
	}, {
		Name:     "config_profile",
		Help:     "Profile name inside the oci config file",
		Provider: userPrincipal,
		Default:  "Default",
		Examples: []fs.OptionExample{{
			Value: "Default",
			Help:  "Use the default profile",
		}},
	}, {
		Name: "upload_cutoff",
		Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
		Default:  defaultUploadCutoff,
		Advanced: true,
	}, {
		Name: "chunk_size",
		Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.

Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.

Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
`,
		Default:  minChunkSize,
		Advanced: true,
	}, {
		Name: "upload_concurrency",
		Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
		Default:  defaultUploadConcurrency,
		Advanced: true,
	}, {
		Name: "copy_cutoff",
		Help: `Cutoff for switching to multipart copy.

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 5 GiB.`,
		Default:  fs.SizeSuffix(maxSizeForCopy),
		Advanced: true,
	}, {
		Name: "copy_timeout",
		Help: `Timeout for copy.

Copy is an asynchronous operation, specify timeout to wait for copy to succeed
`,
		Default:  defaultCopyTimeoutDuration,
		Advanced: true,
	}, {
		Name: "disable_checksum",
		Help: `Don't store MD5 checksum with object metadata.

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
		Default:  false,
		Advanced: true,
	}, {
		Name:     config.ConfigEncoding,
		Help:     config.ConfigEncodingHelp,
		Advanced: true,
		// Any UTF-8 character is valid in a key, however it can't handle
		// invalid UTF-8 and / have a special meaning.
		//
		// The SDK can't seem to handle uploading files called '.
		// - initial / encoding
		// - doubled / encoding
		// - trailing / encoding
		// so that OOS keys are always valid file names
		Default: encoder.EncodeInvalidUtf8 |
			encoder.EncodeSlash |
			encoder.EncodeDot,
	}, {
		Name: "leave_parts_on_error",
		Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in object storage for manual recovery.

It should be set to true for resuming uploads across different sessions.

WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_check_bucket",
		Help: `If set, don't attempt to check the bucket exists or create it.

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.

It can also be needed if the user you are using does not have bucket
creation permissions.
`,
		Default:  false,
		Advanced: true,
	}}
}
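// A back-of-the-envelope sketch of the chunk_size help text above: with the
// default 5 MiB chunks and the 10,000 part limit, the largest streamed upload
// of unknown size is roughly 48 GiB. Illustrative only.
//
//	const maxParts = 10000
//	chunk := int64(minChunkSize) // 5 MiB by default
//	maxStream := chunk * maxParts
//	// maxStream == 52,428,800,000 bytes, i.e. about 48.8 GiB
//	_ = maxStream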
@@ -1,695 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/objectstorage"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/pacer"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "oracleobjectstorage",
		Description: "Oracle Cloud Infrastructure Object Storage",
		Prefix:      "oos",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options:     newOptions(),
	})
}

// Fs represents a remote object storage server
type Fs struct {
	name          string                             // name of this remote
	root          string                             // the path we are working on if any
	opt           Options                            // parsed config options
	ci            *fs.ConfigInfo                     // global config
	features      *fs.Features                       // optional features
	srv           *objectstorage.ObjectStorageClient // the connection to the object storage
	rootBucket    string                             // bucket part of root (if any)
	rootDirectory string                             // directory part of root (if any)
	cache         *bucket.Cache                      // cache for bucket creation status
	pacer         *fs.Pacer                          // To pace the API calls
}

// NewFs initialises the backend from the config and returns it
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	ci := fs.GetConfig(ctx)
	objectStorageClient, err := newObjectStorageClient(ctx, opt)
	if err != nil {
		return nil, err
	}
	p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
	f := &Fs{
		name:  name,
		opt:   *opt,
		ci:    ci,
		srv:   objectStorageClient,
		cache: bucket.NewCache(),
		pacer: fs.NewPacer(ctx, p),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
		SlowModTime:       true,
	}).Fill(ctx, f)
	if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			// File doesn't exist or is a directory so return old f
			f.setRoot(oldRoot)
			return f, nil
		}
		// return an error with fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, err
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// ------------------------------------------------------------
// Implement the backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "oos:root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("oos:bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucketName, directory := f.split(dir)
	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
	if bucketName == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
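// A hedged example of how split above maps a remote path to a bucket and
// object path: with f.root == "mybucket/photos", a call like
//
//	bucketName, bucketPath := f.split("2022/beach.jpg")
//
// would yield bucketName == "mybucket" and bucketPath == "photos/2022/beach.jpg"
// (after the configured encoding is applied). Names are illustrative only.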

// listFn is called from list to handle an object.
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error

// list the objects into the function supplied from
// the bucket and root supplied
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
// If hidden is set then it will list the hidden (deleted) files too.
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
	fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}

	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	chunkSize := 1000
	if limit > 0 {
		chunkSize = limit
	}
	var request = objectstorage.ListObjectsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucket),
		Prefix:        common.String(directory),
		Limit:         common.Int(chunkSize),
		Fields:        common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
	}
	if delimiter != "" {
		request.Delimiter = common.String(delimiter)
	}

	for {
		var resp objectstorage.ListObjectsResponse
		err = f.pacer.Call(func() (bool, error) {
			var err error
			resp, err = f.srv.ListObjects(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			if ociError, ok := err.(common.ServiceError); ok {
				// A 404 means the bucket or directory was not found
				if ociError.GetHTTPStatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			if f.rootBucket == "" {
				// if listing from the root ignore wrong region requests returning
				// empty directory
				if reqErr, ok := err.(common.ServiceError); ok {
					// 301 if wrong region for bucket
					if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
						return nil
					}
				}
			}
			return err
		}
		if !recurse {
			for _, commonPrefix := range resp.ListObjects.Prefixes {
				if commonPrefix == "" {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := commonPrefix
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				remote = strings.TrimSuffix(remote, "/")
				err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for i := range resp.Objects {
			object := &resp.Objects[i]
			// Finish if file name no longer has prefix
			//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
			//	return nil
			//}
			remote := *object.Name
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				// fs.Debugf(f, "Odd name received %v", object.Name)
				continue
			}
			remote = remote[len(prefix):]
			// Check for directory
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size != nil && *object.Size == 0 {
				continue // skip directory marker
			}
			if isDirectory && len(remote) > 1 {
				remote = remote[:len(remote)-1]
			}
			err = fn(remote, object, isDirectory)
			if err != nil {
				return err
			}
		}
		// stop when there is no next page to fetch
		if resp.NextStartWith == nil {
			break
		}
		request.Start = resp.NextStartWith
	}
	return nil
}
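// A sketch of driving the paginated list above with a callback, e.g. to count
// the objects under a directory without building a slice. Illustrative only.
//
//	count := 0
//	err := f.list(ctx, "mybucket", "photos", "", false, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
//		if !isDirectory {
//			count++
//		}
//		return nil
//	})
//	_ = err // count now holds the number of objects listed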

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	}
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.opt.Provider == noAuth {
		return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
	}
	var request = objectstorage.ListBucketsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		CompartmentId: common.String(f.opt.Compartment),
	}
	var resp objectstorage.ListBucketsResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.ListBuckets(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			return nil, err
		}
		for _, item := range resp.Items {
			bucketName := f.opt.Enc.ToStandardName(*item.Name)
			f.cache.MarkOK(bucketName)
			d := fs.NewDir(bucketName, item.TimeCreated.Time)
			entries = append(entries, d)
		}
		if resp.OpcNextPage == nil {
			break
		}
		request.Page = resp.OpcNextPage
	}
	return entries, nil
}

// Return an Object from a path
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		if info.TimeModified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = info.TimeModified.Time
		}
		if info.Md5 != nil {
			md5, err := o.base64ToMd5(*info.Md5)
			if err == nil {
				o.md5 = md5
			}
		}
		o.bytes = *info.Size
		o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// Put the object into the bucket
// Copy the reader in to the new object which is returned
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucketName, _ := f.split(dir)
	return f.makeBucket(ctx, bucketName)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
	if f.opt.NoCheckBucket {
		return nil
	}
	return f.cache.Create(bucketName, func() error {
		details := objectstorage.CreateBucketDetails{
			Name:             common.String(bucketName),
			CompartmentId:    common.String(f.opt.Compartment),
			PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
		}
		req := objectstorage.CreateBucketRequest{
			NamespaceName:       common.String(f.opt.Namespace),
			CreateBucketDetails: details,
		}
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CreateBucket(ctx, req)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
				objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
		}
		if svcErr, ok := err.(common.ServiceError); ok {
			if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
				err = nil
			}
		}
		return err
	}, func() (bool, error) {
		return f.bucketExists(ctx, bucketName)
	})
}

// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
	req := objectstorage.HeadBucketRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.HeadBucket(ctx, req)
		return shouldRetry(ctx, resp.HTTPResponse(), err)
	})
	if err == nil {
		return true, nil
	}
	if err, ok := err.(common.ServiceError); ok {
		if err.GetHTTPStatusCode() == http.StatusNotFound {
			return false, nil
		}
	}
	return false, err
}
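// A hedged note on makeBucket above: f.cache.Create consults the exists
// callback and runs the create callback at most once per bucket, so repeated
// uploads into the same bucket don't issue a CreateBucket or HeadBucket call
// each time. A sketch of the calling pattern, with illustrative names:
//
//	if err := f.makeBucket(ctx, "mybucket"); err != nil {
//		return err // bucket missing and could not be created
//	}
//	// safe to PutObject into "mybucket" now; later calls are cheap no-ops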

// Rmdir deletes an empty bucket. If the bucket is not empty it will fail with an appropriate error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucketName, directory := f.split(dir)
	if bucketName == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucketName, func() error {
		req := objectstorage.DeleteBucketRequest{
			NamespaceName: common.String(f.opt.Namespace),
			BucketName:    common.String(bucketName),
		}
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.DeleteBucket(ctx, req)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q deleted", bucketName)
		}
		return err
	})
}

func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
	if uploadID == "" {
		return nil
	}
	request := objectstorage.AbortMultipartUploadRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
		ObjectName:    common.String(bucketPath),
		UploadId:      common.String(uploadID),
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.AbortMultipartUpload(ctx, request)
		return shouldRetry(ctx, resp.HTTPResponse(), err)
	})
	return err
}

// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
	uploads []*objectstorage.MultipartUpload) (err error) {
	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
	for _, upload := range uploads {
		if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
			age := time.Since(upload.TimeCreated.Time)
			what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
				upload.TimeCreated, age)
			if age > maxAge {
				fs.Infof(f, "removing %s", what)
				if operations.SkipDestructive(ctx, what, "remove pending upload") {
					continue
				}
				ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
				if ignoreErr != nil {
					// fs.Debugf(f, "ignoring error %s", ignoreErr)
				}
			} else {
				// fs.Debugf(f, "ignoring %s", what)
			}
		} else {
			fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
		}
	}
	return err
}

// cleanUp removes all pending multipart uploads older than maxAge
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
	uploadsMap, err := f.listMultipartUploadsAll(ctx)
	if err != nil {
		return err
	}
	for bucketName, uploads := range uploadsMap {
		cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
		if cleanErr != nil {
			fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
			err = cleanErr
		}
	}
	return err
}

// CleanUp removes all pending multipart uploads older than 24 hours
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	return f.cleanUp(ctx, 24*time.Hour)
}

// ------------------------------------------------------------
// Implement ListRer which is an optional interface for Fs
//------------------------------------------------------------

/*
ListR lists the objects and directories of the Fs starting
from dir recursively into out.

dir should be "" to start from the root, and should not
have trailing slashes.

This should return ErrDirNotFound if the directory isn't
found.

It should call callback for each tranche of entries read.
These need not be returned in any particular order. If
callback returns an error then the listing will stop
immediately.

Don't implement this unless you have a more efficient way
of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucketName, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucketName == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucketName := entry.Remote()
			err = listR(bucketName, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucketName)
		}
	} else {
		err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucketName)
	}
	return list.Flush()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Commander   = &Fs{}
	_ fs.CleanUpper  = &Fs{}

	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
)
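// A hedged aside on the var block above: assigning &Fs{} to the interface
// types is the usual Go compile-time check that a type satisfies an
// interface. A minimal standalone illustration:
//
//	type Greeter interface{ Greet() string }
//	type English struct{}
//	func (English) Greet() string { return "hello" }
//	var _ Greeter = English{} // fails to compile if the method set is wrong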
@@ -1,33 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:  "TestOracleObjectStorage:",
		TiersToTest: []string{"standard", "archive"},
		NilObject:   (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
@@ -1,7 +0,0 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris || js
// +build plan9 solaris js

package oracleobjectstorage

@@ -1,362 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
)

var refreshGracePeriod = 30 * time.Second

// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)

// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
	Delay          time.Duration    // Wait this time before starting checks
	Pending        []string         // States that are "allowed" and will continue trying
	Refresh        StateRefreshFunc // Refreshes the current state
	Target         []string         // Target state
	Timeout        time.Duration    // The amount of time to wait before timeout
	MinTimeout     time.Duration    // Smallest time to wait before refreshes
	PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
	NotFoundChecks int              // Number of times to allow not found (nil result from Refresh)

	// This is to work around inconsistent APIs
	ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}

// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)

	notfoundTick := 0
	targetOccurrence := 0

	// Set a default for times to check for not found
	if conf.NotFoundChecks == 0 {
		conf.NotFoundChecks = 20
	}

	if conf.ContinuousTargetOccurrence == 0 {
		conf.ContinuousTargetOccurrence = 1
	}

	type Result struct {
		Result interface{}
		State  string
		Error  error
		Done   bool
	}

	// Read every result from the refresh loop, waiting for a positive result.Done.
	resCh := make(chan Result, 1)
	// cancellation channel for the refresh loop
	cancelCh := make(chan struct{})

	result := Result{}

	go func() {
		defer close(resCh)

		select {
		case <-time.After(conf.Delay):
		case <-cancelCh:
			return
		}

		// start with 0 delay for the first loop
		var wait time.Duration

		for {
			// store the last result
			resCh <- result

			// wait and watch for cancellation
			select {
			case <-cancelCh:
				return
			case <-time.After(wait):
				// first round had no wait
				if wait == 0 {
					wait = 100 * time.Millisecond
				}
			}

			res, currentState, err := conf.Refresh()
			result = Result{
				Result: res,
				State:  currentState,
				Error:  err,
			}

			if err != nil {
				resCh <- result
				return
			}

			// If we're waiting for the absence of a thing, then return
			if res == nil && len(conf.Target) == 0 {
				targetOccurrence++
				if conf.ContinuousTargetOccurrence == targetOccurrence {
					result.Done = true
					resCh <- result
					return
				}
				continue
			}

			if res == nil {
				// If we didn't find the resource, check if we have been
				// not finding it for a while, and if so, report an error.
				notfoundTick++
				if notfoundTick > conf.NotFoundChecks {
					result.Error = &NotFoundError{
						LastError: err,
						Retries:   notfoundTick,
					}
					resCh <- result
					return
				}
			} else {
				// Reset the counter for when a resource isn't found
				notfoundTick = 0
				found := false

				for _, allowed := range conf.Target {
					if currentState == allowed {
						found = true
						targetOccurrence++
						if conf.ContinuousTargetOccurrence == targetOccurrence {
							result.Done = true
							resCh <- result
							return
						}
						continue
					}
				}

				for _, allowed := range conf.Pending {
					if currentState == allowed {
						found = true
						targetOccurrence = 0
						break
					}
				}

				if !found && len(conf.Pending) > 0 {
					result.Error = &UnexpectedStateError{
						LastError:     err,
						State:         result.State,
						ExpectedState: conf.Target,
					}
					resCh <- result
					return
				}
			}

			// Wait between refreshes using exponential backoff, except when
			// waiting for the target state to reoccur.
			if targetOccurrence == 0 {
				wait *= 2
			}

			// If a poll interval has been specified, choose that interval.
			// Otherwise, bound the default value.
			if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
				wait = conf.PollInterval
			} else {
				if wait < conf.MinTimeout {
					wait = conf.MinTimeout
				} else if wait > 10*time.Second {
					wait = 10 * time.Second
				}
			}

			// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
		}
	}()

	// store the last value result from the refresh loop
	lastResult := Result{}

	timeout := time.After(conf.Timeout)
	for {
		select {
		case r, ok := <-resCh:
			// channel closed, so return the last result
			if !ok {
				return lastResult.Result, lastResult.Error
			}

			// we reached the intended state
			if r.Done {
				return r.Result, r.Error
			}

			// still waiting, store the last result
			lastResult = r
		case <-ctx.Done():
			close(cancelCh)
			return nil, ctx.Err()
		case <-timeout:
			// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
			// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)

			// cancel the goroutine and start our grace period timer
			close(cancelCh)
			timeout := time.After(refreshGracePeriod)

			// we need a for loop and a label to break on, because we may have
			// an extra response value to read, but still want to wait for the
			// channel to close.
		forSelect:
			for {
				select {
				case r, ok := <-resCh:
					if r.Done {
						// the last refresh loop reached the desired state
						return r.Result, r.Error
					}

					if !ok {
						// the goroutine returned
						break forSelect
					}

					// target state not reached, save the result for the
					// TimeoutError and wait for the channel to close
					lastResult = r
				case <-ctx.Done():
					fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
					break forSelect
				case <-timeout:
					fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
					break forSelect
				}
			}

			return nil, &TimeoutError{
				LastError:     lastResult.Error,
				LastState:     lastResult.State,
				Timeout:       conf.Timeout,
				ExpectedState: conf.Target,
			}
		}
	}
}

// NotFoundError resource not found error
type NotFoundError struct {
	LastError    error
	LastRequest  interface{}
	LastResponse interface{}
	Message      string
	Retries      int
}

func (e *NotFoundError) Error() string {
	if e.Message != "" {
		return e.Message
	}

	if e.Retries > 0 {
		return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
	}

	return "couldn't find resource"
}

func (e *NotFoundError) Unwrap() error {
	return e.LastError
}

// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
	LastError     error
	State         string
	ExpectedState []string
}

func (e *UnexpectedStateError) Error() string {
	return fmt.Sprintf(
		"unexpected state '%s', wanted target '%s'. last error: %s",
		e.State,
		strings.Join(e.ExpectedState, ", "),
		e.LastError,
	)
}

func (e *UnexpectedStateError) Unwrap() error {
	return e.LastError
}

// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

func (e *TimeoutError) Error() string {
	expectedState := "resource to be gone"
	if len(e.ExpectedState) > 0 {
		expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
	}

	extraInfo := make([]string, 0)
	if e.LastState != "" {
		extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
	}
	if e.Timeout > 0 {
		extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
	}

	suffix := ""
	if len(extraInfo) > 0 {
		suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
	}

	if e.LastError != nil {
		return fmt.Sprintf("timeout while waiting for %s%s: %s",
			expectedState, suffix, e.LastError)
	}

	return fmt.Sprintf("timeout while waiting for %s%s",
		expectedState, suffix)
}

func (e *TimeoutError) Unwrap() error {
	return e.LastError
}
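// A hedged usage sketch for the waiter above, e.g. waiting for an async copy
// to move through an "ACCEPTED" -> "COMPLETED" style lifecycle. The states and
// the fetchStatus helper are illustrative, not part of this file.
//
//	conf := &StateChangeConf{
//		Pending:    []string{"ACCEPTED", "IN_PROGRESS"},
//		Target:     []string{"COMPLETED"},
//		Timeout:    5 * time.Minute,
//		MinTimeout: time.Second,
//		Refresh: func() (interface{}, string, error) {
//			status, err := fetchStatus() // e.g. poll a work request
//			if err != nil {
//				return nil, "", err
//			}
//			return status, status.State, nil
//		},
//	}
//	result, err := conf.WaitForStateContext(ctx, "copy")
//	_, _ = result, err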
@@ -13,7 +13,7 @@ const (
	timeFormat = `"` + time.RFC1123Z + `"`
)

// Time represents date and time information for the
// Time represents represents date and time information for the
// pcloud API, by using RFC1123Z
type Time time.Time


@@ -588,7 +588,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -607,7 +607,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object into the container
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -681,9 +681,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -766,9 +766,9 @@ func (f *Fs) CleanUp(ctx context.Context) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1073,6 +1073,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1151,7 +1152,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -493,7 +493,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -512,7 +512,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -530,9 +530,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists.
// This will produce an error if the object already exists
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -694,9 +694,9 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -870,6 +870,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -916,7 +917,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -230,7 +230,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
@@ -523,9 +523,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -562,9 +562,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
|
||||
@@ -261,7 +261,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Update the already existing object
|
||||
//
|
||||
// Copy the reader into the object updating modTime and size.
|
||||
// Copy the reader into the object updating modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
|
||||
@@ -1,4 +1,3 @@
// Package putio provides an interface to the put.io storage system.
package putio

import (

@@ -1,8 +1,9 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

//go:build !plan9 && !js
// +build !plan9,!js

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

import (
@@ -430,9 +431,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -476,7 +477,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
//If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
	o := &Object{
		fs: f,

@@ -1,101 +0,0 @@
// Generate boilerplate code for setting similar structs from each other

//go:build ignore
// +build ignore

package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"reflect"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3"
)

// flags
var (
	outputFile = flag.String("o", "", "Output file name, stdout if unset")
)

// globals
var (
	out io.Writer = os.Stdout
)

// genSetFrom generates code to set the public members of a from b
//
// a and b should be pointers to structs
//
// a can be a different type from b
//
// Only the Fields which have the same name and assignable type on a
// and b will be set.
//
// This is useful for copying between almost identical structures that
// are frequently present in auto-generated code for cloud storage
// interfaces.
func genSetFrom(a, b interface{}) {
	name := fmt.Sprintf("setFrom_%T_%T", a, b)
	name = strings.Replace(name, ".", "", -1)
	name = strings.Replace(name, "*", "", -1)
	fmt.Fprintf(out, "\n// %s copies matching elements from a to b\n", name)
	fmt.Fprintf(out, "func %s(a %T, b %T) {\n", name, a, b)
	ta := reflect.TypeOf(a).Elem()
	tb := reflect.TypeOf(b).Elem()
	va := reflect.ValueOf(a).Elem()
	vb := reflect.ValueOf(b).Elem()
	for i := 0; i < tb.NumField(); i++ {
		bField := vb.Field(i)
		tbField := tb.Field(i)
		name := tbField.Name
		aField := va.FieldByName(name)
		taField, found := ta.FieldByName(name)
		if found && aField.IsValid() && bField.IsValid() && aField.CanSet() && tbField.Type.AssignableTo(taField.Type) {
			fmt.Fprintf(out, "\ta.%s = b.%s\n", name, name)
		}
	}
	fmt.Fprintf(out, "}\n")
}

func main() {
	flag.Parse()

	if *outputFile != "" {
		fd, err := os.Create(*outputFile)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			err := fd.Close()
			if err != nil {
				log.Fatal(err)
			}
		}()
		out = fd
	}

	fmt.Fprintf(out, `// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.

package s3

import "github.com/aws/aws-sdk-go/service/s3"
`)

	genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
	genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
	genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
	genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
	genSetFrom(new(s3.Object), new(s3.ObjectVersion))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
	genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
	genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
	genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
}
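A note on regeneration (an assumption on my part, the exact wiring is not shown in this diff): generators like the one above are usually invoked via a go:generate directive from the package that consumes the output, so the generated file stays in sync with the SDK. A minimal sketch of such a directive, with the output file name taken from the generated header shown later in this diff:

//go:generate go run gen_setfrom.go -o setfrom.go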
1224
backend/s3/s3.go
File diff suppressed because it is too large
@@ -4,19 +4,14 @@ import (
	"bytes"
	"compress/gzip"
	"context"
	"crypto/md5"
	"fmt"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/version"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -31,15 +26,9 @@ func gz(t *testing.T, s string) string {
	return buf.String()
}

func md5sum(t *testing.T, s string) string {
	hash := md5.Sum([]byte(s))
	return fmt.Sprintf("%x", hash)
}

func (f *Fs) InternalTestMetadata(t *testing.T) {
	ctx := context.Background()
	original := random.String(1000)
	contents := gz(t, original)
	contents := gz(t, random.String(1000))

	item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
	btime := time.Now()
@@ -76,31 +65,6 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
			assert.Equal(t, v, got, k)
		}
	}

	t.Run("GzipEncoding", func(t *testing.T) {
		// Test that the gzipped file we uploaded can be
		// downloaded with and without decompression
		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
			gotContents := fstests.ReadObject(ctx, t, o, -1)
			assert.Equal(t, wantContents, gotContents)
			assert.Equal(t, wantSize, o.Size())
			gotHash, err := o.Hash(ctx, hash.MD5)
			require.NoError(t, err)
			assert.Equal(t, wantHash, gotHash)
		}

		t.Run("NoDecompress", func(t *testing.T) {
			checkDownload(contents, int64(len(contents)), md5sum(t, contents))
		})
		t.Run("Decompress", func(t *testing.T) {
			f.opt.Decompress = true
			defer func() {
				f.opt.Decompress = false
			}()
			checkDownload(original, -1, "")
		})

	})
}

func (f *Fs) InternalTestNoHead(t *testing.T) {
@@ -116,279 +80,13 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
	defer func() {
		assert.NoError(t, obj.Remove(ctx))
	}()
	// PutTestcontents checks the received object
	// PutTestcontests checks the received object

}
|
||||
func TestVersionLess(t *testing.T) {
|
||||
key1 := "key1"
|
||||
key2 := "key2"
|
||||
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
|
||||
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
|
||||
for n, test := range []struct {
|
||||
a, b *s3.ObjectVersion
|
||||
want bool
|
||||
}{
|
||||
{a: nil, b: nil, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
|
||||
{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
|
||||
} {
|
||||
got := versionLess(test.a, test.b)
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeDeleteMarkers(t *testing.T) {
|
||||
key1 := "key1"
|
||||
key2 := "key2"
|
||||
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
|
||||
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
|
||||
for n, test := range []struct {
|
||||
versions []*s3.ObjectVersion
|
||||
markers []*s3.DeleteMarkerEntry
|
||||
want []*s3.ObjectVersion
|
||||
}{
|
||||
{
|
||||
versions: []*s3.ObjectVersion{},
|
||||
markers: []*s3.DeleteMarkerEntry{},
|
||||
want: []*s3.ObjectVersion{},
|
||||
},
|
||||
{
|
||||
versions: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
markers: []*s3.DeleteMarkerEntry{},
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
versions: []*s3.ObjectVersion{},
|
||||
markers: []*s3.DeleteMarkerEntry{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
Size: isDeleteMarker,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
versions: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t2,
|
||||
},
|
||||
{
|
||||
Key: &key2,
|
||||
LastModified: &t2,
|
||||
},
|
||||
},
|
||||
markers: []*s3.DeleteMarkerEntry{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t2,
|
||||
},
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
Size: isDeleteMarker,
|
||||
},
|
||||
{
|
||||
Key: &key2,
|
||||
LastModified: &t2,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
got := mergeDeleteMarkers(test.versions, test.markers)
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Enable versioning for this bucket during this test
|
||||
_, err := f.setGetVersioning(ctx, "Enabled")
|
||||
if err != nil {
|
||||
t.Skipf("Couldn't enable versioning: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
// Disable versioning for this bucket
|
||||
_, err := f.setGetVersioning(ctx, "Suspended")
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Small pause to make the LastModified different since AWS
|
||||
// only seems to track them to 1 second granularity
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Create an object
|
||||
const fileName = "test-versions.txt"
|
||||
contents := random.String(100)
|
||||
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||
defer func() {
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
}()
|
||||
|
||||
// Small pause
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Remove it
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
|
||||
// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// And create it with different size and contents
|
||||
newContents := random.String(101)
|
||||
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
|
||||
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)
|
||||
|
||||
t.Run("Versions", func(t *testing.T) {
|
||||
// Set --s3-versions for this test
|
||||
f.opt.Versions = true
|
||||
defer func() {
|
||||
f.opt.Versions = false
|
||||
}()
|
||||
|
||||
// Read the contents
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
tests := 0
|
||||
var fileNameVersion string
|
||||
for _, entry := range entries {
|
||||
remote := entry.Remote()
|
||||
if remote == fileName {
|
||||
t.Run("ReadCurrent", func(t *testing.T) {
|
||||
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
||||
})
|
||||
tests++
|
||||
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
|
||||
t.Run("ReadVersion", func(t *testing.T) {
|
||||
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
|
||||
})
|
||||
assert.WithinDuration(t, obj.(*Object).lastModified, versionTime, time.Second, "object time must be within 1 second of version time")
|
||||
fileNameVersion = remote
|
||||
tests++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 2, tests, "object missing from listing")
|
||||
|
||||
// Check we can read the object with a version suffix
|
||||
t.Run("NewObject", func(t *testing.T) {
|
||||
o, err := f.NewObject(ctx, fileNameVersion)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, o)
|
||||
assert.Equal(t, int64(100), o.Size(), o.Remote())
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("VersionAt", func(t *testing.T) {
|
||||
// We set --s3-version-at for this test so make sure we reset it at the end
|
||||
defer func() {
|
||||
f.opt.VersionAt = fs.Time{}
|
||||
}()
|
||||
|
||||
var (
|
||||
firstObjectTime = obj.(*Object).lastModified
|
||||
secondObjectTime = newObj.(*Object).lastModified
|
||||
)
|
||||
|
||||
for _, test := range []struct {
|
||||
what string
|
||||
at time.Time
|
||||
want []fstest.Item
|
||||
wantErr error
|
||||
wantSize int64
|
||||
}{
|
||||
{
|
||||
what: "Before",
|
||||
at: firstObjectTime.Add(-time.Second),
|
||||
want: fstests.InternalTestFiles,
|
||||
wantErr: fs.ErrorObjectNotFound,
|
||||
},
|
||||
{
|
||||
what: "AfterOne",
|
||||
at: firstObjectTime.Add(time.Second),
|
||||
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
|
||||
wantSize: 100,
|
||||
},
|
||||
{
|
||||
what: "AfterDelete",
|
||||
at: secondObjectTime.Add(-time.Second),
|
||||
want: fstests.InternalTestFiles,
|
||||
wantErr: fs.ErrorObjectNotFound,
|
||||
},
|
||||
{
|
||||
what: "AfterTwo",
|
||||
at: secondObjectTime.Add(time.Second),
|
||||
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
|
||||
wantSize: 101,
|
||||
},
|
||||
} {
|
||||
t.Run(test.what, func(t *testing.T) {
|
||||
f.opt.VersionAt = fs.Time(test.at)
|
||||
t.Run("List", func(t *testing.T) {
|
||||
fstest.CheckListing(t, f, test.want)
|
||||
})
|
||||
t.Run("NewObject", func(t *testing.T) {
|
||||
gotObj, gotErr := f.NewObject(ctx, fileName)
|
||||
assert.Equal(t, test.wantErr, gotErr)
|
||||
if gotErr == nil {
|
||||
assert.Equal(t, test.wantSize, gotObj.Size())
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Cleanup", func(t *testing.T) {
|
||||
require.NoError(t, f.CleanUpHidden(ctx))
|
||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
||||
fstest.CheckListing(t, f, items)
|
||||
// Set --s3-versions for this test
|
||||
f.opt.Versions = true
|
||||
defer func() {
|
||||
f.opt.Versions = false
|
||||
}()
|
||||
fstest.CheckListing(t, f, items)
|
||||
})
|
||||
|
||||
// Purge gets tested later
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Metadata", f.InternalTestMetadata)
|
||||
t.Run("NoHead", f.InternalTestNoHead)
|
||||
t.Run("Versions", f.InternalTestVersions)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
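The `var _ fstests.InternalTester = (*Fs)(nil)` line above uses Go's compile-time interface assertion idiom: the blank-identifier assignment forces the compiler to check that *Fs satisfies the interface without allocating anything at runtime. A minimal self-contained illustration of the same pattern (names here are hypothetical, not from the rclone codebase):

package main

// Greeter is a stand-in for an interface like fstests.InternalTester.
type Greeter interface{ Greet() string }

type English struct{}

func (English) Greet() string { return "hello" }

// Compilation fails here if English ever stops satisfying Greeter.
var _ Greeter = English{}

func main() {}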
@@ -1,239 +0,0 @@
|
||||
// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.
|
||||
|
||||
package s3
|
||||
|
||||
import "github.com/aws/aws-sdk-go/service/s3"
|
||||
|
||||
// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectsV2Output_s3ListObjectsOutput copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectsOutput) {
|
||||
a.CommonPrefixes = b.CommonPrefixes
|
||||
a.Contents = b.Contents
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.IsTruncated = b.IsTruncated
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVersionsInput, b *s3.ListObjectsV2Input) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
|
||||
a.IsLatest = b.IsLatest
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
a.Owner = b.Owner
|
||||
a.VersionId = b.VersionId
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectVersionsOutput) {
|
||||
a.CommonPrefixes = b.CommonPrefixes
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.IsTruncated = b.IsTruncated
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ETag = b.ETag
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
a.Owner = b.Owner
|
||||
a.Size = b.Size
|
||||
a.StorageClass = b.StorageClass
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) {
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.Expires = b.Expires
|
||||
a.GrantFullControl = b.GrantFullControl
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.Tagging = b.Tagging
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3UploadPartCopyInput_s3CopyObjectInput copies matching elements from a to b
|
||||
func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.CopySource = b.CopySource
|
||||
a.CopySourceIfMatch = b.CopySourceIfMatch
|
||||
a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
|
||||
a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch
|
||||
a.CopySourceIfUnmodifiedSince = b.CopySourceIfUnmodifiedSince
|
||||
a.CopySourceSSECustomerAlgorithm = b.CopySourceSSECustomerAlgorithm
|
||||
a.CopySourceSSECustomerKey = b.CopySourceSSECustomerKey
|
||||
a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
|
||||
a.Key = b.Key
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
}
|
||||
|
||||
// setFrom_s3HeadObjectOutput_s3GetObjectOutput copies matching elements from a to b
|
||||
func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.GetObjectOutput) {
|
||||
a.AcceptRanges = b.AcceptRanges
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumCRC32 = b.ChecksumCRC32
|
||||
a.ChecksumCRC32C = b.ChecksumCRC32C
|
||||
a.ChecksumSHA1 = b.ChecksumSHA1
|
||||
a.ChecksumSHA256 = b.ChecksumSHA256
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentLength = b.ContentLength
|
||||
a.ContentType = b.ContentType
|
||||
a.DeleteMarker = b.DeleteMarker
|
||||
a.ETag = b.ETag
|
||||
a.Expiration = b.Expiration
|
||||
a.Expires = b.Expires
|
||||
a.LastModified = b.LastModified
|
||||
a.Metadata = b.Metadata
|
||||
a.MissingMeta = b.MissingMeta
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.PartsCount = b.PartsCount
|
||||
a.ReplicationStatus = b.ReplicationStatus
|
||||
a.RequestCharged = b.RequestCharged
|
||||
a.Restore = b.Restore
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.VersionId = b.VersionId
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.Expires = b.Expires
|
||||
a.GrantFullControl = b.GrantFullControl
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.Tagging = b.Tagging
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3HeadObjectOutput_s3PutObjectInput copies matching elements from a to b
|
||||
func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.PutObjectInput) {
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumCRC32 = b.ChecksumCRC32
|
||||
a.ChecksumCRC32C = b.ChecksumCRC32C
|
||||
a.ChecksumSHA1 = b.ChecksumSHA1
|
||||
a.ChecksumSHA256 = b.ChecksumSHA256
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentLength = b.ContentLength
|
||||
a.ContentType = b.ContentType
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
@@ -1,4 +1,3 @@
// Package api provides types used by the Seafile API.
package api

// Some api objects are duplicated with only small differences,

@@ -1,4 +1,3 @@
// Package seafile provides an interface to the Seafile storage system.
package seafile

import (
@@ -137,7 +136,7 @@ type Fs struct {
	features *fs.Features // optional features
	endpoint *url.URL // URL of the host
	endpointURL string // endpoint as a string
	srv *rest.Client // the connection to the server
	srv *rest.Client // the connection to the one drive server
	pacer *fs.Pacer // pacer for API calls
	authMu sync.Mutex // Mutex to protect library decryption
	createDirMutex sync.Mutex // Protect creation of directories
@@ -672,9 +671,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) e

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
@@ -723,9 +722,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {

@@ -1,7 +1,8 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp

//go:build !plan9
// +build !plan9

// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

import (
@@ -275,24 +276,19 @@ Set to 0 to keep connections indefinitely.
	Name: "chunk_size",
	Help: `Upload and download chunk size.

This controls the maximum size of payload in SFTP protocol packets.
The RFC limits this to 32768 bytes (32k), which is the default. However,
a lot of servers support larger sizes, typically limited to a maximum
total package size of 256k, and setting it larger will increase transfer
speed dramatically on high latency links. This includes OpenSSH, and,
for example, using the value of 255k works well, leaving plenty of room
for overhead while still being within a total packet size of 256k.
This controls the maximum packet size used in the SFTP protocol. The
RFC limits this to 32768 bytes (32k), however a lot of servers
support larger sizes and setting it larger will increase transfer
speed dramatically on high latency links.

Make sure to test thoroughly before using a value higher than 32k,
and only use it if you always connect to the same server or after
sufficiently broad testing. If you get errors such as
"failed to send packet payload: EOF", lots of "connection lost",
or "corrupted on transfer", when copying a larger file, try lowering
the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp)
sends packets with standard 32k maximum payload so you must not
set a different chunk_size when downloading files, but it accepts
packets up to the 256k total size, so for uploads the chunk_size
can be set as for the OpenSSH example above.
Only use a setting higher than 32k if you always connect to the same
server or after sufficiently broad testing.

For example using the value of 252k with OpenSSH works well with its
maximum packet size of 256k.

If you get the error "failed to send packet header: EOF" when copying
a large file, try lowering this number.
`,
	Default: 32 * fs.Kibi,
	Advanced: true,
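A usage sketch for the option above (the remote name and value here are illustrative; the flag name follows rclone's usual backend-option-to-flag convention, and the caveats in the help text apply):

rclone copy --sftp-chunk-size 255k remote:path /local/path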
@@ -1171,10 +1167,6 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
	err = c.sftpClient.Mkdir(dirPath)
	f.putSftpConnection(&c, err)
	if err != nil {
		if os.IsExist(err) {
			fs.Debugf(f, "directory %q exists after Mkdir is attempted", dirPath)
			return nil
		}
		return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
	}
	return nil

@@ -741,7 +741,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Returns the object, leaf, directoryID and error.
|
||||
// Returns the object, leaf, directoryID and error
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||
@@ -760,7 +760,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned.
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
@@ -783,9 +783,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// PutUnchecked the object into the container
|
||||
//
|
||||
// This will produce an error if the object already exists.
|
||||
// This will produce an error if the object already exists
|
||||
//
|
||||
// Copy the reader in to the new object which is returned.
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
@@ -973,9 +973,9 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
@@ -1043,9 +1043,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
@@ -1256,6 +1256,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
@@ -1323,7 +1324,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
// If existing is set then it updates the object rather than creating a new one
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
|
||||
@@ -1,4 +1,3 @@
// Package api provides types used by the Sia API.
package api

import (

@@ -1,4 +1,3 @@
// Package sia provides an interface to the Sia storage system.
package sia

import (

@@ -1,233 +0,0 @@
|
||||
package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/hirochachacha/go-smb2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
)
|
||||
|
||||
// dial starts a client connection to the given SMB server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SMB handshake, and then sets up a Client.
|
||||
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
dialer := fshttp.NewDialer(ctx)
|
||||
tconn, err := dialer.Dial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pass := ""
|
||||
if f.opt.Pass != "" {
|
||||
pass, err = obscure.Reveal(f.opt.Pass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
d := &smb2.Dialer{
|
||||
Initiator: &smb2.NTLMInitiator{
|
||||
User: f.opt.User,
|
||||
Password: pass,
|
||||
Domain: f.opt.Domain,
|
||||
},
|
||||
}
|
||||
|
||||
session, err := d.DialContext(ctx, tconn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &conn{
|
||||
smbSession: session,
|
||||
conn: &tconn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// conn encapsulates a SMB client and corresponding SMB session
|
||||
type conn struct {
|
||||
conn *net.Conn
|
||||
smbSession *smb2.Session
|
||||
smbShare *smb2.Share
|
||||
shareName string
|
||||
}
|
||||
|
||||
// Closes the connection
|
||||
func (c *conn) close() (err error) {
|
||||
if c.smbShare != nil {
|
||||
err = c.smbShare.Umount()
|
||||
}
|
||||
sessionLogoffErr := c.smbSession.Logoff()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sessionLogoffErr
|
||||
}
|
||||
|
||||
// True if it's closed
|
||||
func (c *conn) closed() bool {
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
return nopErr == nil
|
||||
}
|
||||
|
||||
// Show that we are using a SMB session
|
||||
//
|
||||
// Call removeSession() when done
|
||||
func (f *Fs) addSession() {
|
||||
atomic.AddInt32(&f.sessions, 1)
|
||||
}
|
||||
|
||||
// Show the SMB session is no longer in use
|
||||
func (f *Fs) removeSession() {
|
||||
atomic.AddInt32(&f.sessions, -1)
|
||||
}
|
||||
|
||||
// getSessions shows whether there are any sessions in use
|
||||
func (f *Fs) getSessions() int32 {
|
||||
return atomic.LoadInt32(&f.sessions)
|
||||
}
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
ctx = context.Background()
|
||||
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
if share != "" {
|
||||
// mount the specified share as well if user requested
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
if err != nil {
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(ctx)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Ensure the specified share is mounted or the session is unmounted
|
||||
func (c *conn) mountShare(share string) (err error) {
|
||||
if c.shareName == share {
|
||||
return nil
|
||||
}
|
||||
if c.smbShare != nil {
|
||||
err = c.smbShare.Umount()
|
||||
c.smbShare = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if share != "" {
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
c.shareName = share
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a SMB connection from the pool, or open a new one
|
||||
func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
accounting.LimitTPS(ctx)
|
||||
f.poolMu.Lock()
|
||||
for len(f.pool) > 0 {
|
||||
c = f.pool[0]
|
||||
f.pool = f.pool[1:]
|
||||
err = c.mountShare(share)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
fs.Debugf(f, "Discarding unusable SMB connection: %v", err)
|
||||
c = nil
|
||||
}
|
||||
f.poolMu.Unlock()
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
c, err = f.newConnection(ctx, share)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return c, err
|
||||
}
|
||||
|
||||
// Return a SMB connection to the pool
//
// It nils the pointed to connection out so it can't be reused
func (f *Fs) putConnection(pc **conn) {
	c := *pc
	*pc = nil

	var nopErr error
	if c.smbShare != nil {
		// stat the current directory
		_, nopErr = c.smbShare.Stat(".")
	} else {
		// list the shares
		_, nopErr = c.smbSession.ListSharenames()
	}
	if nopErr != nil {
		fs.Debugf(f, "Connection failed, closing: %v", nopErr)
		_ = c.close()
		return
	}

	f.poolMu.Lock()
	f.pool = append(f.pool, c)
	if f.opt.IdleTimeout > 0 {
		f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
	}
	f.poolMu.Unlock()
}

// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
	f.poolMu.Lock()
	defer f.poolMu.Unlock()
	if sessions := f.getSessions(); sessions != 0 {
		fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions)
		if f.opt.IdleTimeout > 0 {
			f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
		}
		return nil
	}
	if f.opt.IdleTimeout > 0 {
		f.drain.Stop()
	}
	if len(f.pool) != 0 {
		fs.Debugf(f, "Closing %d unused connections", len(f.pool))
	}
	for i, c := range f.pool {
		if !c.closed() {
			cErr := c.close()
			if cErr != nil {
				err = cErr
			}
		}
		f.pool[i] = nil
	}
	f.pool = nil
	return err
}
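A hedged sketch of how the pool helpers above compose (the helper below is hypothetical, not part of the diff): borrow a connection with getConnection, use the mounted share, then hand it back via putConnection, which health-checks the connection and either requeues or closes it.

// statOnShare illustrates the borrow/use/return pattern of the pool.
func (f *Fs) statOnShare(ctx context.Context, share, path string) (os.FileInfo, error) {
	c, err := f.getConnection(ctx, share) // reuses a pooled conn or dials, then mounts share
	if err != nil {
		return nil, err
	}
	fi, statErr := c.smbShare.Stat(path)
	f.putConnection(&c) // health-checks and requeues (or closes) the conn; c is nil'd
	return fi, statErr
}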
@@ -1,789 +0,0 @@
|
||||
// Package smb provides an interface to SMB servers
|
||||
package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
currentUser = env.CurrentUser()
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "smb",
|
||||
Description: "SMB / CIFS",
|
||||
NewFs: NewFs,
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SMB port number.",
|
||||
Default: 445,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "SMB password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Help: `Max time before closing idle connections.
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
|
||||
Set to 0 to keep connections indefinitely.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hide_special_share",
|
||||
Help: "Hide special shares (e.g. print$) which users aren't supposed to access.",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_insensitive",
|
||||
Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.EncodeZero |
|
||||
// path separator
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeBackSlash |
|
||||
// windows
|
||||
encoder.EncodeWin |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeDot |
|
||||
// the file turns into 8.3 names (and cannot be converted back)
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeRightPeriod |
|
||||
//
|
||||
encoder.EncodeInvalidUtf8,
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
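Given the options registered above, a minimal config stanza for this backend might look like the following (the remote name and values are illustrative only, taken from the defaults and help text in the init function):

[mysmb]
type = smb
host = example.com
user = alice
domain = WORKGROUP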
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
Port string `config:"port"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Domain string `config:"domain"`
|
||||
HideSpecial bool `config:"hide_special_share"`
|
||||
CaseInsensitive bool `config:"case_insensitive"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a SMB remote
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
pacer *fs.Pacer // pacer for operations
|
||||
|
||||
sessions int32
|
||||
poolMu sync.Mutex
|
||||
pool []*conn
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// Object describes a file at the server
|
||||
type Object struct {
|
||||
fs *Fs // reference to Fs
|
||||
remote string // the remote path
|
||||
statResult os.FileInfo
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ctx: ctx,
|
||||
root: root,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: opt.CaseInsensitive,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
// set the pool drainer timer going
|
||||
if opt.IdleTimeout > 0 {
|
||||
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||
}
|
||||
|
||||
// test if the root exists as a file
|
||||
share, dir := f.split("")
|
||||
if share == "" || dir == "" {
|
||||
return f, nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
// ignore stat error here
|
||||
return f, nil
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
f.root, err = path.Dir(root), fs.ErrorIsFile
|
||||
}
|
||||
fs.Debugf(f, "Using root directory %q", f.root)
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
bucket, file := f.split("")
|
||||
if bucket == "" {
|
||||
return fmt.Sprintf("smb://%s@%s:%s/", f.opt.User, f.opt.Host, f.opt.Port)
|
||||
}
|
||||
return fmt.Sprintf("smb://%s@%s:%s/%s/%s", f.opt.User, f.opt.Host, f.opt.Port, bucket, file)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Hashes returns nothing as SMB itself doesn't have a way to tell checksums
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet()
|
||||
}
|
||||
|
||||
// Precision returns the precision of mtime
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Millisecond
|
||||
}
|
||||
|
||||
// NewObject creates a new file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
share, path := f.split(remote)
|
||||
return f.findObjectSeparate(ctx, share, path)
|
||||
}
|
||||
|
||||
func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Object, error) {
|
||||
if share == "" || path == "" {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
if stat.IsDir() {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
|
||||
return f.makeEntry(share, path, stat), nil
|
||||
}
|
||||
|
||||
// Mkdir creates a directory on the server
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
share, path := f.split(dir)
|
||||
if share == "" || path == "" {
|
||||
return nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
// Rmdir removes an empty directory on the server
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
share, path := f.split(dir)
|
||||
if share == "" || path == "" {
|
||||
return nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.Remove(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
// Put uploads a file
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
if err == nil {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
if err == nil {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
	dstShare, dstPath := f.split(remote)
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	srcShare, srcPath := srcObj.split()
	if dstShare != srcShare {
		fs.Debugf(src, "Can't move - must be on the same share")
		return nil, fs.ErrorCantMove
	}

	err = f.ensureDirectory(ctx, dstShare, dstPath)
	if err != nil {
		return nil, fmt.Errorf("failed to make parent directories: %w", err)
	}

	cn, err := f.getConnection(ctx, dstShare)
	if err != nil {
		return nil, err
	}
	err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
	f.putConnection(&cn)
	if err != nil {
		return nil, translateError(err, false)
	}
	return f.findObjectSeparate(ctx, dstShare, dstPath)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	dstShare, dstPath := f.split(dstRemote)
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcShare, srcPath := srcFs.split(srcRemote)
	if dstShare != srcShare {
		fs.Debugf(src, "Can't move - must be on the same share")
		return fs.ErrorCantDirMove
	}

	err = f.ensureDirectory(ctx, dstShare, dstPath)
	if err != nil {
		return fmt.Errorf("failed to make parent directories: %w", err)
	}

	cn, err := f.getConnection(ctx, dstShare)
	if err != nil {
		return err
	}
	defer f.putConnection(&cn)

	_, err = cn.smbShare.Stat(dstPath)
	if os.IsNotExist(err) {
		err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
		return translateError(err, true)
	}
	return fs.ErrorDirExists
}

// List files and directories in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	share, _path := f.split(dir)

	cn, err := f.getConnection(ctx, share)
	if err != nil {
		return nil, err
	}
	defer f.putConnection(&cn)

	if share == "" {
		shares, err := cn.smbSession.ListSharenames()
		for _, shh := range shares {
			shh = f.toNativePath(shh)
			if strings.HasSuffix(shh, "$") && f.opt.HideSpecial {
				continue
			}
			entries = append(entries, fs.NewDir(shh, time.Time{}))
		}
		return entries, err
	}

	dirents, err := cn.smbShare.ReadDir(f.toSambaPath(_path))
	if err != nil {
		return entries, translateError(err, true)
	}
	for _, file := range dirents {
		nfn := f.toNativePath(file.Name())
		if file.IsDir() {
			entries = append(entries, fs.NewDir(path.Join(dir, nfn), file.ModTime()))
		} else {
			entries = append(entries, f.makeEntryRelative(share, _path, nfn, file))
		}
	}

	return entries, nil
}

// About reports the total, used and free space on the share
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
	share, dir := f.split("/")
	if share == "" {
		return nil, fs.ErrorListBucketRequired
	}
	dir = f.toSambaPath(dir)

	cn, err := f.getConnection(ctx, share)
	if err != nil {
		return nil, err
	}
	stat, err := cn.smbShare.Statfs(dir)
	f.putConnection(&cn)
	if err != nil {
		return nil, err
	}

	bs := int64(stat.BlockSize())
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
		Used:  fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
		Free:  fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
	}
	return usage, nil
}
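
// Illustrative sketch (values assumed, added commentary): how the fields
// above derive from an SMB Statfs result, for a hypothetical share with
// 4 KiB blocks:
//
//	bs := int64(4096)                  // stat.BlockSize()
//	total := bs * 1_000_000            // TotalBlockCount -> ~3.8 GiB
//	used := bs * (1_000_000 - 250_000) // total minus free blocks
//	free := bs * 200_000               // AvailableBlockCount; may be smaller
//	                                   // than FreeBlockCount if the server
//	                                   // reserves space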

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	return f.drainPool(ctx)
}

func (f *Fs) makeEntry(share, _path string, stat os.FileInfo) *Object {
	remote := path.Join(share, _path)
	return &Object{
		fs:         f,
		remote:     trimPathPrefix(remote, f.root),
		statResult: stat,
	}
}

func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo) *Object {
	return f.makeEntry(share, path.Join(_path, relative), stat)
}

func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
	cn, err := f.getConnection(ctx, share)
	if err != nil {
		return err
	}
	err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
	f.putConnection(&cn)
	return err
}

/// Object

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime is the last modified time (read-only)
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.statResult.ModTime()
}

// Size is the file length
func (o *Object) Size() int64 {
	return o.statResult.Size()
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash always returns an empty value as SMB does not provide checksums
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
	share, reqDir := o.split()
	if share == "" || reqDir == "" {
		return fs.ErrorCantSetModTime
	}
	reqDir = o.fs.toSambaPath(reqDir)

	cn, err := o.fs.getConnection(ctx, share)
	if err != nil {
		return err
	}
	defer o.fs.putConnection(&cn)

	err = cn.smbShare.Chtimes(reqDir, t, t)
	if err != nil {
		return err
	}

	fi, err := cn.smbShare.Stat(reqDir)
	if err == nil {
		o.statResult = fi
	}
	return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	share, filename := o.split()
	if share == "" || filename == "" {
		return nil, fs.ErrorIsDir
	}
	filename = o.fs.toSambaPath(filename)

	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	o.fs.addSession() // Show session in use
	defer o.fs.removeSession()

	cn, err := o.fs.getConnection(ctx, share)
	if err != nil {
		return nil, err
	}
	fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
	if err != nil {
		o.fs.putConnection(&cn)
		return nil, fmt.Errorf("failed to open: %w", err)
	}
	pos, err := fl.Seek(offset, io.SeekStart)
	if err != nil {
		o.fs.putConnection(&cn)
		return nil, fmt.Errorf("failed to seek: %w", err)
	}
	if pos != offset {
		o.fs.putConnection(&cn)
		return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
	}

	in = readers.NewLimitedReadCloser(fl, limit)
	in = &boundReadCloser{
		rc: in,
		close: func() error {
			o.fs.putConnection(&cn)
			return nil
		},
	}

	return in, nil
}
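
// Illustrative sketch (values assumed, added commentary): how the option
// loop above resolves fs.OpenOptions into the offset/limit pair used for
// the seek:
//
//	offset, limit := int64(0), int64(-1)
//	r := fs.RangeOption{Start: 100, End: 199}
//	offset, limit = r.Decode(o.Size()) // offset=100, limit=100 (bytes 100-199)
//	s := fs.SeekOption{Offset: 42}
//	offset = s.Offset                  // offset=42, limit stays -1 (to EOF)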

// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	share, filename := o.split()
	if share == "" || filename == "" {
		return fs.ErrorIsDir
	}

	err = o.fs.ensureDirectory(ctx, share, filename)
	if err != nil {
		return fmt.Errorf("failed to make parent directories: %w", err)
	}

	filename = o.fs.toSambaPath(filename)

	o.fs.addSession() // Show session in use
	defer o.fs.removeSession()

	cn, err := o.fs.getConnection(ctx, share)
	if err != nil {
		return err
	}
	defer func() {
		o.statResult, _ = cn.smbShare.Stat(filename)
		o.fs.putConnection(&cn)
	}()

	fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return fmt.Errorf("failed to open: %w", err)
	}

	// remove the file if upload failed
	remove := func() {
		// Windows doesn't allow removal of files without closing file
		removeErr := fl.Close()
		if removeErr != nil {
			fs.Debugf(src, "failed to close the file for delete: %v", removeErr)
			// try to remove the file anyway; the file may be already closed
		}

		removeErr = cn.smbShare.Remove(filename)
		if removeErr != nil {
			fs.Debugf(src, "failed to remove: %v", removeErr)
		} else {
			fs.Debugf(src, "removed after failed upload: %v", err)
		}
	}

	_, err = fl.ReadFrom(in)
	if err != nil {
		remove()
		return fmt.Errorf("Update ReadFrom failed: %w", err)
	}

	err = fl.Close()
	if err != nil {
		remove()
		return fmt.Errorf("Update Close failed: %w", err)
	}

	// Set the modified time
	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return fmt.Errorf("Update SetModTime failed: %w", err)
	}

	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	share, filename := o.split()
	if share == "" || filename == "" {
		return fs.ErrorIsDir
	}
	filename = o.fs.toSambaPath(filename)

	cn, err := o.fs.getConnection(ctx, share)
	if err != nil {
		return err
	}

	err = cn.smbShare.Remove(filename)
	o.fs.putConnection(&cn)

	return err
}

// String converts this Object to a string
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

/// Misc

// split returns share name and path in the share from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (shareName, filepath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns share name and path in the share from the object
func (o *Object) split() (shareName, filepath string) {
	return o.fs.split(o.remote)
}

func (f *Fs) toSambaPath(path string) string {
	// 1. encode via Rclone's escaping system
	// 2. convert to backslash-separated path
	return strings.ReplaceAll(f.opt.Enc.FromStandardPath(path), "/", "\\")
}

func (f *Fs) toNativePath(path string) string {
	// 1. convert *back* to slash-separated path
	// 2. decode via Rclone's escaping system
	return f.opt.Enc.ToStandardPath(strings.ReplaceAll(path, "\\", "/"))
}
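
// Illustrative sketch (assumes the default encoding, added commentary):
// round-tripping a path through the two helpers above:
//
//	samba := f.toSambaPath("dir/sub/file.txt") // `dir\sub\file.txt`
//	native := f.toNativePath(samba)            // "dir/sub/file.txt" again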

func ensureSuffix(s, suffix string) string {
	if strings.HasSuffix(s, suffix) {
		return s
	}
	return s + suffix
}

func trimPathPrefix(s, prefix string) string {
	// we need to clean the paths to make tests pass!
	s = betterPathClean(s)
	prefix = betterPathClean(prefix)
	if s == prefix || s == prefix+"/" {
		return ""
	}
	prefix = ensureSuffix(prefix, "/")
	return strings.TrimPrefix(s, prefix)
}

func betterPathClean(p string) string {
	d := path.Clean(p)
	if d == "." {
		return ""
	}
	return d
}
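
// Illustrative sketch (added commentary): expected behaviour of the path
// helpers above:
//
//	trimPathPrefix("share/dir/file", "share") // "dir/file"
//	trimPathPrefix("share", "share")          // ""
//	betterPathClean("./a//b/")                // "a/b"
//	betterPathClean(".")                      // ""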

type boundReadCloser struct {
	rc    io.ReadCloser
	close func() error
}

func (r *boundReadCloser) Read(p []byte) (n int, err error) {
	return r.rc.Read(p)
}

func (r *boundReadCloser) Close() error {
	err1 := r.rc.Close()
	err2 := r.close()
	if err1 != nil {
		return err1
	}
	return err2
}
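
// Design note (added commentary): boundReadCloser ties the lifetime of a
// pooled SMB connection to the reader handed out by Open. Close always runs
// both the wrapped reader's Close and the close callback that returns the
// connection to the pool; the reader's error, if any, takes precedence.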

func translateError(e error, dir bool) error {
	if os.IsNotExist(e) {
		if dir {
			return fs.ErrorDirNotFound
		}
		return fs.ErrorObjectNotFound
	}

	return e
}
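
// Illustrative sketch (added commentary): how translateError maps "not
// found" errors onto rclone's sentinel errors:
//
//	translateError(os.ErrNotExist, true)  // fs.ErrorDirNotFound
//	translateError(os.ErrNotExist, false) // fs.ErrorObjectNotFound
//	translateError(io.EOF, false)         // passed through unchanged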

var (
	_ fs.Fs          = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Mover       = &Fs{}
	_ fs.DirMover    = &Fs{}
	_ fs.Abouter     = &Fs{}
	_ fs.Shutdowner  = &Fs{}
	_ fs.Object      = &Object{}
	_ io.ReadCloser  = &boundReadCloser{}
)
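
// Design note (added commentary): the blank-identifier assignments above are
// compile-time interface checks; the package stops building if *Fs or
// *Object ever ceases to satisfy one of the listed rclone interfaces, e.g.:
//
//	var _ fs.PutStreamer = &Fs{} // fails to compile without a PutStream method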

@@ -1,17 +0,0 @@
// Test smb filesystem interface
package smb_test

import (
	"testing"

	"github.com/rclone/rclone/backend/smb"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestSMB:rclone",
		NilObject:  (*smb.Object)(nil),
	})
}
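
// Design note (added commentary): fstests.Run drives rclone's shared
// integration-test suite against the remote named in RemoteName, so the smb
// backend is exercised through the same generic tests as every other
// backend; NilObject supplies the backend's Object type to the suite.
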
@@ -683,9 +683,9 @@ func newPrefix(prefix string) string {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -200,7 +200,7 @@ type Fs struct {
	root     string             // the path we are working on
	opt      Options            // parsed options
	features *fs.Features       // optional features
	srv      *rest.Client       // the connection to the server
	srv      *rest.Client       // the connection to the one drive server
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *fs.Pacer          // pacer for API calls
	m        configmap.Mapper   // config file access
@@ -713,7 +713,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error.
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -732,7 +732,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -755,9 +755,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists.
// This will produce an error if the object already exists
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -852,9 +852,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -985,9 +985,9 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1156,6 +1156,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1228,7 +1229,7 @@ func (f *Fs) createFile(ctx context.Context, pathID, leaf, mimeType string) (new

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one.
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -40,7 +40,7 @@ const (
	minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)

// SharedOptions are shared between swift and backends which depend on swift
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
	Name: "chunk_size",
	Help: `Above this size files will be chunked into a _segments container.
@@ -63,32 +63,6 @@ Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
	Default:  false,
	Advanced: true,
}, {
	Name: "no_large_objects",
	Help: strings.ReplaceAll(`Disable support for static and dynamic large objects

Swift cannot transparently store files bigger than 5 GiB. There are
two schemes for doing that, static or dynamic large objects, and the
API does not allow rclone to determine whether a file is a static or
dynamic large object without doing a HEAD on the object. Since these
need to be treated differently, this means rclone has to issue HEAD
requests for objects for example when reading checksums.

When |no_large_objects| is set, rclone will assume that there are no
static or dynamic large objects stored. This means it can stop doing
the extra HEAD calls which in turn increases performance greatly
especially when doing a swift to swift transfer with |--checksum| set.

Setting this option implies |no_chunk| and also that no files will be
uploaded in chunks, so files bigger than 5 GiB will just fail on
upload.

If you set this option and there *are* static or dynamic large objects,
then this will give incorrect hashes for them. Downloads will succeed,
but other operations such as Remove and Copy will fail.
`, "|", "`"),
	Default:  false,
	Advanced: true,
}, {
	Name: config.ConfigEncoding,
	Help: config.ConfigEncodingHelp,
@@ -248,7 +222,6 @@ type Options struct {
	EndpointType   string               `config:"endpoint_type"`
	ChunkSize      fs.SizeSuffix        `config:"chunk_size"`
	NoChunk        bool                 `config:"no_chunk"`
	NoLargeObjects bool                 `config:"no_large_objects"`
	Enc            encoder.MultiEncoder `config:"encoding"`
}

@@ -817,7 +790,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Put the object into the container
//
// Copy the reader in to the new object which is returned.
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -929,9 +902,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
// This is stored with the remote path given
//
// It returns the destination Object and a possible error.
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1046,7 +1019,7 @@ func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer strin
	return err
}

// remove copied segments when copy process failed
//remove copied segments when copy process failed
func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
	fs.Debugf(f, "handle copy segment fail")
	if err == nil {
@@ -1127,24 +1100,15 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {

// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	return o.hasHeader(ctx, "X-Object-Manifest")
}

// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	return o.hasHeader(ctx, "X-Static-Large-Object")
}

func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	result, err = o.hasHeader(ctx, "X-Static-Large-Object")
	if result {
		return
@@ -1176,11 +1140,10 @@ func (o *Object) Size() int64 {
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
//
//	o.lastModified
//	o.size
//	o.md5
//	o.contentType
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
	o.lastModified = info.LastModified
	o.size = info.Bytes
@@ -1221,6 +1184,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1500,7 +1464,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	headers := m.ObjectHeaders()
	fs.OpenOptionAddHeaders(options, headers)

	if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
		_, err = o.updateChunks(ctx, in, headers, size, contentType)
		if err != nil {
			return err

@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
		want string
	}{
		{"", ""},
		{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
		{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
		{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
		{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
		{"0123456789", "0123456789"},
		{"abc/ABC/123", "abc/ABC/123"},
		{"   ", "%20%20%20"},