mirror of https://github.com/rclone/rclone.git synced 2026-01-26 06:13:32 +00:00

Compare commits


1 commit

273 changed files with 43929 additions and 70534 deletions

View File

@@ -25,12 +25,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
include:
- job_name: linux
os: ubuntu-latest
go: '1.18.x'
go: '1.17.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -40,8 +40,8 @@ jobs:
deploy: true
- job_name: mac_amd64
os: macos-11
go: '1.18.x'
os: macOS-latest
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -49,38 +49,49 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-11
go: '1.18.x'
os: macOS-latest
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
- job_name: windows_amd64
os: windows-latest
go: '1.18.x'
go: '1.17.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.17.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.18.x'
go: '1.17.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.16
- job_name: go1.15
os: ubuntu-latest
go: '1.16.x'
go: '1.15.x'
quicktest: true
racequicktest: true
- job_name: go1.17
- job_name: go1.16
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
quicktest: true
racequicktest: true
@@ -99,7 +110,6 @@ jobs:
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
- name: Set environment variables
shell: bash
@@ -124,7 +134,7 @@ jobs:
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
@@ -167,11 +177,6 @@ jobs:
run: |
make
- name: Rclone version
shell: bash
run: |
rclone version
- name: Run tests
shell: bash
run: |
@@ -240,14 +245,14 @@ jobs:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
- name: Set up Go 1.16
uses: actions/setup-go@v1
with:
go-version: 1.18.x
go-version: 1.16
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
@@ -268,8 +273,8 @@ jobs:
- name: install gomobile
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
@@ -278,7 +283,7 @@ jobs:
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
@@ -291,7 +296,7 @@ jobs:
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -304,7 +309,7 @@ jobs:
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
@@ -317,7 +322,7 @@ jobs:
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV

View File

@@ -20,7 +20,7 @@ jobs:
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -28,7 +28,7 @@ jobs:
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -50,7 +50,7 @@ jobs:
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}

View File

@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| Caleb Case | @calebcase | tardigrade backend |
**This is a work-in-progress draft**

MANUAL.html generated (5153 changes); file diff suppressed because it is too large

MANUAL.md generated (5730 changes); file diff suppressed because it is too large

MANUAL.txt generated (7540 changes); file diff suppressed because it is too large

View File

@@ -97,7 +97,7 @@ release_dep_linux:
# Get the release dependencies we only install on Windows
release_dep_windows:
GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
# Update dependencies
showupdates:
@@ -245,18 +245,18 @@ retag:
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

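For reference, the startdev and startstable recipes above regenerate the version file by piping an echo through gofmt; on the fs/version.go side of the diff the generated file has this shape (version string illustrative):

package fs

// Version of rclone
var Version = "v1.59.0-DEV"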
View File

@@ -1,5 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -21,19 +20,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
@@ -44,7 +38,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
@@ -72,8 +65,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -1 +1 @@
v1.59.0
v1.58.0

View File

@@ -22,14 +22,12 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -41,9 +39,9 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

View File

@@ -26,7 +26,6 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -44,14 +43,15 @@ import (
const (
minSleep = 10 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
maxUploadParts = 50000 // maximum allowed number of parts/blocks in a multi-part upload
decayConstant = 1 // bigger for slower decay, exponential
maxListChunkSize = 5000 // number of items to read at once
modTimeKey = "mtime"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -134,33 +134,12 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size (<= 100 MiB).
Note that this is stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
"--transfers" chunks stored at once in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.
In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -278,7 +257,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -373,9 +351,15 @@ func (o *Object) split() (container, containerPath string) {
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
return strings.EqualFold(tier, string(azblob.AccessTierHot)) ||
strings.EqualFold(tier, string(azblob.AccessTierCool)) ||
strings.EqualFold(tier, string(azblob.AccessTierArchive))
switch tier {
case string(azblob.AccessTierHot),
string(azblob.AccessTierCool),
string(azblob.AccessTierArchive):
// valid cases
return true
default:
return false
}
}
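The two variants above differ in case handling: strings.EqualFold accepts any capitalisation of the tier name, while the plain switch matches only the exact azblob constant strings ("Hot", "Cool", "Archive"). A minimal standalone comparison (not rclone code):

package main

import (
	"fmt"
	"strings"
)

// viaEqualFold mirrors the removed variant: any capitalisation passes.
func viaEqualFold(tier string) bool {
	return strings.EqualFold(tier, "Hot") ||
		strings.EqualFold(tier, "Cool") ||
		strings.EqualFold(tier, "Archive")
}

// viaSwitch mirrors the restored variant: only the exact strings pass.
func viaSwitch(tier string) bool {
	switch tier {
	case "Hot", "Cool", "Archive":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(viaEqualFold("hot"), viaSwitch("hot")) // true false
}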
// validatePublicAccess checks if azureblob supports user supplied public access level
@@ -432,6 +416,9 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -608,7 +595,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.UseMSI:
var token adal.Token
var userMSI = &userMSI{}
var userMSI *userMSI = &userMSI{}
if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
@@ -1457,10 +1444,6 @@ func (o *Object) clearMetaData() {
// o.size
// o.md5
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
@@ -1653,10 +1636,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs
}
}
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
@@ -1685,21 +1665,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
uploadParts := int64(maxUploadParts)
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(partSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}
// Don't retry, return a retry error instead

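The removed lines size each part with fs/chunksize.Calculator so that a known-length upload never exceeds the service's part limit. A standalone sketch of the idea, using the azureblob constants from this hunk (rclone's Calculator may differ in detail):

package main

import "fmt"

const (
	maxUploadParts   = 50000   // maximum parts/blocks in a multipart upload
	defaultChunkSize = 4 << 20 // 4 MiB default chunk size
)

// partSize grows the chunk size from the default until the whole file
// fits within maxUploadParts parts.
func partSize(size int64) int64 {
	cs := int64(defaultChunkSize)
	for size/cs >= maxUploadParts {
		cs *= 2
	}
	return cs
}

func main() {
	fmt.Println(partSize(1 << 40)) // a 1 TiB upload needs 32 MiB chunks
}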
View File

@@ -17,10 +17,12 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}
@@ -61,25 +63,3 @@ func TestServicePrincipalFileFailure(t *testing.T) {
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string
want bool
}{
"hot": {"hot", true},
"HOT": {"HOT", true},
"Hot": {"Hot", true},
"cool": {"cool", true},
"archive": {"archive", true},
"empty": {"", false},
"unknown": {"unknown", false},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
got := validateAccessTier(test.accessTier)
assert.Equal(t, test.want, got)
})
}
}

View File

@@ -64,8 +64,7 @@ const (
// Globals
var (
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
// Register with Fs
@@ -107,11 +106,6 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Help: "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "version_at",
Help: "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: fs.Time{},
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
@@ -166,15 +160,7 @@ free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.
The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".
Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")`,
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
Name: "download_auth_duration",
@@ -217,7 +203,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -703,12 +688,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
Method: "POST",
Path: "/b2_list_file_names",
}
if hidden || f.opt.VersionAt.IsSet() {
if hidden {
opts.Path = "/b2_list_file_versions"
}
lastFileName := ""
for {
var response api.ListFileNamesResponse
err := f.pacer.Call(func() (bool, error) {
@@ -738,21 +720,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if addBucket {
remote = path.Join(bucket, remote)
}
if f.opt.VersionAt.IsSet() {
if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
// Ignore versions that were created after the specified time
continue
}
if file.Name == lastFileName {
// Ignore versions before the already returned version
continue
}
}
// Send object
lastFileName = file.Name
err = fn(remote, file, isDirectory)
if err != nil {
if err == errEndList {
@@ -1852,9 +1820,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
size := src.Size()
bucket, bucketPath := o.split()
@@ -2010,9 +1975,6 @@ func (o *Object) Remove(ctx context.Context) error {
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(ctx, o.id, bucketPath)
}

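A minimal standalone sketch of the removed --b2-version-at selection: given versions sorted by name with newest first (as b2_list_file_versions returns them), keep the first version per name that is not after the cutoff. Not rclone code; names and values are illustrative:

package main

import (
	"fmt"
	"time"
)

type version struct {
	name string
	ts   time.Time
}

// at keeps, per file name, the newest version not created after cutoff.
func at(versions []version, cutoff time.Time) []version {
	var out []version
	last := ""
	for _, v := range versions {
		if v.ts.After(cutoff) {
			continue // created after the requested time
		}
		if v.name == last {
			continue // an older version of a file already returned
		}
		last = v.name
		out = append(out, v)
	}
	return out
}

func main() {
	t := func(h int) time.Time { return time.Date(2022, 1, 1, h, 0, 0, 0, time.UTC) }
	vs := []version{{"a.txt", t(12)}, {"a.txt", t(8)}, {"a.txt", t(6)}}
	fmt.Println(at(vs, t(9))) // keeps only the 08:00 version of a.txt
}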
View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
@@ -89,19 +88,21 @@ type largeUpload struct {
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, fmt.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}

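Worked numbers for the part-count check in newLargeUpload above (chunk size illustrative):

package main

import "fmt"

// countParts rounds up, matching the parts computation in newLargeUpload.
func countParts(size, chunkSize int64) int64 {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++ // the final short chunk still counts as a part
	}
	return parts
}

func main() {
	fmt.Println(countParts(1<<40, 100<<20)) // 10486 parts for 1 TiB in 100 MiB chunks
}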
View File

@@ -394,11 +394,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
rps := rate.Inf
if opt.Rps > 0 {
rps = rate.Limit(float64(opt.Rps))
}
f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
if opt.PlexURL != "" {
@@ -1747,7 +1743,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}

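The removed guard above maps a non-positive rps setting to rate.Inf rather than handing rate.NewLimiter a zero limit (a zero limit never refills the token bucket). A minimal standalone sketch:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// newLimiter treats rps <= 0 as "unlimited", mirroring the removed guard.
func newLimiter(rps float64, burst int) *rate.Limiter {
	r := rate.Inf
	if rps > 0 {
		r = rate.Limit(rps)
	}
	return rate.NewLimiter(r, burst)
}

func main() {
	l := newLimiter(0, 4)
	fmt.Println(l.Allow()) // true: unlimited instead of a stalled limiter
}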
View File

@@ -515,7 +515,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex)
@@ -524,7 +524,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
if numDigits > 1 {
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
}
strFmt := strings.ReplaceAll(pattern, "%", "%%")
strFmt := strings.Replace(pattern, "%", "%%", -1)
strFmt = strings.Replace(strFmt, "*", "%s", 1)
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
@@ -1895,7 +1895,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.base.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
return errors.New("can't CleanUp")
}
return do(ctx)
}
@@ -1904,7 +1904,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.base.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}

View File

@@ -401,10 +401,6 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
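Note the removed w.Close() call: flate buffers output, so closing the writer before reading b.Len() is what makes the measured ratio meaningful. A standalone sketch of the heuristic (threshold value is an assumption, not rclone's constant):

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"strings"
)

const minCompressionRatio = 1.1 // assumed threshold; rclone's constant may differ

// isCompressible compresses the sample and compares input size to output size.
func isCompressible(r io.Reader) (bool, error) {
	var b bytes.Buffer
	w, err := flate.NewWriter(&b, flate.BestSpeed)
	if err != nil {
		return false, err
	}
	n, err := io.Copy(w, r)
	if err != nil {
		return false, err
	}
	// Close flushes flate's buffered output; skip it and b.Len() undercounts.
	if err := w.Close(); err != nil {
		return false, err
	}
	return float64(n)/float64(b.Len()) > minCompressionRatio, nil
}

func main() {
	ok, _ := isCompressible(strings.NewReader(strings.Repeat("abc", 1000)))
	fmt.Println(ok) // true: highly repetitive data compresses well
}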
@@ -630,11 +626,9 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
return nil, err
}
@@ -906,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
return errors.New("can't CleanUp: not supported by underlying remote")
}
return do(ctx)
}
@@ -915,7 +909,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("can't About: not supported by underlying remote")
}
return do(ctx)
}

View File

@@ -443,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -597,7 +597,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
return errors.New("can't CleanUp")
}
return do(ctx)
}
@@ -606,7 +606,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
return do(ctx)
}

View File

@@ -70,7 +70,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -84,7 +84,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -299,17 +299,6 @@ a non root folder as its starting point.
Default: true,
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "copy_shortcut_content",
Default: false,
Help: `Server side copy contents of shortcuts instead of the shortcut.
When doing server side copies, normally rclone will copy shortcuts as
shortcuts.
If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
@@ -553,14 +542,6 @@ Google don't document so it may break in the future.
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: "skip_dangling_shortcuts",
Help: `If set skip dangling shortcut files.
If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
@@ -597,7 +578,6 @@ type Options struct {
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
@@ -624,7 +604,6 @@ type Options struct {
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -660,7 +639,6 @@ type baseObject struct {
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
resourceKey *string // resourceKey is needed for link shared objects
}
type documentObject struct {
baseObject
@@ -830,8 +808,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if title != "" {
searchTitle := f.opt.Enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -928,11 +906,6 @@ OUTER:
if err != nil {
return false, fmt.Errorf("list: %w", err)
}
// leave the dangling shortcut out of the listings
// we've already logged about the dangling shortcut in resolveShortcut
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
continue
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
@@ -1320,16 +1293,12 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
}
}
o := &Object{
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
o.resourceKey = &info.ResourceKey
}
return o
}
// newDocumentObject creates an fs.Object for a google docs drive.File
@@ -1602,15 +1571,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}
// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}
// else return empty
return "", "", isDocument
}
@@ -1621,14 +1581,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -1741,6 +1693,11 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
var paths []string
var grouping int32
usingQueryFilter := false
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
usingQueryFilter = true
}
for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
@@ -1813,7 +1770,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
// However, empty result is legitimate if query filter was applied.
if len(dirs) > 1 && !foundItems && !usingQueryFilter {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -1831,7 +1789,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
}
// If using a grouping of 1 and dir was empty then check to see if it
// is part of the group that caused grouping to be disabled.
if grouping == 1 && len(dirs) == 1 && !foundItems {
// However, empty result is legitimate if query filter was applied.
if grouping == 1 && len(dirs) == 1 && !foundItems && !usingQueryFilter {
f.listRmu.Lock()
if _, found := f.listRempties[dirs[0]]; found {
// Remove the ID
@@ -2422,24 +2381,16 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
// get the ID of the thing to copy - this is the shortcut if available
id := shortcutID(srcObj.id)
if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
copy := f.svc.Files.Copy(id, createInfo).
info, err = f.svc.Files.Copy(id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
info, err = copy.Context(ctx).Do()
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
if err != nil {
@@ -3536,14 +3487,6 @@ func (o *baseObject) Storable() bool {
return true
}
// addResourceKey adds a X-Goog-Drive-Resource-Keys header for this
// object if required.
func (o *baseObject) addResourceKey(header http.Header) {
if o.resourceKey != nil {
header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
}
}
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
@@ -3559,7 +3502,6 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
o.addResourceKey(req.Header)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {

View File

@@ -422,7 +422,11 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)
dir := t.TempDir()
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -487,11 +491,19 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1 := t.TempDir()
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2 := t.TempDir()
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)

View File

@@ -118,12 +118,12 @@ func (b *batcher) Batching() bool {
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -137,7 +137,7 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
if err != nil {
return nil, fmt.Errorf("batch commit failed: %w", err)
}
return complete, nil
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
@@ -199,11 +199,26 @@ func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionF
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
complete, err := b.finishBatch(ctx, items)
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return fmt.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {

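The restored switch handles the two shapes of UploadSessionFinishBatch's launch result: small batches complete inline, larger ones return an async job id that must be polled. A standalone sketch with stand-in types (not the SDK's):

package main

import "fmt"

// batchLaunch stands in for the SDK's UploadSessionFinishBatchLaunch.
type batchLaunch struct {
	Tag        string
	AsyncJobID string
	Complete   *batchResult
}

type batchResult struct{ Entries []string }

// resolve returns the completed batch, polling when the commit went async.
func resolve(launch batchLaunch, poll func(id string) (*batchResult, error)) (*batchResult, error) {
	switch launch.Tag {
	case "async_job_id":
		return poll(launch.AsyncJobID)
	case "complete":
		return launch.Complete, nil
	default:
		return nil, fmt.Errorf("batch returned unknown status %q", launch.Tag)
	}
}

func main() {
	res, err := resolve(batchLaunch{Tag: "complete", Complete: &batchResult{Entries: []string{"a"}}}, nil)
	fmt.Println(res.Entries, err)
}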
View File

@@ -1269,7 +1269,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("about failed: %w", err)
}
var total uint64
if q.Allocation != nil {
@@ -1370,12 +1370,10 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if timeout < 30 {
timeout = 30
fs.Debugf(f, "Increasing poll interval to minimum 30s")
}
if timeout > 480 {
timeout = 480
fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}
err = f.pacer.Call(func() (bool, error) {
@@ -1652,37 +1650,13 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
return false, err
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
if err != nil {
// Check for incorrect offset error and retry with new offset
if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
fs.Debugf(o, "%s: chunk received OK - continuing", what)
return false, nil
} else if skip > chunkSize {
// This error should never happen
return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
}
// Skip the sent data on next retry
cursor.Offset = uint64(int64(cursor.Offset) + delta)
fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
}
}
}
return err != nil, err
})
if err != nil {
@@ -1786,7 +1760,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
entry, err = o.fs.srv.Upload(commitInfo, in)
return shouldRetry(ctx, err)
})
}

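To put numbers on the removed offset-correction logic (illustrative values): with a 50 MiB chunk size, if rclone has sent up to offset 100 MiB but the incorrect_offset error reports a correct offset of 150 MiB, then delta is 50 MiB and skip equals exactly one chunk, meaning the chunk actually arrived and the retry simply moves on. A negative delta, or one larger than a chunk, cannot be recovered by seeking within the buffered chunk, so the upload fails.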
View File

@@ -42,15 +42,18 @@ func init() {
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
@@ -514,32 +517,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)

View File

@@ -182,34 +182,3 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
// AccountInfo is the structure how 1Fichier returns user info
type AccountInfo struct {
StatsDate string `json:"stats_date"`
MailRM string `json:"mail_rm"`
DefaultQuota int64 `json:"default_quota"`
UploadForbidden string `json:"upload_forbidden"`
PageLimit int `json:"page_limit"`
ColdStorage int64 `json:"cold_storage"`
Status string `json:"status"`
UseCDN string `json:"use_cdn"`
AvailableColdStorage int64 `json:"available_cold_storage"`
DefaultPort string `json:"default_port"`
DefaultDomain int `json:"default_domain"`
Email string `json:"email"`
DownloadMenu string `json:"download_menu"`
FTPDID int `json:"ftp_did"`
DefaultPortFiles string `json:"default_port_files"`
FTPReport string `json:"ftp_report"`
OverQuota int64 `json:"overquota"`
AvailableStorage int64 `json:"available_storage"`
CDN string `json:"cdn"`
Offer string `json:"offer"`
SubscriptionEnd string `json:"subscription_end"`
TFA string `json:"2fa"`
AllowedColdStorage int64 `json:"allowed_cold_storage"`
HotStorage int64 `json:"hot_storage"`
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
FTPMode string `json:"ftp_mode"`
RUReport string `json:"ru_report"`
}

View File

@@ -52,13 +52,11 @@ func init() {
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
}, {
Name: "port",
Help: "FTP port number.",
Default: 21,
Name: "port",
Help: "FTP port, leave blank to use default (21).",
}, {
Name: "pass",
Help: "FTP password.",
@@ -100,11 +98,6 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "disable_utf8",
Help: "Disable using UTF-8 even if server advertises support.",
Default: false,
Advanced: true,
}, {
Name: "writing_mdtm",
Help: "Use MDTM to set modification time (VsFtpd quirk)",
@@ -189,7 +182,6 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
@@ -344,9 +336,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.opt.DisableUTF8 {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
}
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
}

View File

@@ -24,7 +24,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
@@ -66,7 +65,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
@@ -183,30 +182,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-northeast1",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland",
@@ -222,12 +206,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "europe-west4",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa",
@@ -243,33 +221,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west2",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",
@@ -296,30 +247,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: "download_compressed",
Help: `If set this will download compressed objects as-is.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will transparently decompress these files on
download. This means that rclone can't check the hash or the size of
the file as both of these refer to the compressed object.
If this flag is set then rclone will download files with
"Content-Encoding: gzip" as they are received. This means that rclone
can check the size and hash but the file contents will be compressed.
`,
Advanced: true,
Default: false,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -342,24 +269,21 @@ type Options struct {
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
DownloadCompressed bool `config:"download_compressed"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
warnCompressed sync.Once // warn once about compressed files
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -373,7 +297,6 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}
// ------------------------------------------------------------
@@ -511,7 +434,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
@@ -869,14 +792,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
}, nil)
}
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.makeBucket(ctx, bucket)
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
@@ -910,7 +825,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.checkBucket(ctx, dstBucket)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -994,7 +909,6 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -1033,15 +947,6 @@ func (o *Object) setMetaData(info *storage.Object) {
} else {
o.modTime = modTime
}
// If gunzipping then size and md5sum are unknown
if o.gzipped && !o.fs.opt.DownloadCompressed {
o.bytes = -1
o.md5sum = ""
o.fs.warnCompressed.Do(func() {
fs.Logf(o.fs, "Decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-download-compressed to override")
})
}
}
// readObjectInfo reads the definition for an object
@@ -1142,15 +1047,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
fs.FixRangeOption(options, o.bytes)
if o.gzipped && o.fs.opt.DownloadCompressed {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1179,7 +1075,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.checkBucket(ctx, bucket)
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
}

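The removed lines implement GCS transcoding control (see https://cloud.google.com/storage/docs/transcoding): objects stored with Content-Encoding: gzip are decompressed in transit by default, so the on-the-wire size and hash no longer match the stored object's metadata. A minimal standalone sketch of the header that requests the stored bytes instead (URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://storage.googleapis.com/bucket/object", nil)
	if err != nil {
		panic(err)
	}
	// Ask for the stored gzip bytes so the size and MD5 stay verifiable.
	req.Header.Set("Accept-Encoding", "gzip")
	fmt.Println(req.Header.Get("Accept-Encoding"))
}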
View File

@@ -69,7 +69,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
@@ -562,7 +562,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.ReplaceAll(remote, "/", "")
remote = strings.Replace(remote, "/", "", -1)
err = fn(remote, item, false)
if err != nil {
return err

View File

@@ -202,11 +202,7 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
hashEntries = append(hashEntries, f.wrapObject(x, nil))
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
@@ -255,7 +251,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutStream not supported")
}
@@ -265,7 +261,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutUnchecked not supported")
}
@@ -282,7 +278,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
if do := f.Fs.Features().CleanUp; do != nil {
return do(ctx)
}
return errors.New("not supported by underlying remote")
return errors.New("CleanUp not supported")
}
// About gets quota information from the Fs
@@ -290,7 +286,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if do := f.Fs.Features().About; do != nil {
return do(ctx)
}
return nil, errors.New("not supported by underlying remote")
return nil, errors.New("About not supported")
}
// ChangeNotify calls the passed function with a path that has had changes.
@@ -352,7 +348,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
// Move src to this remote using server-side move operations.
@@ -375,7 +371,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil)
return f.wrapObject(oResult, nil), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
@@ -414,7 +410,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err)
return f.wrapObject(o, err), err
}
//
@@ -428,15 +424,11 @@ type Object struct {
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
if err != nil {
return nil, err
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
return &Object{Object: o, f: f}
}
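The reverted one-return wrapObject is where the hasher backend picks up Go's classic typed-nil hazard: a nil *Object returned as the fs.Object interface compares non-nil at call sites. A self-contained illustration of that pitfall, with types simplified and not taken from rclone:

package main

import "fmt"

type Object interface{ Remote() string }

type hashObject struct{}

func (hashObject) Remote() string { return "nothing" }

// wrap mirrors the single-return style: it hands back a concrete pointer.
func wrap(ok bool) *hashObject {
	if !ok {
		return nil
	}
	return &hashObject{}
}

func main() {
	var o Object = wrap(false) // nil *hashObject boxed into an interface
	fmt.Println(o == nil)      // false: the interface carries a type, so it is non-nil
}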
// Fs returns read only access to the Fs that this object is part of

View File

@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
var (
o fs.Object
o *Object
common hash.Set
rehash bool
hashes hashMap
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o, err = f.wrapObject(oResult, err)
if err != nil {
o = f.wrapObject(oResult, err)
if o == nil {
return nil, err
}
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
}
if len(hashes) > 0 {
err := o.(*Object).putHashes(ctx, hashes)
err := o.putHashes(ctx, hashes)
fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
}
return o, err

View File

@@ -22,8 +22,9 @@ func init() {
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
Name: "username",
Help: "Hadoop user name.",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root.",
@@ -35,6 +36,7 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Required: false,
Advanced: true,
}, {
Name: "data_transfer_protection",
@@ -44,6 +46,7 @@ Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -52,7 +52,8 @@ The input format is comma separated list of key,value pairs. Standard
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
@@ -73,9 +74,8 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests.
Help: `Don't use HEAD requests to find file sizes in dir listing.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -84,9 +84,12 @@ directory listing to:
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
@@ -130,87 +133,11 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return fmt.Errorf("HTTP Error: %s", res.Status)
return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// if it is directory or file, and if directory appends the missing
// '/', or if file returns the directory url to parent instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
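getFsEndpoint, removed in this revert, packages a reusable probe: issue the HEAD through a client whose CheckRedirect returns http.ErrUseLastResponse, so a redirect from /dir to /dir/ can be inspected instead of silently followed. A stripped-down sketch of that probe, assuming only the standard library:

// headNoRedirect sends a HEAD request without following redirects, so the
// caller can examine the status and Location header of the first response.
// Imports assumed: "context", "net/http".
func headNoRedirect(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	noRedir := *client // shallow copy leaves the caller's client untouched
	noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse // stop at the first response
	}
	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return noRedir.Do(req)
}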
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -241,9 +168,37 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
if err != nil {
return nil, err
}
@@ -261,16 +216,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
@@ -346,7 +297,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Contains(uStr, "?") {
if strings.Index(uStr, "?") >= 0 {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -458,7 +409,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, fmt.Errorf("can't parse content type %q", contentType)
return nil, fmt.Errorf("Can't parse content type %q", contentType)
}
return names, nil
}

View File

@@ -8,10 +8,8 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -26,11 +24,10 @@ import (
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -38,22 +35,6 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -110,7 +91,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(5+lineEndSize), e.Size())
assert.Equal(t, int64(6), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
@@ -127,7 +108,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(40+lineEndSize), e.Size())
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
@@ -160,7 +141,7 @@ func TestListSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -173,7 +154,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
assert.Equal(t, int64(9), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
@@ -206,11 +187,7 @@ func TestOpen(t *testing.T) {
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -259,7 +236,7 @@ func TestIsAFileSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -376,106 +353,3 @@ func TestParseCaddy(t *testing.T) {
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}
func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that do not end with '/', exercising the logic that
// decides if url is to be considered a file or directory, based
// on result from a HEAD request.
// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})
// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},
// 3xx redirection Redirect status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
{"redirect/file/302", true}, // Request is redirected to "/redirected"
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
{"redirect/file/303", true}, // Request is redirected to "/redirected"
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/307", true}, // Request is redirected to "/redirected"
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
{"redirect/file/308", true}, // Request is redirected to "/redirected"
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
// 4xx client errors
{"parent/403", true}, // Forbidden status (head request blocked)
{"parent/404", false}, // Not found status
} {
for _, noHead := range []bool{false, true} {
var isFile bool
if noHead {
m.Set("no_head", "true")
isFile = true
} else {
m.Set("no_head", "false")
isFile = test.isFile
}
headCount = 0
f, err := NewFs(context.Background(), remoteName, test.root, m)
if noHead {
assert.Equal(t, 0, headCount)
} else {
assert.Equal(t, 1, headCount)
}
if isFile {
assert.ErrorIs(t, err, fs.ErrorIsFile)
} else {
assert.NoError(t, err)
}
var endpoint string
if isFile {
parent, _ := path.Split(test.root)
endpoint = "/" + parent
} else {
endpoint = "/" + test.root + "/"
}
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
assert.Equal(t, ts.URL+endpoint, f.String(), what)
}
}
}

View File

@@ -119,7 +119,7 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test internetarchive filesystem interface
package internetarchive_test
import (
"testing"
"github.com/rclone/rclone/backend/internetarchive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestIA:lesmi-rclone-test/",
NilObject: (*internetarchive.Object)(nil),
})
}

View File

@@ -8,69 +8,42 @@ import (
)
const (
// default time format historically used for all requests and responses.
// Similar to time.RFC3339, but with an extra '-' in front of 'T',
// and no ':' separator in timezone offset. Some newer endpoints have
// moved to proper time.RFC3339 conformant format instead.
jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
// default time format for almost all requests and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
)
// unmarshalXML turns XML into a Time
func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
// Time represents time values in the Jottacloud API. It uses a custom RFC3339-like format.
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return time.Time{}, err
return err
}
if v == "" {
return time.Time{}, nil
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
return newTime, nil
*t = Time(newTime)
}
return time.Time{}, err
}
// JottaTime represents time values in the classic API using a custom RFC3339-like format
type JottaTime time.Time
// String returns JottaTime string in Jottacloud classic format
func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }
// UnmarshalXML turns XML into a JottaTime
func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
*t = JottaTime(tm)
return err
}
// MarshalXML turns a JottaTime into XML
func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
type Rfc3339Time time.Time
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// String returns Rfc3339Time string in Jottacloud RFC3339 format
func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }
// UnmarshalXML turns XML into a Rfc3339Time
func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tm, err := unmarshalXMLTime(d, start, time.RFC3339)
*t = Rfc3339Time(tm)
return err
}
// MarshalXML turns a Rfc3339Time into XML
func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
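Both time types in this hunk are ordinary Go reference-time layouts; the classic Jottacloud format differs from RFC 3339 only in the extra '-' before 'T' and the missing ':' in the zone offset. A small standalone example of round-tripping the custom layout (the timestamp value is made up):

package main

import (
	"fmt"
	"time"
)

const jottaTimeFormat = "2006-01-02-T15:04:05Z0700"

func main() {
	t, err := time.Parse(jottaTimeFormat, "2021-05-03-T09:15:00+0200")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339))    // 2021-05-03T09:15:00+02:00
	fmt.Println(t.Format(jottaTimeFormat)) // 2021-05-03-T09:15:00+0200
}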
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
@@ -149,11 +122,16 @@ type AllocateFileResponse struct {
// UploadResponse after an upload
type UploadResponse struct {
Path string `json:"path"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Modified int64 `json:"modified"`
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
@@ -360,9 +338,9 @@ type JottaFolder struct {
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt JottaTime `xml:"created"`
ModifiedAt JottaTime `xml:"modified"`
Updated JottaTime `xml:"updated"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
@@ -387,17 +365,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"`
Updated JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicURI string `xml:"publicURI"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses

View File

@@ -7,7 +7,6 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -191,7 +190,7 @@ machines.`)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
@@ -519,7 +518,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
values.Set("client_id", defaultClientID)
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "openid offline_access")
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
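The scope revert above is subtler than it looks: url.Values percent-encodes a literal '+', so "offline_access+openid" reaches the server as offline_access%2Bopenid, while a space-separated scope list is encoded with '+' per form encoding. A quick standard-library demonstration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("scope", "offline_access+openid")
	fmt.Println(v.Encode()) // scope=offline_access%2Bopenid

	v.Set("scope", "openid offline_access")
	fmt.Println(v.Encode()) // scope=openid+offline_access (space becomes '+')
}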
@@ -649,7 +648,7 @@ func errorHandler(resp *http.Response) error {
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.ReplaceAll(rest.URLPathEscape(in), "+", "%2B")
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
}
// filePathRaw returns an unescaped file path (f.root, file)
@@ -932,105 +931,48 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}
func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback func(fs.DirEntry) error) error {
// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
type stats struct {
Folders int `xml:"folders"`
Files int `xml:"files"`
}
var expected, actual stats
type xmlFile struct {
Path string `xml:"path"`
Name string `xml:"filename"`
Checksum string `xml:"md5"`
Size int64 `xml:"size"`
Modified api.Rfc3339Time `xml:"modified"` // Note: Liststream response includes 3 decimal milliseconds, but we ignore them since there is second precision everywhere else
Created api.Rfc3339Time `xml:"created"`
}
type xmlFolder struct {
Path string `xml:"path"`
}
addFolder := func(path string) error {
return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
}
addFile := func(f *xmlFile) error {
return callback(&Object{
hasMetaData: true,
fs: filesystem,
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
modTime: time.Time(f.Modified),
})
}
// liststream paths are /mountpoint/root/path
// so the returned paths should have /mountpoint/root/ trimmed
// as the caller is expecting path.
pathPrefix := filesystem.opt.Enc.FromStandardPath(path.Join("/", filesystem.opt.Mountpoint, filesystem.root))
trimPathPrefix := func(p string) string {
p = strings.TrimPrefix(p, pathPrefix)
p = strings.TrimPrefix(p, "/")
return p
}
uniqueFolders := map[string]bool{}
decoder := xml.NewDecoder(r)
for {
t, err := decoder.Token()
if err != nil {
if err != io.EOF {
return err
}
break
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
startPathLength := len(startPath)
for i := range startFolder.Folders {
folder := &startFolder.Folders[i]
if !f.validFolder(folder) {
return nil
}
switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "file":
var f xmlFile
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
actual.Files++
if !uniqueFolders[f.Path] {
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
}
if err := addFile(&f); err != nil {
return err
}
case "folder":
var f xmlFolder
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
case "stats":
if err := decoder.DecodeElement(&expected, &se); err != nil {
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
remoteDir = folderPath[pathPrefixLength+1:]
if folderPathLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {
return err
}
}
}
for i := range folder.Files {
file := &folder.Files[i]
if f.validFile(file) {
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
}
err = fn(o)
if err != nil {
return err
}
}
}
}
if expected.Folders != actual.Folders ||
expected.Files != actual.Files {
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
}
return nil
}
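parseListRStream above avoids loading the whole liststream document into memory: it walks xml.Decoder tokens and feeds each interesting start element to DecodeElement. A self-contained sketch of that token-walking pattern, with element names simplified rather than copied from the Jottacloud API:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

type file struct {
	Path string `xml:"path"`
	Name string `xml:"filename"`
}

func main() {
	r := strings.NewReader(`<list><file><path>/a</path><filename>x.txt</filename></file></list>`)
	dec := xml.NewDecoder(r)
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		// Decode each <file> element as it streams past; everything else is skipped.
		if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "file" {
			var f file
			if err := dec.DecodeElement(&f, &se); err != nil {
				panic(err)
			}
			fmt.Printf("%s/%s\n", f.Path, f.Name) // /a/x.txt
		}
	}
}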
@@ -1046,23 +988,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Path: f.filePath(dir),
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)
opts.Parameters.Set("mode", "list")
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
}
err = parseListRStream(ctx, resp.Body, f, func(d fs.DirEntry) error {
if d.Remote() == dir {
return nil
}
return list.Add(d)
})
_ = resp.Body.Close()
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1074,6 +1005,10 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}
@@ -1191,45 +1126,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// createOrUpdate tries to make remote file match without uploading.
// If the remote file exists, and has matching size and md5, only
// timestamps are updated. If the file does not exist or does
// not match size and md5, but matching content can be constructed
// from deduplication, the file will be updated/created. If the file
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: f.filePath(file),
Parameters: url.Values{},
ExtraHeaders: make(map[string]string),
}
opts.Parameters.Set("cphash", "true")
fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = fileDate
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// does not exist, i.e. not matching size and md5, and not possible to make it by deduplication
if apiErr.StatusCode == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
return info, nil
}
// copyOrMoves copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
@@ -1273,12 +1169,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
}
if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
@@ -1580,19 +1470,40 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return err
}
// request check/update with existing metadata and new modtime
// (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
// prepare allocate request with existing metadata but changed timestamps
var resp *http.Response
var options []fs.OpenOption
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(modTime).APIString()
var request = api.AllocateFileRequest{
Bytes: o.size,
Created: fileDate,
Modified: fileDate,
Md5: o.md5,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
return errors.New("metadata did not match")
}
return err
}
// check response
if response.State != "COMPLETED" {
// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
return errors.New("metadata did not match")
}
// update local metadata
o.modTime = modTime
return nil
@@ -1730,7 +1641,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
fileDate := api.Time(src.ModTime(ctx)).APIString()
// the allocate request
var request = api.AllocateFileRequest{

View File

@@ -28,57 +28,33 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Provider: "other",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Help: "Your Koofr user name.",
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
IsPassword: true,
Required: true,
}, {
@@ -95,7 +71,6 @@ func init() {
// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
@@ -280,38 +255,13 @@ func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
// NewFs constructs a new filesystem given a root path and rclone configuration options
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
setProviderDefaults(opt)
return NewFsFromOptions(ctx, name, root, opt)
}
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err

View File

@@ -1133,9 +1133,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Wipe hashes before update
o.clearHashCache()
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
@@ -1298,13 +1295,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
}
// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
o.fs.objectMetaMu.Lock()
o.hashes = nil
o.fs.objectMetaMu.Unlock()
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
@@ -1316,7 +1306,6 @@ func (o *Object) lstat() error {
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
o.clearHashCache()
return remove(o.path)
}

View File

@@ -1,7 +1,6 @@
package local
import (
"bytes"
"context"
"io/ioutil"
"os"
@@ -13,7 +12,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -168,64 +166,3 @@ func TestSymlinkError(t *testing.T) {
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
// Check the hash is as expected
md5, err = o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}
// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Delete the object
require.NoError(t, o.Remove(ctx))
// Test the hash cache is empty
require.Nil(t, o.(*Object).hashes)
// Test the hash returns an error
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}

View File

@@ -58,7 +58,7 @@ type UserInfoResponse struct {
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int64 `json:"expires"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int64 `json:"bytes_used"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
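The field types reverted here matter on 32-bit builds, where Go's int is 32 bits wide: byte counts beyond ~2 GiB overflow an int but fit easily in int64. A tiny illustration of the truncation (the size is hypothetical):

package main

import "fmt"

func main() {
	const bytesUsed int64 = 5 << 30 // 5 GiB as the API might report it
	truncated := int32(bytesUsed)   // what a 32-bit int would end up holding
	fmt.Println(bytesUsed)          // 5368709120
	fmt.Println(truncated)          // 1073741824: silently wrong
}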

View File

@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
total := info.Body.Cloud.Space.BytesTotal
used := info.Body.Cloud.Space.BytesUsed
used := int64(info.Body.Cloud.Space.BytesUsed)
usage := &fs.Usage{
Total: fs.NewUsageValue(total),

File diff suppressed because it is too large

View File

@@ -1,16 +0,0 @@
package netstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/netstorage"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestnStorage:",
NilObject: (*netstorage.Object)(nil),
})
}

View File

@@ -65,12 +65,9 @@ var (
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopesWithSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: scopesWithSitePermission,
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -140,26 +137,6 @@ Note that the chunks will be buffered into memory.`,
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
Default: "",
Advanced: true,
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization disallows users from consenting to app
permission requests on their own.`,
Default: false,
Advanced: true,
}, {
Name: "expose_onenote_files",
Help: `Set to make OneNote files show up in directory listings.
@@ -301,10 +278,6 @@ type siteResource struct {
type siteResponse struct {
Sites []siteResource `json:"value"`
}
type deltaResponse struct {
DeltaLink string `json:"@odata.deltaLink"`
Value []api.Item `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
@@ -401,12 +374,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
region, graphURL := getRegionURL(m)
if config.State == "" {
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
@@ -560,8 +527,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@@ -653,12 +618,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry := false
if resp != nil {
switch resp.StatusCode {
case 400:
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
return false, fserrors.NoRetryError(err)
}
}
case 401:
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
@@ -830,11 +789,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
if opt.DisableSitePermission {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
@@ -872,19 +826,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
// Get rootID
var rootID = opt.RootFolderID
if rootID == "" {
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
return nil, fmt.Errorf("failed to get root: %w", err)
}
rootID = rootInfo.GetID()
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
return nil, fmt.Errorf("failed to get root: %w", err)
}
if rootID == "" {
if rootInfo.GetID() == "" {
return nil, errors.New("failed to get root: ID was empty")
}
f.dirCache = dircache.New(root, rootID, f)
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
@@ -892,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
@@ -1506,7 +1456,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("about failed: %w", err)
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
@@ -2306,142 +2256,6 @@ func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
return canonicalDriveID
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
//
// The Onedrive implementation gives the whole hierarchy up to the top when
// an object is changed. For instance, if a/b/c is changed, this function
// will call notifyFunc with a, a/b and a/b/c.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
nextDeltaToken, err := f.changeNotifyStartPageToken(ctx)
if err != nil {
fs.Errorf(f, "Could not get first deltaLink: %s", err)
return
}
fs.Debugf(f, "Next delta token is: %s", nextDeltaToken)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
fs.Debugf(f, "Checking for changes on remote")
nextDeltaToken, err = f.changeNotifyRunner(ctx, notifyFunc, nextDeltaToken)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken string, err error) {
delta, err := f.changeNotifyNextChange(ctx, "latest")
parsedURL, err := url.Parse(delta.DeltaLink)
if err != nil {
return
}
nextDeltaToken = parsedURL.Query().Get("token")
return
}
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
opts := f.buildDriveDeltaOpts(token)
_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
return
}
func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
return rest.Opts{
Method: "GET",
RootURL: rootURL,
Path: "/" + f.driveID + "/root/delta",
Parameters: map[string][]string{"token": {token}},
}
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
delta, err := f.changeNotifyNextChange(ctx, deltaToken)
parsedURL, err := url.Parse(delta.DeltaLink)
if err != nil {
return
}
nextDeltaToken = parsedURL.Query().Get("token")
for _, item := range delta.Value {
isDriveRootFolder := item.GetParentReference().ID == ""
if isDriveRootFolder {
continue
}
fullPath, err := getItemFullPath(&item)
if err != nil {
fs.Errorf(f, "Could not get item full path: %s", err)
continue
}
if fullPath == f.root {
continue
}
relName, insideRoot := getRelativePathInsideBase(f.root, fullPath)
if !insideRoot {
continue
}
if item.GetFile() != nil {
notifyFunc(relName, fs.EntryObject)
} else if item.GetFolder() != nil {
notifyFunc(relName, fs.EntryDirectory)
}
}
return
}
func getItemFullPath(item *api.Item) (fullPath string, err error) {
err = nil
fullPath = item.GetName()
if parent := item.GetParentReference(); parent != nil && parent.Path != "" {
pathParts := strings.SplitN(parent.Path, ":", 2)
if len(pathParts) != 2 {
err = fmt.Errorf("invalid parent path: %s", parent.Path)
return
}
if pathParts[1] != "" {
fullPath = strings.TrimPrefix(pathParts[1], "/") + "/" + fullPath
}
}
return
}
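The removed ChangeNotify goroutine is a standard resettable-ticker loop: the poll-interval channel both reconfigures and stops the ticker, and each tick triggers a delta fetch. The control flow in isolation, as a hedged sketch with the fetch stubbed out (import assumed: "time"):

// pollLoop runs fetch on a ticker whose period is controlled via intervals;
// closing intervals stops the loop. fetch stands in for the delta request.
func pollLoop(intervals <-chan time.Duration, fetch func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time // nil channel: its case blocks until a ticker exists
	for {
		select {
		case d, ok := <-intervals:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if d != 0 {
				ticker = time.NewTicker(d)
				tickerC = ticker.C
			}
		case <-tickerC:
			fetch()
		}
	}
}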
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.

View File

@@ -136,8 +136,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
for _, d := range q.data[:len(q.data)-1] {
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
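The '_ = h[ph+7] // bounds check' line kept by this hunk is a deliberate bounds-check-elimination hint: proving the highest index in range once lets the compiler drop the checks on the stores that follow. The same idiom in isolation, as a sketch rather than the quickxorhash code:

// putUint64LE writes v into b[0:8] little-endian, mirroring
// encoding/binary.LittleEndian.PutUint64.
func putUint64LE(b []byte, v uint64) {
	_ = b[7] // one bounds check up front; the eight stores below compile check-free
	b[0] = byte(v)
	b[1] = byte(v >> 8)
	b[2] = byte(v >> 16)
	b[3] = byte(v >> 24)
	b[4] = byte(v >> 32)
	b[5] = byte(v >> 40)
	b[6] = byte(v >> 48)
	b[7] = byte(v >> 56)
}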

View File

@@ -2,6 +2,8 @@
// object storage system.
package pcloud
// FIXME implement ListR? /listfolder can do recursive lists
// FIXME cleanup returns login required?
// FIXME mime type? Fix overview if implement.
@@ -25,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -245,7 +246,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -379,7 +380,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -445,16 +446,14 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
Parameters: url.Values{},
}
if recursive {
opts.Parameters.Set("recursive", "1")
}
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
// FIXME can do recursive
var result api.ItemResult
var resp *http.Response
@@ -466,69 +465,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
if err != nil {
return found, fmt.Errorf("couldn't list files: %w", err)
}
var recursiveContents func(is []api.Item, path string)
recursiveContents = func(is []api.Item, path string) {
for i := range is {
item := &is[i]
if item.IsFolder {
if filesOnly {
continue
}
} else {
if directoriesOnly {
continue
}
for i := range result.Metadata.Contents {
item := &result.Metadata.Contents[i]
if item.IsFolder {
if filesOnly {
continue
}
item.Name = path + f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
if recursive {
recursiveContents(item.Contents, item.Name+"/")
}
}
}
recursiveContents(result.Metadata.Contents, "")
return
}
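The recursiveContents closure above uses the standard Go trick for self-referential function literals: declare the variable first, then assign, since a closure cannot name itself inside its own definition. A minimal standalone version of the pattern (tree type invented for the example):

package main

import "fmt"

type node struct {
	name     string
	children []node
}

func main() {
	tree := node{name: "root", children: []node{
		{name: "a", children: []node{{name: "b"}}},
	}}
	// Declare first, assign second, so the literal can call itself.
	var walk func(n node, prefix string)
	walk = func(n node, prefix string) {
		fmt.Println(prefix + n.name)
		for _, c := range n.children {
			walk(c, prefix+n.name+"/")
		}
	}
	walk(tree, "") // prints root, root/a, root/a/b
}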
// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
iErr = callback(d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
if directoriesOnly {
continue
}
iErr = callback(o)
}
if iErr != nil {
return true
item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
return false
})
if err != nil {
return err
}
if iErr != nil {
return iErr
}
return nil
return
}
// List the objects and directories in dir into entries. The
@@ -541,24 +495,36 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
entries = append(entries, o)
return nil
})
return entries, err
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return err
return nil, err
}
return list.Flush()
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
@@ -690,7 +656,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
@@ -906,7 +872,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("about failed: %w", err)
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
@@ -1171,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting

View File

@@ -783,10 +783,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("CreateDir http: %w", err)
}
if err = info.AsErr(); err != nil {
return nil, err
return nil, fmt.Errorf("CreateDir: %w", err)
}
usage = &fs.Usage{
Used: fs.NewUsageValue(int64(info.SpaceUsed)),

View File

@@ -4,21 +4,16 @@ import (
"context"
"fmt"
"net/http"
"strconv"
"time"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/pacer"
)
func checkStatusCode(resp *http.Response, expected ...int) error {
for _, code := range expected {
if resp.StatusCode == code {
return nil
}
func checkStatusCode(resp *http.Response, expected int) error {
if resp.StatusCode != expected {
return &statusCodeError{response: resp}
}
return &statusCodeError{response: resp}
return nil
}
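// Editor's note: hedged usage sketch, not part of this diff, assuming the
// variadic form of checkStatusCode above; ensureSuccess is a hypothetical
// helper. Any listed code counts as success, anything else becomes a
// *statusCodeError whose Temporary method feeds the retry logic below.
func ensureSuccess(resp *http.Response) error {
	if err := checkStatusCode(resp, 200, 206); err != nil {
		return err // e.g. "unexpected status code (503) response while doing GET to ..."
	}
	return nil
}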
type statusCodeError struct {
@@ -29,10 +24,8 @@ func (e *statusCodeError) Error() string {
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}
// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
func (e *statusCodeError) Temporary() bool {
return e.response.StatusCode >= 500
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
}
// shouldRetry returns a boolean as to whether this err deserves to be
@@ -47,16 +40,6 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
delay := defaultRateLimitSleep
header := scerr.response.Header.Get("x-ratelimit-reset")
if header != "" {
if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
delay = time.Until(time.Unix(resetTime+1, 0))
}
}
return true, pacer.RetryAfterError(scerr, delay)
}
if fserrors.ShouldRetry(err) {
return true, err
}
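// Editor's note: illustrative sketch, not part of this diff, of the
// x-ratelimit-reset handling above; rateLimitDelay is a hypothetical helper.
// The header carries a Unix timestamp, so the retry delay is the time until
// one second past that instant, falling back to a fixed sleep.
func rateLimitDelay(resp *http.Response, fallback time.Duration) time.Duration {
	if header := resp.Header.Get("x-ratelimit-reset"); header != "" {
		if resetTime, err := strconv.ParseInt(header, 10, 64); err == nil {
			return time.Until(time.Unix(resetTime+1, 0))
		}
	}
	return fallback
}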

View File

@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
if err != nil {
return false, err
}
if err := checkStatusCode(resp, 201); err != nil {
return shouldRetry(ctx, err)
if resp.StatusCode != 201 {
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
}
location = resp.Header.Get("location")
if location == "" {
@@ -647,7 +647,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("about failed: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used

View File

@@ -241,13 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
if err != nil {
return shouldRetry(ctx, err)
}
if err := checkStatusCode(resp, 200, 206); err != nil {
return shouldRetry(ctx, err)
}
return false, nil
return shouldRetry(ctx, err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()

View File

@@ -33,9 +33,8 @@ const (
rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
defaultChunkSize = 48 * fs.Mebi
defaultRateLimitSleep = 60 * time.Second
)
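// Editor's note: hedged sketch, not part of this diff, of how rclone backends
// typically feed these constants into a pacer; the exact construction below
// is an assumption for illustration, not code from this file.
//
//	p := fs.NewPacer(ctx, pacer.NewDefault(
//		pacer.MinSleep(minSleep),
//		pacer.MaxSleep(maxSleep),
//		pacer.DecayConstant(decayConstant),
//	))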
var (

File diff suppressed because it is too large

View File

@@ -42,8 +42,7 @@ const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
keepAliveInterval = time.Minute // send keepalives every this long while running commands
decayConstant = 2 // bigger for slower decay, exponential
)
var (
@@ -60,13 +59,11 @@ func init() {
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SSH username.",
Default: currentUser,
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser + ".",
}, {
Name: "port",
Help: "SSH port number.",
Default: 22,
Name: "port",
Help: "SSH port, leave blank to use default (22).",
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
@@ -109,7 +106,7 @@ when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: `Enable the use of insecure ciphers and key exchange methods.
Help: `Enable the use of insecure ciphers and key exchange methods.
This enables the use of the following insecure ciphers and key exchange methods:
@@ -342,32 +339,6 @@ func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
c.sendKeepAlive()
case <-done:
return
}
}
}()
return done
}
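// Editor's note: hedged usage sketch, not part of this diff; runWithKeepAlives
// is a hypothetical helper. Keepalives run for the lifetime of a long command
// and the goroutine is stopped by closing the returned channel, the same
// defer close(...) sequence run() uses below.
func runWithKeepAlives(c *conn, interval time.Duration, work func() error) error {
	done := c.sendKeepAlives(interval)
	defer close(done) // stops the ticker goroutine
	return work()
}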
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
@@ -1127,9 +1098,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}
defer f.putSftpConnection(&c, err)
// Send keepalives while the connection is open
defer close(c.sendKeepAlives(keepAliveInterval))
session, err := c.sshClient.NewSession()
if err != nil {
return nil, fmt.Errorf("run: get SFTP session: %w", err)
@@ -1142,12 +1110,10 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
if err != nil {
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
}
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
return stdout.Bytes(), nil
}
@@ -1220,7 +1186,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
stdout, err := f.run(ctx, "df -k "+escapedPath)
if err != nil {
return nil, fmt.Errorf("your remote may not have the required df utility: %w", err)
return nil, fmt.Errorf("your remote may not support About: %w", err)
}
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
@@ -1264,6 +1230,8 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
if o.fs.opt.DisableHashCheck {
return "", nil
}
@@ -1287,16 +1255,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
@@ -1311,7 +1299,7 @@ var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\
// when sending it to a shell.
func shellEscape(str string) string {
safe := shellEscapeRegex.ReplaceAllString(str, `\$0`)
return strings.ReplaceAll(safe, "\n", "'\n'")
return strings.Replace(safe, "\n", "'\n'", -1)
}
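// Editor's note: illustrative only, not part of this diff. shellEscape
// backslash-escapes every byte outside the safe set and single-quotes literal
// newlines, so a remote path survives being spliced into an ssh command line:
//
//	shellEscape("file name;x") // file\ name\;x
//	shellEscape("pay & go")    // pay\ \&\ go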
// Converts a byte array from the SSH session returned by

View File

@@ -754,34 +754,22 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var total, objects int64
if f.rootContainer != "" {
var container swift.Container
err = f.pacer.Call(func() (bool, error) {
container, _, err = f.c.Container(ctx, f.rootContainer)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container info failed: %w", err)
}
total = container.Bytes
objects = container.Count
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
usage = &fs.Usage{
var total, objects int64
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
usage := &fs.Usage{
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}

View File

@@ -1,8 +1,8 @@
//go:build !plan9
// +build !plan9
// Package storj provides an interface to Storj decentralized object storage.
package storj
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
import (
"context"
@@ -31,17 +31,16 @@ const (
)
var satMap = map[string]string{
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "storj",
Description: "Storj Decentralized Cloud Storage",
Aliases: []string{"tardigrade"},
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
@@ -85,9 +84,10 @@ func init() {
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Default: existingProvider,
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
@@ -99,21 +99,23 @@ func init() {
{
Name: "access_grant",
Help: "Access grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Required: false,
Provider: newProvider,
Default: "us-central-1.storj.io",
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.storj.io",
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.storj.io",
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.storj.io",
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
@@ -121,11 +123,13 @@ func init() {
{
Name: "api_key",
Help: "API key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
@@ -141,7 +145,7 @@ type Options struct {
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Storj
// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
@@ -159,12 +163,11 @@ var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
)
// NewFs creates a filesystem backed by Storj.
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
// Setup filesystem and connection to Storj
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
@@ -185,24 +188,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
@@ -234,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, fmt.Errorf("storj: bucket: %w", err)
return f, fmt.Errorf("tardigrade: bucket: %w", err)
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -260,7 +263,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
return f, nil
}
// connect opens a connection to Storj.
// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
@@ -271,7 +274,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, fmt.Errorf("storj: project: %w", err)
return nil, fmt.Errorf("tardigrade: project: %w", err)
}
return
@@ -579,7 +582,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
return newObjectFromUplink(f, src.Remote(), upload.Info()), nil
return newObjectFromUplink(f, "", upload.Info()), nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate
@@ -680,43 +683,3 @@ func newPrefix(prefix string) string {
return prefix + "/"
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Move parameters
srcBucket, srcKey := bucket.Split(srcObj.absolute)
dstBucket, dstKey := f.absolute(remote)
options := uplink.MoveObjectOptions{}
// Do the move
err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
// Make sure destination bucket exists
_, err := f.project.EnsureBucket(ctx, dstBucket)
if err != nil {
return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
}
// And try again
err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
return nil, fmt.Errorf("rename object failed: %w", err)
}
}
// Read the new object
return f.NewObject(ctx, remote)
}
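// Editor's note: hedged sketch, not part of this diff, of the
// "move, create destination bucket on failure, retry" pattern used above;
// moveWithEnsureBucket is a hypothetical name.
func moveWithEnsureBucket(ctx context.Context, project *uplink.Project, dstBucket string, move func() error) error {
	if err := move(); err != nil {
		// Make sure the destination bucket exists, then try once more
		if _, err := project.EnsureBucket(ctx, dstBucket); err != nil {
			return fmt.Errorf("rename object failed to create destination bucket: %w", err)
		}
		if err := move(); err != nil {
			return fmt.Errorf("rename object failed: %w", err)
		}
	}
	return nil
}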

View File

@@ -1,7 +1,7 @@
//go:build !plan9
// +build !plan9
package storj
package tardigrade
import (
"context"
@@ -18,7 +18,7 @@ import (
"storj.io/uplink"
)
// Object describes a Storj object
// Object describes a Tardigrade object
type Object struct {
fs *Fs
@@ -32,7 +32,7 @@ type Object struct {
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
// newObjectFromUplink creates a new object from a Storj uplink object.
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fallback to the server time.

View File

@@ -1,20 +1,20 @@
//go:build !plan9
// +build !plan9
// Test Storj filesystem interface
package storj_test
// Test Tardigrade filesystem interface
package tardigrade_test
import (
"testing"
"github.com/rclone/rclone/backend/storj"
"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestStorj:",
NilObject: (*storj.Object)(nil),
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
})
}

View File

@@ -1,4 +1,4 @@
//go:build plan9
// +build plan9
package storj
package tardigrade

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
@@ -85,10 +84,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err := o.Update(ctx, readers[i], src, options...)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
}
}
} else {
errs[i] = fs.ErrorNotAFile

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"strings"
@@ -34,21 +33,25 @@ func init() {
Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
Required: true,
}, {
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Default: "epall",
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Required: true,
Default: "epall",
}, {
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Default: "epmfs",
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Required: true,
Default: "epmfs",
}, {
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Default: "ff",
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Required: true,
Default: "ff",
}, {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Default: 120,
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Required: true,
Default: 120,
}},
}
fs.Register(fsi)
@@ -141,20 +144,22 @@ func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// mkdir makes the directory passed in and returns the upstreams used
func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
parent := parentDir(dir)
if dir != parent {
upstreams, err = f.mkdir(ctx, parent)
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
}
}
if err != nil {
return nil, err
return err
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
@@ -163,17 +168,7 @@ func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
}
})
err = errs.Err()
if err != nil {
return nil, err
}
return upstreams, nil
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.mkdir(ctx, dir)
return err
return errs.Err()
}
// Purge all files in the directory
@@ -457,7 +452,10 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
if err == fs.ErrorObjectNotFound {
upstreams, err = f.mkdir(ctx, parentDir(srcPath))
if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
return nil, err
}
upstreams, err = f.create(ctx, srcPath)
}
if err != nil {
return nil, err
@@ -492,10 +490,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
}
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
if len(upstreams) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
}
return
}
objs[i] = u.WrapObject(o)

View File

@@ -4,6 +4,8 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
@@ -18,12 +20,19 @@ import (
)
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
require.NoError(t, err)
dirs = append(dirs, dir)
}
return dirs
clean = func() {
for _, dir := range dirs {
err := os.RemoveAll(dir)
assert.NoError(t, err)
}
}
return dirs, clean
}
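// Editor's note: illustrative only, not part of this diff. With the
// ioutil.TempDir variant above the caller owns cleanup; the t.TempDir variant
// removes the directories automatically when the test ends:
//
//	dirs, clean := MakeTestDirs(t, 3) // manual cleanup
//	defer clean()
//
//	dirs := MakeTestDirs(t, 3)        // t.TempDir form - no cleanup needed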
func (f *Fs) TestInternalReadOnly(t *testing.T) {
@@ -86,7 +95,8 @@ func TestMoveCopy(t *testing.T) {
t.Skip("Skipping as -remote set")
}
ctx := context.Background()
dirs := MakeTestDirs(t, 1)
dirs, clean := MakeTestDirs(t, 1)
defer clean()
fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
f, err := fs.NewFs(ctx, fsString)
require.NoError(t, err)

View File

@@ -27,7 +27,8 @@ func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
@@ -48,7 +49,8 @@ func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
@@ -69,7 +71,8 @@ func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
@@ -90,7 +93,8 @@ func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
@@ -111,7 +115,8 @@ func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
@@ -132,7 +137,8 @@ func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"io"
"math"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
@@ -89,7 +91,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
return nil, err
}
f.RootFs = rFs
rootString := fspath.JoinRootPath(remote, root)
rootString := path.Join(remote, filepath.ToSlash(root))
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err

View File

@@ -124,22 +124,6 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "base_path",
Help: `Base path of expected replies
Normally WebDAV servers return the files they are listing under the
URL path as specified above. However some WebDAV servers return files
with URLs that are not under the endpoint URL. This causes rclone to
get confused and return errors like
Item with unknown path received: "/remote.php/webdav/folder1/", "/elsewhere/remote.php/webdav/folder1/"
If that is the case, then set "base_path" to the path
specified in the error message up to the first item, in the above
example "/elsewhere/remote.php/webdav/".
`,
Advanced: true,
}},
})
}
@@ -154,7 +138,6 @@ type Options struct {
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
BasePath string `config:"base_path"`
}
// Fs represents a remote webdav
@@ -471,9 +454,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if !f.findHeader(opt.Headers, "Referer") {
f.srv.SetHeader("Referer", u.String())
}
f.srv.SetHeader("Referer", u.String())
if root != "" && !rootIsDir {
// Check to see if the root actually an existing file
@@ -536,17 +517,6 @@ func (f *Fs) addHeaders(headers fs.CommaSepList) {
}
}
// Returns true if the header was configured
func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
for i := 0; i < len(headers); i += 2 {
key := f.opt.Headers[i]
if strings.EqualFold(key, find) {
return true
}
}
return false
}
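// Editor's note: illustrative only, not part of this diff. Headers are a flat
// key,value list, which is why findHeader steps through it two at a time and
// why the default Referer above is only applied when the user has not set one:
//
//	headers := fs.CommaSepList{"Cookie", "name=value", "Referer", "https://example.com/"}
//	f.findHeader(headers, "referer") // true - keys match case-insensitively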
// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
if f.opt.BearerTokenCommand == "" {
@@ -710,10 +680,6 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
if err != nil {
return false, fmt.Errorf("couldn't join URL: %w", err)
}
basePath := baseURL.Path
if f.opt.BasePath != "" {
basePath = f.opt.BasePath
}
for i := range result.Responses {
item := &result.Responses[i]
isDir := itemIsDir(item)
@@ -728,11 +694,11 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
if isDir {
u.Path = addSlash(u.Path)
}
if !strings.HasPrefix(u.Path, basePath) {
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, basePath)
if !strings.HasPrefix(u.Path, baseURL.Path) {
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
continue
}
subPath := u.Path[len(basePath):]
subPath := u.Path[len(baseURL.Path):]
if f.opt.Enc != encoder.EncodeZero {
subPath = f.opt.Enc.ToStandardPath(subPath)
}
@@ -1182,7 +1148,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
return nil, fmt.Errorf("about call failed: %w", err)
}
usage := &fs.Usage{}
if i, err := strconv.ParseInt(q.Used, 10, 64); err == nil && i >= 0 {

View File

@@ -66,11 +66,6 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -84,9 +79,8 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
Token string `config:"token"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote yandex
@@ -636,7 +630,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, f.opt.HardDelete)
return f.delete(ctx, root, false)
}
// Rmdir deletes the container
@@ -1147,7 +1141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
return o.fs.delete(ctx, o.filePath(), false)
}
// MimeType of an Object if known, "" otherwise

View File

@@ -1,5 +1,5 @@
#!/bin/bash
set -e
docker build -t rclone/xgo-cgofuse https://github.com/winfsp/cgofuse.git
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
docker images
docker push rclone/xgo-cgofuse

View File

@@ -52,7 +52,6 @@ var (
var osarches = []string{
"windows/386",
"windows/amd64",
"windows/arm64",
"darwin/amd64",
"darwin/arm64",
"linux/386",
@@ -86,13 +85,6 @@ var archFlags = map[string][]string{
"arm-v7": {"GOARM=7"},
}
// Map Go architectures to NFPM architectures
// Any missing are passed straight through
var goarchToNfpm = map[string]string{
"arm": "arm6",
"arm-v7": "arm7",
}
// runEnv - run a shell command with env
func runEnv(args, env []string) error {
if *debug {
@@ -173,17 +165,13 @@ func buildZip(dir string) string {
func buildDebAndRpm(dir, version, goarch string) []string {
// Make internal version number acceptable to .deb and .rpm
pkgVersion := version[1:]
pkgVersion = strings.ReplaceAll(pkgVersion, "β", "-beta")
pkgVersion = strings.ReplaceAll(pkgVersion, "-", ".")
nfpmArch, ok := goarchToNfpm[goarch]
if !ok {
nfpmArch = goarch
}
pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
// Make nfpm.yaml from the template
substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
"Version": pkgVersion,
"Arch": nfpmArch,
"Arch": goarch,
})
// build them
@@ -265,12 +253,9 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
"-o",
sysoPath,
}
if strings.Contains(goarch, "64") {
if goarch == "amd64" {
args = append(args, "-64") // Make the syso a 64-bit coff file
}
if strings.Contains(goarch, "arm") {
args = append(args, "-arm") // Make the syso an arm binary
}
args = append(args, jsonPath)
err = runEnv(args, nil)
if err != nil {
@@ -392,7 +377,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {

View File

@@ -24,7 +24,6 @@ docs = [
"overview.md",
"flags.md",
"docker.md",
"bisync.md",
# Keep these alphabetical by full name
"fichier.md",
@@ -48,13 +47,11 @@ docs = [
"hdfs.md",
"http.md",
"hubic.md",
"internetarchive.md",
"jottacloud.md",
"koofr.md",
"mailru.md",
"mega.md",
"memory.md",
"netstorage.md",
"azureblob.md",
"onedrive.md",
"opendrive.md",
@@ -66,9 +63,8 @@ docs = [
"putio.md",
"seafile.md",
"sftp.md",
"storj.md",
"sugarsync.md",
"tardigrade.md", # stub only to redirect to storj.md
"tardigrade.md",
"uptobox.md",
"union.md",
"webdav.md",

View File

@@ -102,7 +102,7 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
}
u, err := doAbout(context.Background())
if err != nil {
return fmt.Errorf("about call failed: %w", err)
return fmt.Errorf("About call failed: %w", err)
}
if u == nil {
return errors.New("nil usage returned")

View File

@@ -41,7 +41,7 @@ You can discover what commands a backend implements by using
rclone backend help <backendname>
You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).
rclone backend features remote:
@@ -55,7 +55,7 @@ Pass arguments to the backend by placing them on the end of the line
rclone backend cleanup remote:path file1 file2 file3
Note: to run these commands on a running backend, see
[backend/command](/rc/#backend-command) in the rc docs.
[backend/command](/rc/#backend/command) in the rc docs.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1e6, command, args)
@@ -149,7 +149,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
[backend/command](/rc/#backend/command).
`, name)
for _, cmd := range cmds {

View File

@@ -489,7 +489,7 @@ func resolveExitCode(err error) {
os.Exit(exitcode.TransferExceeded)
case fserrors.ShouldRetry(err):
os.Exit(exitcode.RetryError)
case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err):
case fserrors.IsNoRetryError(err):
os.Exit(exitcode.NoRetryError)
case fserrors.IsFatalError(err):
os.Exit(exitcode.FatalError)

View File

@@ -1,6 +1,7 @@
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows
// +build cgo
// +build linux darwin freebsd windows
package cmount
@@ -12,12 +13,12 @@ import (
"sync/atomic"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"github.com/winfsp/cgofuse/fuse"
)
const fhUnset = ^uint64(0)

View File

@@ -2,9 +2,10 @@
//
// This uses the cgo based cgofuse library
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
//go:build cmount && cgo && (linux || darwin || freebsd || windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows
// +build cgo
// +build linux darwin freebsd windows
package cmount
@@ -17,12 +18,12 @@ import (
"sync/atomic"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/vfs"
"github.com/winfsp/cgofuse/fuse"
)
func init() {
@@ -167,7 +168,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
// Create options
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
fs.Debugf(f, "Mounting with options: %q", options)
// Serve the mount point in the background returning error to errChan

View File

@@ -1,6 +1,7 @@
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)
//go:build cmount && cgo && (linux || darwin || freebsd || windows) && (!race || !windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows
// +build cgo
// +build linux darwin freebsd windows
// +build !race !windows
// FIXME this doesn't work with the race detector under Windows either
@@ -9,17 +10,11 @@
package cmount
import (
"runtime"
"testing"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/vfs/vfstest"
)
func TestMount(t *testing.T) {
// Disable tests under macOS and the CI since they are locking up
if runtime.GOOS == "darwin" {
testy.SkipUnreliable(t)
}
vfstest.RunTests(t, false, mount)
}

View File

@@ -1,10 +1,7 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
// +build !linux !cgo !cmount
// +build !darwin !cgo !cmount
// +build !freebsd !cgo !cmount
// +build !windows !cmount
//go:build (!linux && !darwin && !freebsd && !windows) || !brew || !cgo || !cmount
// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
package cmount

View File

@@ -1,5 +1,5 @@
//go:build cmount && windows
// +build cmount,windows
//go:build cmount && cgo && windows
// +build cmount,cgo,windows
package cmount

View File

@@ -79,7 +79,7 @@ rclone.org website.`,
var description = map[string]string{}
var addDescription func(root *cobra.Command)
addDescription = func(root *cobra.Command) {
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
name := strings.Replace(root.CommandPath(), " ", "_", -1) + ".md"
description[name] = root.Short
for _, c := range root.Commands() {
addDescription(c)
@@ -93,11 +93,11 @@ rclone.org website.`,
base := strings.TrimSuffix(name, path.Ext(name))
data := frontmatter{
Date: now,
Title: strings.ReplaceAll(base, "_", " "),
Title: strings.Replace(base, "_", " ", -1),
Description: description[name],
Slug: base,
URL: "/commands/" + strings.ToLower(base) + "/",
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
Source: strings.Replace(strings.Replace(base, "rclone", "cmd", -1), "_", "/", -1) + "/",
}
var buf bytes.Buffer
err := frontmatterTemplate.Execute(&buf, data)

View File

@@ -165,7 +165,7 @@ func runRoot(cmd *cobra.Command, args []string) {
// setupRootCommand sets default usage, help, and error handling for
// the root command.
//
// Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go
// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
func setupRootCommand(rootCmd *cobra.Command) {
ci := fs.GetConfig(context.Background())
// Add global flags
@@ -329,29 +329,12 @@ func showBackend(name string) {
if opt.IsPassword {
fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
}
fmt.Printf("Properties:\n\n")
fmt.Printf("- Config: %s\n", opt.Name)
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
if opt.Provider != "" {
fmt.Printf("- Provider: %s\n", opt.Provider)
}
fmt.Printf("- Type: %s\n", opt.Type())
defaultValue := opt.GetValue()
// Default value and Required are related: Required means the option must
// have a value, but if there is a default then a value does not have
// to be explicitly set, so Required makes no difference.
if defaultValue != "" {
fmt.Printf("- Default: %s\n", quoteString(defaultValue))
} else {
fmt.Printf("- Required: %v\n", opt.Required)
}
// List examples / possible choices
fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
if len(opt.Examples) > 0 {
if opt.Exclusive {
fmt.Printf("- Choices:\n")
} else {
fmt.Printf("- Examples:\n")
}
fmt.Printf("- Examples:\n")
for _, ex := range opt.Examples {
fmt.Printf(" - %s\n", quoteString(ex.Value))
for _, line := range strings.Split(ex.Help, "\n") {

View File

@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
f := VFS.Fs()
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
if err != nil {
return nil, nil, err
}

View File

@@ -25,10 +25,11 @@ func init() {
// mountOptions configures the options from the command line flags
//
// man mount.fuse for more info and note the -o flag for other options
func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
device := f.Name() + ":" + f.Root()
mountOpts = &fuse.MountOptions{
AllowOther: fsys.opt.AllowOther,
FsName: opt.DeviceName,
FsName: device,
Name: "rclone",
DisableXAttrs: true,
Debug: fsys.opt.DebugFUSE,
@@ -119,7 +120,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
if runtime.GOOS == "darwin" {
opts = append(opts,
// VolumeName sets the volume name shown in Finder.
fmt.Sprintf("volname=%s", opt.VolumeName),
fmt.Sprintf("volname=%s", device),
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
// prefix "com.apple.". This disables persistent Finder state and
@@ -166,7 +167,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
//mOpts.Debug = mountlib.DebugFUSE
//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
mountOpts := mountOptions(fsys, f, opt)
mountOpts := mountOptions(fsys, f)
// FIXME fill out
opts := fusefs.Options{

View File

@@ -65,10 +65,10 @@ at all, then 1 PiB is set as both the total and the free size.
To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/winfsp/winfsp) is an open-source
[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with [cgofuse](https://github.com/winfsp/cgofuse).
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone @ for Windows.
@@ -218,7 +218,7 @@ from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.

View File

@@ -40,7 +40,6 @@ type Options struct {
ExtraOptions []string
ExtraFlags []string
AttrTimeout time.Duration // how long the kernel caches attribute for
DeviceName string
VolumeName string
NoAppleDouble bool
NoAppleXattr bool
@@ -78,17 +77,6 @@ type MountPoint struct {
ErrChan <-chan error
}
// NewMountPoint makes a new mounting structure
func NewMountPoint(mount MountFn, mountPoint string, f fs.Fs, mountOpt *Options, vfsOpt *vfscommon.Options) *MountPoint {
return &MountPoint{
MountFn: mount,
MountPoint: mountPoint,
Fs: f,
MountOpt: *mountOpt,
VFSOpt: *vfsOpt,
}
}
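// Editor's note: hedged usage sketch, not part of this diff; mountExample is
// a hypothetical helper pairing the constructor above with Mount, the same
// sequence NewMountCommand performs further down.
func mountExample(mountFn MountFn, mountPoint string, f fs.Fs) error {
	mnt := NewMountPoint(mountFn, mountPoint, f, &Opt, &vfsflags.Opt)
	_, err := mnt.Mount()
	return err
}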
// Global constants
const (
MaxLeafSize = 1024 // don't pass file names longer than this
@@ -137,7 +125,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
// Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
// OSX only
@@ -178,7 +165,14 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
defer cmd.StartStats()()
}
mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfsflags.Opt)
mnt := &MountPoint{
MountFn: mount,
MountPoint: args[1],
Fs: cmd.NewFsDir(args),
MountOpt: Opt,
VFSOpt: vfsflags.Opt,
}
daemon, err := mnt.Mount()
// Wait for foreground mount, if any...
@@ -241,7 +235,6 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
return nil, err
}
m.SetVolumeName(m.MountOpt.VolumeName)
m.SetDeviceName(m.MountOpt.DeviceName)
// Start background task if --daemon is specified
if m.MountOpt.Daemon {
@@ -257,7 +250,6 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
if err != nil {
return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
}
m.MountedOn = time.Now()
return nil, nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
@@ -116,15 +117,23 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, err
}
mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
_, err = mnt.Mount()
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = mnt
liveMounts[mountPoint] = &MountPoint{
MountPoint: mountPoint,
MountedOn: time.Now(),
MountFn: mountFn,
UnmountFn: unmountFn,
MountOpt: mountOpt,
VFSOpt: vfsOpt,
Fs: fdst,
}
fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil

View File

@@ -16,16 +16,11 @@ import (
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRc(t *testing.T) {
// Disable tests under macOS and the CI since they are locking up
if runtime.GOOS == "darwin" {
testy.SkipUnreliable(t)
}
ctx := context.Background()
configfile.Install()
mount := rc.Calls.Get("mount/mount")
@@ -35,14 +30,19 @@ func TestRc(t *testing.T) {
getMountTypes := rc.Calls.Get("mount/types")
assert.NotNil(t, getMountTypes)
localDir := t.TempDir()
err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(localDir) }()
err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
require.NoError(t, err)
mountPoint := t.TempDir()
mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
require.NoError(t, err)
if runtime.GOOS == "windows" {
// Windows requires the mount point not to exist
require.NoError(t, os.RemoveAll(mountPoint))
} else {
defer func() { _ = os.RemoveAll(mountPoint) }()
}
out, err := getMountTypes.Fn(ctx, nil)

View File

@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
if vol == "" {
vol = fs.ConfigString(m.Fs)
vol = m.Fs.Name() + ":" + m.Fs.Root()
}
m.MountOpt.SetVolumeName(vol)
}
@@ -102,11 +102,3 @@ func (o *Options) SetVolumeName(vol string) {
}
o.VolumeName = vol
}
// SetDeviceName with sensible default
func (m *MountPoint) SetDeviceName(dev string) {
if dev == "" {
dev = fs.ConfigString(m.Fs)
}
m.MountOpt.DeviceName = dev
}

View File

@@ -42,31 +42,15 @@ builds an in memory representation. rclone ncdu can be used during
this scanning phase and you will see it building up the directory
structure as it goes along.
You can interact with the user interface using key presses,
press '?' to toggle the help on and off. The supported keys are:
Here are the keys - press '?' to toggle the help on and off
` + strings.Join(helpText()[1:], "\n ") + `
Listed files/directories may be prefixed by a one-character flag,
some of them combined with a description in brackets at the end of the line.
These flags have the following meaning:
e means this is an empty directory, i.e. contains no files (but
may contain empty subdirectories)
~ means this is a directory where some of the files (possibly in
subdirectories) have unknown size, and therefore the directory
size may be underestimated (and average size inaccurate, as it
is the average of the files with known sizes).
. means an error occurred while reading a subdirectory, and
therefore the directory size may be underestimated (and average
size inaccurate)
! means an error occurred while reading this directory
This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment
but is useful as it stands.
Note that it might take some time to delete big files/directories. The
Note that it might take some time to delete big files/folders. The
UI won't respond in the meantime since the deletion is done synchronously.
`,
Run: func(command *cobra.Command, args []string) {
@@ -299,9 +283,9 @@ func (u *UI) biggestEntry() (biggest int64) {
return
}
for i := range u.entries {
attrs, _ := u.d.AttrI(u.sortPerm[i])
if attrs.Size > biggest {
biggest = attrs.Size
size, _, _, _, _, _ := u.d.AttrI(u.sortPerm[i])
if size > biggest {
biggest = size
}
}
return
@@ -313,8 +297,8 @@ func (u *UI) hasEmptyDir() bool {
return false
}
for i := range u.entries {
attrs, _ := u.d.AttrI(u.sortPerm[i])
if attrs.IsDir && attrs.Count == 0 {
_, count, isDir, _, _, _ := u.d.AttrI(u.sortPerm[i])
if isDir && count == 0 {
return true
}
}
@@ -359,9 +343,9 @@ func (u *UI) Draw() error {
if y >= h-1 {
break
}
attrs, err := u.d.AttrI(u.sortPerm[n])
size, count, isDir, readable, entriesHaveErrors, err := u.d.AttrI(u.sortPerm[n])
fg := termbox.ColorWhite
if attrs.EntriesHaveErrors {
if entriesHaveErrors {
fg = termbox.ColorYellow
}
if err != nil {
@@ -372,19 +356,15 @@ func (u *UI) Draw() error {
fg, bg = bg, fg
}
mark := ' '
if attrs.IsDir {
if isDir {
mark = '/'
}
fileFlag := ' '
message := ""
if !attrs.Readable {
if !readable {
message = " [not read yet]"
}
if attrs.CountUnknownSize > 0 {
message = fmt.Sprintf(" [%d of %d files have unknown size, size may be underestimated]", attrs.CountUnknownSize, attrs.Count)
fileFlag = '~'
}
if attrs.EntriesHaveErrors {
if entriesHaveErrors {
message = " [some subdirectories could not be read, size may be underestimated]"
fileFlag = '.'
}
@@ -394,29 +374,32 @@ func (u *UI) Draw() error {
}
extras := ""
if u.showCounts {
ss := operations.CountStringField(attrs.Count, u.humanReadable, 9) + " "
if attrs.Count > 0 {
ss := operations.CountStringField(count, u.humanReadable, 9) + " "
if count > 0 {
extras += ss
} else {
extras += strings.Repeat(" ", len(ss))
}
}
var averageSize float64
if count > 0 {
averageSize = float64(size) / float64(count)
}
if u.showDirAverageSize {
avg := attrs.AverageSize()
ss := operations.SizeStringField(int64(avg), u.humanReadable, 9) + " "
if avg > 0 {
ss := operations.SizeStringField(int64(averageSize), u.humanReadable, 9) + " "
if averageSize > 0 {
extras += ss
} else {
extras += strings.Repeat(" ", len(ss))
}
}
if showEmptyDir {
if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
if isDir && count == 0 && fileFlag == ' ' {
fileFlag = 'e'
}
}
if u.showGraph {
bars := (attrs.Size + perBar/2 - 1) / perBar
bars := (size + perBar/2 - 1) / perBar
// clip if necessary - only happens during startup
if bars > 10 {
bars = 10
@@ -425,7 +408,7 @@ func (u *UI) Draw() error {
}
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
}
Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
y++
}
}
@@ -576,14 +559,14 @@ type ncduSort struct {
// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
var iAvgSize, jAvgSize float64
iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
isize, icount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[i])
jsize, jcount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[j])
iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
if iattrs.Count > 0 {
iAvgSize = iattrs.AverageSize()
if icount > 0 {
iAvgSize = float64(isize / icount)
}
if jattrs.Count > 0 {
jAvgSize = jattrs.AverageSize()
if jcount > 0 {
jAvgSize = float64(jsize / jcount)
}
switch {
@@ -592,33 +575,33 @@ func (ds *ncduSort) Less(i, j int) bool {
case ds.u.sortByName > 0:
break
case ds.u.sortBySize < 0:
if iattrs.Size != jattrs.Size {
return iattrs.Size < jattrs.Size
if isize != jsize {
return isize < jsize
}
case ds.u.sortBySize > 0:
if iattrs.Size != jattrs.Size {
return iattrs.Size > jattrs.Size
if isize != jsize {
return isize > jsize
}
case ds.u.sortByCount < 0:
if iattrs.Count != jattrs.Count {
return iattrs.Count < jattrs.Count
if icount != jcount {
return icount < jcount
}
case ds.u.sortByCount > 0:
if iattrs.Count != jattrs.Count {
return iattrs.Count > jattrs.Count
if icount != jcount {
return icount > jcount
}
case ds.u.sortByAverageSize < 0:
if iAvgSize != jAvgSize {
return iAvgSize < jAvgSize
}
// if avgSize is equal, sort by size
return iattrs.Size < jattrs.Size
return isize < jsize
case ds.u.sortByAverageSize > 0:
if iAvgSize != jAvgSize {
return iAvgSize > jAvgSize
}
// if avgSize is equal, sort by size
return iattrs.Size > jattrs.Size
return isize > jsize
}
// if everything equal, sort by name
return iname < jname

View File

@@ -16,42 +16,14 @@ type Dir struct {
parent *Dir
path string
mu sync.Mutex
size int64
count int64
countUnknownSize int64
size int64
entries fs.DirEntries
dirs map[string]*Dir
readError error
entriesHaveErrors bool
}
// Attrs contains accumulated properties for a directory entry
//
// Files with unknown size are counted separately but also included
// in the total count. They are not included in the size, i.e. treated
// as empty files, which means the size may be underestimated.
type Attrs struct {
Size int64
Count int64
CountUnknownSize int64
IsDir bool
Readable bool
EntriesHaveErrors bool
}
// AverageSize calculates average size of files in directory
//
// If there are files with unknown size, this returns the average over
// files with known sizes, which means it may be under- or
// overestimated.
func (a *Attrs) AverageSize() float64 {
countKnownSize := a.Count - a.CountUnknownSize
if countKnownSize > 0 {
return float64(a.Size) / float64(countKnownSize)
}
return 0
}
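// Example (illustrative, not from the source): a directory holding two
// files of known size totalling 300 bytes plus one file of unknown size
// averages over the known files only, so the result may be skewed:
//
//	a := Attrs{Size: 300, Count: 3, CountUnknownSize: 1}
//	a.AverageSize() // 300 / (3 - 1) = 150, not 300 / 3 = 100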
// Parent returns the directory above this one
func (d *Dir) Parent() *Dir {
// no locking needed since these are write once in newDir()
@@ -77,13 +49,7 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
for _, entry := range entries {
if o, ok := entry.(fs.Object); ok {
d.count++
size := o.Size()
if size < 0 {
// Some backends may return -1 because size of object is not known
d.countUnknownSize++
} else {
d.size += size
}
d.size += o.Size()
}
}
// Set my directory entry in parent
@@ -96,9 +62,8 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
// Accumulate counts in parents
for ; parent != nil; parent = parent.parent {
parent.mu.Lock()
parent.size += d.size
parent.count += d.count
parent.countUnknownSize += d.countUnknownSize
parent.size += d.size
if d.readError != nil {
parent.entriesHaveErrors = true
}
@@ -126,24 +91,17 @@ func (d *Dir) Remove(i int) {
// Call with d.mu held
func (d *Dir) remove(i int) {
size := d.entries[i].Size()
countUnknownSize := int64(0)
if size < 0 {
size = 0
countUnknownSize = 1
}
count := int64(1)
subDir, ok := d.getDir(i)
if ok {
size = subDir.size
count = subDir.count
countUnknownSize = subDir.countUnknownSize
delete(d.dirs, path.Base(subDir.path))
}
d.size -= size
d.count -= count
d.countUnknownSize -= countUnknownSize
d.entries = append(d.entries[:i], d.entries[i+1:]...)
dir := d
@@ -153,7 +111,6 @@ func (d *Dir) remove(i int) {
parent.dirs[path.Base(dir.path)] = dir
parent.size -= size
parent.count -= count
parent.countUnknownSize -= countUnknownSize
dir = parent
parent.mu.Unlock()
}
@@ -194,19 +151,19 @@ func (d *Dir) Attr() (size int64, count int64) {
}
// AttrI returns the size, count and flags for the i-th directory entry
func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool, entriesHaveErrors bool, err error) {
d.mu.Lock()
defer d.mu.Unlock()
subDir, isDir := d.getDir(i)
if !isDir {
return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
return d.entries[i].Size(), 0, false, true, d.entriesHaveErrors, d.readError
}
if subDir == nil {
return Attrs{0, 0, 0, true, false, false}, nil
return 0, 0, true, false, false, nil
}
size, count := subDir.Attr()
return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
size, count = subDir.Attr()
return size, count, true, true, subDir.entriesHaveErrors, subDir.readError
}
// Scan the Fs passed in, returning a root directory channel and an

View File

@@ -290,7 +290,7 @@ func list(ctx context.Context) error {
if !ok {
return errors.New("bad JSON")
}
fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.ReplaceAll(info["Path"].(string), "/", "-"))
fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], strings.Replace(info["Path"].(string), "/", "-", -1))
fmt.Printf("%s\n\n", info["Help"])
if authRequired := info["AuthRequired"]; authRequired != nil {
if authRequired.(bool) {
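The two calls above are interchangeable: strings.ReplaceAll, added in Go 1.12, is shorthand for strings.Replace with n = -1, so the change is behavior-preserving:

	strings.ReplaceAll("a/b/c", "/", "-")  // "a-b-c"
	strings.Replace("a/b/c", "/", "-", -1) // "a-b-c" (identical)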

Binary image changed (not shown): 1.4 KiB → 20 KiB

Binary image changed (not shown): 724 B → 5.6 KiB

View File

@@ -23,7 +23,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/file"
"github.com/stretchr/testify/assert"
@@ -304,10 +303,6 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
}
func testMountAPI(t *testing.T, sockAddr string) {
// Disable tests under macOS and linux in the CI since they are locking up
if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
testy.SkipUnreliable(t)
}
if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
t.Skip("Test requires working mount command")
}

View File

@@ -274,6 +274,7 @@ func (vol *Volume) mount(id string) error {
if _, err := vol.mnt.Mount(); err != nil {
return err
}
vol.mnt.MountedOn = time.Now()
vol.mountReqs[id] = nil
vol.drv.monChan <- false // ask monitor to refresh channels
return nil

View File

@@ -16,10 +16,7 @@ import (
)
// Help describes the options for the serve package
var Help = `
#### Template
--template allows a user to specify a custom markup template for http
var Help = `--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:

View File

@@ -23,7 +23,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")

View File

@@ -16,7 +16,6 @@ TestFichier:
TestFTP:
TestGoogleCloudStorage:
TestHubic:
TestNetStorage:
TestOneDrive:
TestPcloud:
TestQingStor:

View File

@@ -7,7 +7,9 @@ import (
"crypto/rand"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
@@ -111,7 +113,14 @@ func TestResticHandler(t *testing.T) {
}
// setup rclone with a local backend in a temporary directory
tempdir := t.TempDir()
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
require.NoError(t, err)
// make sure the tempdir is properly removed
defer func() {
err := os.RemoveAll(tempdir)
require.NoError(t, err)
}()
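// Aside (not in the source): on Go 1.15+ testing.T.TempDir collapses the
// create-plus-defer pattern above into one call that registers removal
// via t.Cleanup:
//
//	tempdir := t.TempDir() // removed automatically when the test ends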
// globally set append-only mode
prev := appendOnly

View File

@@ -7,7 +7,9 @@ import (
"context"
"crypto/rand"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
@@ -33,7 +35,14 @@ func TestResticPrivateRepositories(t *testing.T) {
require.NoError(t, err)
// setup rclone with a local backend in a temporary directory
tempdir := t.TempDir()
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
require.NoError(t, err)
// make sure the tempdir is properly removed
defer func() {
err := os.RemoveAll(tempdir)
require.NoError(t, err)
}()
// globally set private-repos mode & test user
prev := privateRepos

View File

@@ -43,7 +43,7 @@ var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`)
// Unescape a string that was escaped by rclone
func shellUnEscape(str string) string {
str = strings.ReplaceAll(str, "'\n'", "\n")
str = strings.Replace(str, "'\n'", "\n", -1)
str = shellUnEscapeRegex.ReplaceAllString(str, `$1`)
return str
}
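A quick round trip of the two rules above (invented input): the quoted-newline token collapses first, then single backslashes are stripped.

	in := "a\\ b'\n'c"  // backslash-escaped space, then a quoted newline
	shellUnEscape(in)   // returns "a b\nc"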

Some files were not shown because too many files have changed in this diff.