Mirror of https://github.com/rclone/rclone.git (synced 2026-01-03 00:53:43 +00:00)
Compare commits: fix-deadlo ... v1.58.0 (131 commits)
131 commits in this comparison (the author and date columns were empty in the extraction; only the SHA1s survive):

f9354fff2f, ff1f173fc2, f8073a7b63, 807f1cedaa, bf9c68c88a, 189cba0fbe,
69f726f16c, 65652f7a75, 47f9ab2f56, 5dd51e6149, 6a6d254a9f, fd453f2c7b,
5d06a82c5d, 847868b4ba, 38ca178cf3, 9427d22f99, 7b1428a498, ec72432cec,
2339172df2, 268b808bf8, 74898bac3b, e0fbca02d4, 21355b4208, 251b84ff2c,
537b62917f, 71a784cfa2, 8ee0fe9863, 8f164e4df5, 06ecc6511b, 3529bdec9b,
486b43f8c7, 89f0e4df80, 399fb5b7fb, 19f1ed949c, d3a1001094, dc7e3ea1e3,
f22b703a51, c40129d610, 8dc93f1792, f4c40bf79d, 9cc50a614b, bcb07a67f6,
25ea04f1db, 06ffd4882d, 19a5e1d63b, ec88b66dad, aa2d7f00c2, 3e125443aa,
3c271b8b1e, 6d92ba2c6c, c26dc69e1b, b0de0b4609, f54641511a, 8cf76f5e11,
18c24014da, 0ae39bda8d, 051685baa1, 07f53aebdc, bd6d36b3f6, b168479429,
b447b0cd78, 4bd2386632, 83b6b62c1b, 5826cc9d9e, 252432ae54, 8821629333,
a2092a8faf, 2b6f4241b4, e3dd16d490, 9e1fd923f6, 3684789858, 1ac1dd428a,
65dbd29c22, 164774d7e1, 507020f408, a667e03fc9, 1045344943, 5e469db420,
946e84d194, 162aba60eb, d8a874c32b, 9c451d9ac6, 8f3f24672c, 0eb7b716d9,
ee9684e60f, e0cbe413e1, 2523dd6220, c504d97017, b783f09fc6, a301478a13,
63b450a2a5, 843b77aaaa, 3641727edb, 38e2f835ed, bd4bbed592, 994b501188,
dfa9381814, 2a85feda4b, ad46af9168, 2fed02211c, 237daa8aaf, 8aeca6c033,
fd82876086, be1a668e95, 9d4eab32d8, b4ba7b69b8, deef659aef, 4b99e84242,
06bdf7c64c, e1225b5729, 871cc2f62d, bc23bf11db, b55575e622, 328f0e7135,
a52814eed9, 071a9e882d, 4e2ca3330c, 408d9f3e7a, 0681a5c86a, df09c3f555,
c41814fd2d, c2557cc432, 3425726c50, 46175a22d8, bcf0e15ad7, b91c349cd5,
d252816706, 729117af68, cd4d8d55ec, f26abc89a6, b5abbe819f
.github/workflows/build.yml (11 changes, vendored)
@@ -40,7 +40,7 @@ jobs:
           deploy: true

         - job_name: mac_amd64
-          os: macOS-latest
+          os: macos-11
           go: '1.17.x'
           gotags: 'cmount'
           build_flags: '-include "^darwin/amd64" -cgo'
@@ -49,10 +49,10 @@ jobs:
           deploy: true

         - job_name: mac_arm64
-          os: macOS-latest
+          os: macos-11
           go: '1.17.x'
           gotags: 'cmount'
-          build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+          build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
           deploy: true

         - job_name: windows_amd64
@@ -79,7 +79,7 @@ jobs:
         - job_name: other_os
           os: ubuntu-latest
           go: '1.17.x'
-          build_flags: '-exclude "^(windows/|darwin/|linux/)"'
+          build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
           compile_all: true
           deploy: true

@@ -110,6 +110,7 @@ jobs:
        with:
          stable: 'false'
          go-version: ${{ matrix.go }}
+         check-latest: true

      - name: Set environment variables
        shell: bash
@@ -134,7 +135,7 @@ jobs:
        run: |
          brew update
          brew install --cask macfuse
-        if: matrix.os == 'macOS-latest'
+        if: matrix.os == 'macos-11'

      - name: Install Libraries on Windows
        shell: powershell
@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | tardigrade backend |
+| Caleb Case | @calebcase | storj backend |

 **This is a work in progress Draft**
MANUAL.html (5267 changes, generated): file diff suppressed because it is too large.
MANUAL.txt (7550 changes, generated): file diff suppressed because it is too large.
@@ -1,4 +1,5 @@
-[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
+[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
@@ -20,6 +21,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 ## Storage providers

 * 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
+* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
@@ -28,6 +30,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
 * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
+* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
@@ -65,8 +68,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
+* Storj [:page_facing_up:](https://rclone.org/storj/)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
-* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
 * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
@@ -28,6 +28,7 @@ import (
 	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
 	_ "github.com/rclone/rclone/backend/memory"
+	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
 	_ "github.com/rclone/rclone/backend/pcloud"
@@ -39,9 +40,9 @@ import (
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
+	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/tardigrade"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
@@ -19,7 +19,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/Azure/azure-pipeline-go/pipeline"
@@ -1461,6 +1461,10 @@ func (o *Object) clearMetaData() {
 // o.size
 // o.md5
 func (o *Object) readMetaData() (err error) {
+	container, _ := o.split()
+	if !o.fs.containerOK(container) {
+		return fs.ErrorObjectNotFound
+	}
 	if !o.modTime.IsZero() {
 		return nil
 	}
@@ -1653,7 +1657,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			return errCantUpdateArchiveTierBlobs
 		}
 	}
-	container, _ := o.split()
+	container, containerPath := o.split()
+	if container == "" || containerPath == "" {
+		return fmt.Errorf("can't upload to root - need a container")
+	}
 	err = o.fs.makeContainer(ctx, container)
 	if err != nil {
 		return err
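The import hunk above, repeated across most backends in this comparison, swaps the fix-deadlo branch's `github.com/rclone/rclone/lib/sync` wrapper back to the standard library `sync` for v1.58.0. The branch's wrapper itself is not shown here; the sketch below is a hypothetical illustration of how such a drop-in package can flag stuck locks while keeping the stdlib surface, so callers only change the import path. All names in it are assumptions, not rclone API.

```go
// Hypothetical sketch of a drop-in "lib/sync" package of the kind the
// fix-deadlo branch appears to use for deadlock hunting.
package sync

import (
	"fmt"
	"os"
	stdsync "sync"
	"time"
)

// RWMutex is re-exported unchanged so the package stays a drop-in.
type RWMutex = stdsync.RWMutex

// Mutex wraps stdsync.Mutex and complains if Lock blocks too long.
type Mutex struct {
	mu stdsync.Mutex
}

func (m *Mutex) Lock() {
	done := make(chan struct{})
	go func() {
		m.mu.Lock()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(30 * time.Second): // assumed timeout
		fmt.Fprintln(os.Stderr, "possible deadlock: Lock blocked > 30s")
		<-done // semantics unchanged: still acquire the lock
	}
}

func (m *Mutex) Unlock() { m.mu.Unlock() }
```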
@@ -17,7 +17,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/backend/b2/api"
@@ -160,7 +160,15 @@ free egress for data downloaded through the Cloudflare network.
 Rclone works with private buckets by sending an "Authorization" header.
 If the custom endpoint rewrites the requests for authentication,
 e.g., in Cloudflare Workers, this header needs to be handled properly.
-Leave blank if you want to use the endpoint provided by Backblaze.`,
+Leave blank if you want to use the endpoint provided by Backblaze.
+
+The URL provided here SHOULD have the protocol and SHOULD NOT have
+a trailing slash or specify the /file/bucket subpath as rclone will
+request files with "{download_url}/file/{bucket_name}/{path}".
+
+Example:
+> https://mysubdomain.mydomain.tld
+(No trailing "/", "file" or "bucket")`,
 			Advanced: true,
 		}, {
 			Name: "download_auth_duration",
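The expanded help above pins down the expected `download_url` shape. A hypothetical rclone.conf entry matching it (remote name, credentials and domain are placeholders):

```ini
[b2]
type = b2
account = <your account id>
key = <your application key>
; custom Cloudflare-fronted endpoint: protocol included,
; no trailing slash, no /file/bucket suffix
download_url = https://mysubdomain.mydomain.tld
```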
@@ -13,7 +13,7 @@ import (
 	gohash "hash"
 	"io"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
@@ -23,7 +23,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"sync/atomic"
 	"time"
@@ -13,7 +13,7 @@ import (
 	"io"
 	"net/http"
 	"strconv"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/backend/box/api"
backend/cache/cache.go (2 changes, vendored)
@@ -16,7 +16,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"syscall"
 	"time"
backend/cache/handle.go (2 changes, vendored)
@@ -11,7 +11,7 @@ import (
 	"path"
 	"runtime"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
backend/cache/object.go (2 changes, vendored)
@@ -8,7 +8,7 @@ import (
 	"fmt"
 	"io"
 	"path"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
backend/cache/plex.go (2 changes, vendored)
@@ -12,7 +12,7 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	cache "github.com/patrickmn/go-cache"
backend/cache/storage_persistent.go (2 changes, vendored)
@@ -14,7 +14,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
@@ -19,7 +19,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
@@ -401,6 +401,10 @@ func isCompressible(r io.Reader) (bool, error) {
 	if err != nil {
 		return false, err
 	}
+	err = w.Close()
+	if err != nil {
+		return false, err
+	}
 	ratio := float64(n) / float64(b.Len())
 	return ratio > minCompressionRatio, nil
 }
@@ -626,9 +630,11 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
 	// Put the data
 	mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
 	if err != nil {
-		removeErr := mo.Remove(ctx)
-		if removeErr != nil {
-			fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+		if mo != nil {
+			removeErr := mo.Remove(ctx)
+			if removeErr != nil {
+				fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
+			}
 		}
 		return nil, err
 	}
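The isCompressible fix above matters because gzip.Writer buffers internally: until Close flushes the final blocks, the output length undercounts and the measured ratio is inflated. A self-contained sketch of the heuristic, with an assumed threshold and sample size rather than the backend's actual constants:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// isCompressible reports whether a sample of r compresses well.
// w.Close() must run before reading b.Len(): without it the gzip
// stream is incomplete and the ratio is wrong.
func isCompressible(r io.Reader) (bool, error) {
	const minCompressionRatio = 1.1 // assumed threshold for this sketch
	var b bytes.Buffer
	w, err := gzip.NewWriterLevel(&b, gzip.BestSpeed)
	if err != nil {
		return false, err
	}
	n, err := io.CopyN(w, r, 4096) // sample up to 4 KiB
	if err != nil && err != io.EOF {
		return false, err
	}
	if err := w.Close(); err != nil { // flush all pending gzip blocks
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

func main() {
	ok, _ := isCompressible(strings.NewReader(strings.Repeat("rclone ", 1000)))
	fmt.Println("compressible:", ok)
}
```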
@@ -13,7 +13,7 @@ import (
 	"io"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"
 	"unicode/utf8"
@@ -443,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+		return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
 	}
 	fs.Debugf(src, "%v = %s OK", ht, srcHash)
 }
@@ -21,7 +21,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"sync/atomic"
 	"text/template"
 	"time"
@@ -84,7 +84,7 @@ var (
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.TitleBarRedirectURL,
+		RedirectURL:  oauthutil.RedirectURL,
 	}
 	_mimeTypeToExtensionDuplicates = map[string]string{
 		"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -299,6 +299,17 @@ a non root folder as its starting point.
 			Default:  true,
 			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
 			Advanced: true,
 		}, {
+			Name:    "copy_shortcut_content",
+			Default: false,
+			Help: `Server side copy contents of shortcuts instead of the shortcut.
+
+When doing server side copies, normally rclone will copy shortcuts as
+shortcuts.
+
+If this flag is used then rclone will copy the contents of shortcuts
+rather than shortcuts themselves when doing server side copies.`,
+			Advanced: true,
+		}, {
 			Name:    "skip_gdocs",
 			Default: false,
@@ -542,6 +553,14 @@ Google don't document so it may break in the future.
 Normally rclone dereferences shortcut files making them appear as if
 they are the original file (see [the shortcuts section](#shortcuts)).
 If this flag is set then rclone will ignore shortcut files completely.
 `,
 			Advanced: true,
 			Default:  false,
 		}, {
+			Name: "skip_dangling_shortcuts",
+			Help: `If set skip dangling shortcut files.
+
+If this is set then rclone will not show any dangling shortcuts in listings.
+`,
+			Advanced: true,
+			Default:  false,
+		}, {
@@ -578,6 +597,7 @@ type Options struct {
 	TeamDriveID           string `config:"team_drive"`
 	AuthOwnerOnly         bool   `config:"auth_owner_only"`
 	UseTrash              bool   `config:"use_trash"`
+	CopyShortcutContent   bool   `config:"copy_shortcut_content"`
 	SkipGdocs             bool   `config:"skip_gdocs"`
 	SkipChecksumGphotos   bool   `config:"skip_checksum_gphotos"`
 	SharedWithMe          bool   `config:"shared_with_me"`
@@ -604,6 +624,7 @@ type Options struct {
 	StopOnUploadLimit     bool                 `config:"stop_on_upload_limit"`
 	StopOnDownloadLimit   bool                 `config:"stop_on_download_limit"`
 	SkipShortcuts         bool                 `config:"skip_shortcuts"`
+	SkipDanglingShortcuts bool                 `config:"skip_dangling_shortcuts"`
 	Enc                   encoder.MultiEncoder `config:"encoding"`
 }
@@ -906,6 +927,11 @@ OUTER:
 			if err != nil {
 				return false, fmt.Errorf("list: %w", err)
 			}
+			// leave the dangling shortcut out of the listings
+			// we've already logged about the dangling shortcut in resolveShortcut
+			if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
+				continue
+			}
 		}
 		// Check the case of items is correct since
 		// the `=` operator is case insensitive.
@@ -1571,6 +1597,15 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
 		}
 	}

+	// If using a link type export and a more specific export
+	// hasn't been found all docs should be exported
+	for _, _extension := range f.exportExtensions {
+		_mimeType := mime.TypeByExtension(_extension)
+		if isLinkMimeType(_mimeType) {
+			return _extension, _mimeType, true
+		}
+	}
+
 	// else return empty
 	return "", "", isDocument
 }
@@ -1581,6 +1616,14 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", "", false)
 func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
+	// If item has MD5 sum it is a file stored on drive
+	if item.Md5Checksum != "" {
+		return
+	}
+	// Folders can't be documents
+	if item.MimeType == driveFolderType {
+		return
+	}
 	extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
 	if extension != "" {
 		filename = item.Name + extension
@@ -2374,9 +2417,16 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		createInfo.Description = ""
 	}

-	// get the ID of the thing to copy - this is the shortcut if available
+	// get the ID of the thing to copy
+	// copy the contents if CopyShortcutContent
+	// else copy the shortcut only
+
 	id := shortcutID(srcObj.id)
+
+	if f.opt.CopyShortcutContent {
+		id = actualID(srcObj.id)
+	}

 	var info *drive.File
 	err = f.pacer.Call(func() (bool, error) {
 		info, err = f.svc.Files.Copy(id, createInfo).
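Following rclone's usual option-to-flag naming (backend prefix plus option name), the new drive option above should surface as `--drive-copy-shortcut-content`. A hypothetical invocation that copies what shortcuts point at rather than the shortcuts themselves during a server-side copy:

```
rclone copy drive:src drive:dst --drive-copy-shortcut-content
```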
@@ -422,11 +422,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	require.NoError(t, err)
 	o := obj.(*Object)

-	dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(dir)
-	}()
+	dir := t.TempDir()

 	checkFile := func(name string) {
 		filePath := filepath.Join(dir, name)
@@ -491,19 +487,11 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	subFs, isDriveFs := subFsResult.(*Fs)
 	require.True(t, isDriveFs)

-	tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(tempDir1)
-	}()
+	tempDir1 := t.TempDir()
 	tempFs1, err := fs.NewFs(defCtx, tempDir1)
 	require.NoError(t, err)

-	tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
-	require.NoError(t, err)
-	defer func() {
-		_ = os.RemoveAll(tempDir2)
-	}()
+	tempDir2 := t.TempDir()
 	tempFs2, err := fs.NewFs(defCtx, tempDir2)
 	require.NoError(t, err)
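These test hunks replace the ioutil.TempDir plus deferred os.RemoveAll boilerplate with t.TempDir() (Go 1.15+), which registers cleanup automatically and fails the test if removal fails. A minimal sketch of the pattern:

```go
package example

import "testing"

// TestTempDir shows the pattern the hunks above adopt: t.TempDir()
// creates a unique directory and the testing framework removes it when
// the test finishes, so no defer is needed and a failed cleanup fails
// the test instead of leaking directories.
func TestTempDir(t *testing.T) {
	dir := t.TempDir() // replaces ioutil.TempDir + defer os.RemoveAll
	if dir == "" {
		t.Fatal("expected a directory")
	}
}
```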
@@ -10,7 +10,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
@@ -42,18 +42,15 @@ func init() {
 		}, {
 			Help:     "If you want to download a shared folder, add this parameter.",
 			Name:     "shared_folder",
-			Required: false,
 			Advanced: true,
 		}, {
 			Help:       "If you want to download a shared file that is password protected, add this parameter.",
 			Name:       "file_password",
-			Required:   false,
 			Advanced:   true,
 			IsPassword: true,
 		}, {
 			Help:       "If you want to list the files in a shared folder that is password protected, add this parameter.",
 			Name:       "folder_password",
-			Required:   false,
 			Advanced:   true,
 			IsPassword: true,
 		}, {
@@ -517,6 +514,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return dstObj, nil
 }

+// About gets quota information
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+	opts := rest.Opts{
+		Method:      "POST",
+		Path:        "/user/info.cgi",
+		ContentType: "application/json",
+	}
+	var accountInfo AccountInfo
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to read user info: %w", err)
+	}
+
+	// FIXME max upload size would be useful to use in Update
+	usage = &fs.Usage{
+		Used:  fs.NewUsageValue(accountInfo.ColdStorage),                                    // bytes in use
+		Total: fs.NewUsageValue(accountInfo.AvailableColdStorage),                           // bytes total
+		Free:  fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
+	}
+	return usage, nil
+}
+
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
 func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
 	o, err := f.NewObject(ctx, remote)
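Implementing About is what backs rclone's standard quota command, so after this change the following should report 1Fichier cold-storage usage (the remote name is illustrative):

```
rclone about fichier:
```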
@@ -182,3 +182,34 @@ type FoldersList struct {
 	Status     string   `json:"Status"`
 	SubFolders []Folder `json:"sub_folders"`
 }
+
+// AccountInfo is the structure how 1Fichier returns user info
+type AccountInfo struct {
+	StatsDate               string `json:"stats_date"`
+	MailRM                  string `json:"mail_rm"`
+	DefaultQuota            int64  `json:"default_quota"`
+	UploadForbidden         string `json:"upload_forbidden"`
+	PageLimit               int    `json:"page_limit"`
+	ColdStorage             int64  `json:"cold_storage"`
+	Status                  string `json:"status"`
+	UseCDN                  string `json:"use_cdn"`
+	AvailableColdStorage    int64  `json:"available_cold_storage"`
+	DefaultPort             string `json:"default_port"`
+	DefaultDomain           int    `json:"default_domain"`
+	Email                   string `json:"email"`
+	DownloadMenu            string `json:"download_menu"`
+	FTPDID                  int    `json:"ftp_did"`
+	DefaultPortFiles        string `json:"default_port_files"`
+	FTPReport               string `json:"ftp_report"`
+	OverQuota               int64  `json:"overquota"`
+	AvailableStorage        int64  `json:"available_storage"`
+	CDN                     string `json:"cdn"`
+	Offer                   string `json:"offer"`
+	SubscriptionEnd         string `json:"subscription_end"`
+	TFA                     string `json:"2fa"`
+	AllowedColdStorage      int64  `json:"allowed_cold_storage"`
+	HotStorage              int64  `json:"hot_storage"`
+	DefaultColdStorageQuota int64  `json:"default_cold_storage_quota"`
+	FTPMode                 string `json:"ftp_mode"`
+	RUReport                string `json:"ru_report"`
+}
@@ -25,7 +25,7 @@ import (
 	"net/url"
 	"path"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"sync/atomic"
 	"time"
@@ -12,10 +12,10 @@ import (
 	"path"
 	"runtime"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

-	"github.com/jlaffaye/ftp"
+	"github.com/rclone/ftp"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config"
@@ -52,11 +52,13 @@ func init() {
 			Help:     "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
 			Required: true,
 		}, {
-			Name: "user",
-			Help: "FTP username, leave blank for current username, " + currentUser + ".",
+			Name:    "user",
+			Help:    "FTP username.",
+			Default: currentUser,
 		}, {
-			Name: "port",
-			Help: "FTP port, leave blank to use default (21).",
+			Name:    "port",
+			Help:    "FTP port number.",
+			Default: 21,
 		}, {
 			Name: "pass",
 			Help: "FTP password.",
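With defaults now attached to user and port, a minimal config only needs the host and password. A hypothetical rclone.conf entry (values are placeholders):

```ini
[myftp]
type = ftp
host = ftp.example.com
; user defaults to the current OS username, port defaults to 21
pass = <obscured password>
```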
@@ -65,7 +65,7 @@ var (
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.TitleBarRedirectURL,
+		RedirectURL:  oauthutil.RedirectURL,
 	}
 )

@@ -182,15 +182,30 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 			}, {
 				Value: "asia-northeast1",
 				Help:  "Tokyo",
+			}, {
+				Value: "asia-northeast2",
+				Help:  "Osaka",
+			}, {
+				Value: "asia-northeast3",
+				Help:  "Seoul",
 			}, {
 				Value: "asia-south1",
 				Help:  "Mumbai",
+			}, {
+				Value: "asia-south2",
+				Help:  "Delhi",
 			}, {
 				Value: "asia-southeast1",
 				Help:  "Singapore",
+			}, {
+				Value: "asia-southeast2",
+				Help:  "Jakarta",
 			}, {
 				Value: "australia-southeast1",
 				Help:  "Sydney",
+			}, {
+				Value: "australia-southeast2",
+				Help:  "Melbourne",
 			}, {
 				Value: "europe-north1",
 				Help:  "Finland",
@@ -206,6 +221,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 			}, {
 				Value: "europe-west4",
 				Help:  "Netherlands",
+			}, {
+				Value: "europe-west6",
+				Help:  "Zürich",
+			}, {
+				Value: "europe-central2",
+				Help:  "Warsaw",
 			}, {
 				Value: "us-central1",
 				Help:  "Iowa",
@@ -221,6 +242,33 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 			}, {
 				Value: "us-west2",
 				Help:  "California",
+			}, {
+				Value: "us-west3",
+				Help:  "Salt Lake City",
+			}, {
+				Value: "us-west4",
+				Help:  "Las Vegas",
+			}, {
+				Value: "northamerica-northeast1",
+				Help:  "Montréal",
+			}, {
+				Value: "northamerica-northeast2",
+				Help:  "Toronto",
+			}, {
+				Value: "southamerica-east1",
+				Help:  "São Paulo",
+			}, {
+				Value: "southamerica-west1",
+				Help:  "Santiago",
+			}, {
+				Value: "asia1",
+				Help:  "Dual region: asia-northeast1 and asia-northeast2.",
+			}, {
+				Value: "eur4",
+				Help:  "Dual region: europe-north1 and europe-west4.",
+			}, {
+				Value: "nam4",
+				Help:  "Dual region: us-central1 and us-east1.",
+			}},
 		}, {
 			Name: "storage_class",
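The new dual-region values are used like any other location. A hypothetical rclone.conf entry selecting one (remote name is a placeholder):

```ini
[gcs]
type = google cloud storage
; bucket placement spans us-central1 and us-east1
location = nam4
```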
@@ -5,7 +5,7 @@ package googlephotos

 import (
 	"path"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

 	"github.com/rclone/rclone/backend/googlephotos/api"
 )
@@ -15,7 +15,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/backend/googlephotos/api"
@@ -69,7 +69,7 @@ var (
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.TitleBarRedirectURL,
+		RedirectURL:  oauthutil.RedirectURL,
 	}
 )
@@ -9,7 +9,7 @@ import (
 	"io"
 	"path"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
@@ -202,7 +202,11 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
 	for _, entry := range baseEntries {
 		switch x := entry.(type) {
 		case fs.Object:
-			hashEntries = append(hashEntries, f.wrapObject(x, nil))
+			obj, err := f.wrapObject(x, nil)
+			if err != nil {
+				return nil, err
+			}
+			hashEntries = append(hashEntries, obj)
 		default:
 			hashEntries = append(hashEntries, entry) // trash in - trash out
 		}
@@ -251,7 +255,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if do := f.Fs.Features().PutStream; do != nil {
 		_ = f.pruneHash(src.Remote())
 		oResult, err := do(ctx, in, src, options...)
-		return f.wrapObject(oResult, err), err
+		return f.wrapObject(oResult, err)
 	}
 	return nil, errors.New("PutStream not supported")
 }
@@ -261,7 +265,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	if do := f.Fs.Features().PutUnchecked; do != nil {
 		_ = f.pruneHash(src.Remote())
 		oResult, err := do(ctx, in, src, options...)
-		return f.wrapObject(oResult, err), err
+		return f.wrapObject(oResult, err)
 	}
 	return nil, errors.New("PutUnchecked not supported")
 }
@@ -348,7 +352,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantCopy
 	}
 	oResult, err := do(ctx, o.Object, remote)
-	return f.wrapObject(oResult, err), err
+	return f.wrapObject(oResult, err)
 }

 // Move src to this remote using server-side move operations.
@@ -371,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		dir: false,
 		fs:  f,
 	})
-	return f.wrapObject(oResult, nil), nil
+	return f.wrapObject(oResult, nil)
 }

 // DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
@@ -410,7 +414,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
 // NewObject finds the Object at remote.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	o, err := f.Fs.NewObject(ctx, remote)
-	return f.wrapObject(o, err), err
+	return f.wrapObject(o, err)
 }

 //
@@ -424,11 +428,15 @@ type Object struct {
 }

 // Wrap base object into hasher object
-func (f *Fs) wrapObject(o fs.Object, err error) *Object {
-	if err != nil || o == nil {
-		return nil
+func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
+	// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
+	if err != nil {
+		return nil, err
 	}
-	return &Object{Object: o, f: f}
+	if o == nil {
+		return nil, fs.ErrorObjectNotFound
+	}
+	return &Object{Object: o, f: f}, nil
 }

 // Fs returns read only access to the Fs that this object is part of
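The wrapObject rewrite above also sidesteps Go's typed-nil pitfall: the old version returned *Object, and a nil *Object stored into the fs.Object interface compares non-nil at call sites. A minimal demonstration of that trap and the explicit-error shape the new signature uses:

```go
package main

import "fmt"

type iface interface{ Name() string }

type impl struct{}

func (*impl) Name() string { return "impl" }

// bad returns a typed nil: the interface carries the concrete type
// *impl, so a caller's `!= nil` check passes even though the value
// is a nil pointer.
func bad() iface {
	var p *impl // nil pointer
	return p
}

// good returns an explicit error instead, as the new wrapObject does.
func good() (iface, error) {
	return nil, fmt.Errorf("object not found")
}

func main() {
	if v := bad(); v != nil {
		fmt.Println("interface looks non-nil; inner pointer is nil:", v)
	}
	if _, err := good(); err != nil {
		fmt.Println("explicit error is unambiguous:", err)
	}
}
```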
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
 // Put data into the remote path with given modTime and size
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	var (
-		o      *Object
+		o      fs.Object
 		common hash.Set
 		rehash bool
 		hashes hashMap
@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 	_ = f.pruneHash(src.Remote())
 	oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
-	o = f.wrapObject(oResult, err)
-	if o == nil {
+	o, err = f.wrapObject(oResult, err)
+	if err != nil {
 		return nil, err
 	}
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	}
 	if len(hashes) > 0 {
-		err := o.putHashes(ctx, hashes)
+		err := o.(*Object).putHashes(ctx, hashes)
 		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
 	}
 	return o, err
@@ -22,9 +22,8 @@ func init() {
 			Help:     "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
 			Required: true,
 		}, {
-			Name:     "username",
-			Help:     "Hadoop user name.",
-			Required: false,
+			Name: "username",
+			Help: "Hadoop user name.",
 			Examples: []fs.OptionExample{{
 				Value: "root",
 				Help:  "Connect to hdfs as root.",
@@ -36,7 +35,6 @@ func init() {
 Enables KERBEROS authentication. Specifies the Service Principal Name
 (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
 for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
-			Required: false,
 			Advanced: true,
 		}, {
 			Name: "data_transfer_protection",
@@ -46,7 +44,6 @@ Specifies whether or not authentication, data signature integrity
 checks, and wire encryption is required when communicating the the
 datanodes. Possible values are 'authentication', 'integrity' and
 'privacy'. Used only with KERBEROS enabled.`,
-			Required: false,
 			Examples: []fs.OptionExample{{
 				Value: "privacy",
 				Help:  "Ensure authentication, integrity and encryption enabled.",
@@ -15,7 +15,7 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
@@ -52,8 +52,7 @@ The input format is comma separated list of key,value pairs. Standard

 For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

-You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
-`,
+You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
 			Default:  fs.CommaSepList{},
 			Advanced: true,
 		}, {
@@ -74,8 +73,9 @@ directories.`,
 			Advanced: true,
 		}, {
 			Name: "no_head",
-			Help: `Don't use HEAD requests to find file sizes in dir listing.
+			Help: `Don't use HEAD requests.

+HEAD requests are mainly used to find file sizes in dir listing.
+If your site is being very slow to load then you can try this option.
 Normally rclone does a HEAD request for each potential file in a
 directory listing to:
@@ -84,12 +84,9 @@ directory listing to:
 - check it really exists
 - check to see if it is a directory

-If you set this option, rclone will not do the HEAD request. This will mean
-
-- directory listings are much quicker
-- rclone won't have the times or sizes of any files
-- some files that don't exist may be in the listing
-`,
+If you set this option, rclone will not do the HEAD request. This will mean
+that directory listings are much quicker, but rclone won't have the times or
+sizes of any files, and some files that don't exist may be in the listing.`,
 			Default:  false,
 			Advanced: true,
 		}},
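The reworked no_head help describes a speed-for-accuracy trade. A hypothetical rclone.conf entry opting into it (remote name and URL are placeholders):

```ini
[website]
type = http
url = https://example.com/files/
; faster listings, but no sizes or modtimes
no_head = true
```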
@@ -133,11 +130,87 @@ func statusError(res *http.Response, err error) error {
 	}
 	if res.StatusCode < 200 || res.StatusCode > 299 {
 		_ = res.Body.Close()
-		return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
+		return fmt.Errorf("HTTP Error: %s", res.Status)
 	}
 	return nil
 }

+// getFsEndpoint decides if url is to be considered a file or directory,
+// and returns a proper endpoint url to use for the fs.
+func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
+	// If url ends with '/' it is already a proper url always assumed to be a directory.
+	if url[len(url)-1] == '/' {
+		return url, false
+	}
+
+	// If url does not end with '/' we send a HEAD request to decide
+	// if it is directory or file, and if directory appends the missing
+	// '/', or if file returns the directory url to parent instead.
+	createFileResult := func() (string, bool) {
+		fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
+		parent, _ := path.Split(url)
+		return parent, true
+	}
+	createDirResult := func() (string, bool) {
+		fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
+		return url + "/", false
+	}
+
+	// If HEAD requests are not allowed we just have to assume it is a file.
+	if opt.NoHead {
+		fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
+		return createFileResult()
+	}
+
+	// Use a client which doesn't follow redirects so the server
+	// doesn't redirect http://host/dir to http://host/dir/
+	noRedir := *client
+	noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		return http.ErrUseLastResponse
+	}
+	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
+	if err != nil {
+		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
+		return createFileResult()
+	}
+	addHeaders(req, opt)
+	res, err := noRedir.Do(req)
+
+	if err != nil {
+		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
+		return createFileResult()
+	}
+	if res.StatusCode == http.StatusNotFound {
+		fs.Debugf(nil, "Assuming path is a directory as HEAD response is it does not exist as a file (%s)", res.Status)
+		return createDirResult()
+	}
+	if res.StatusCode == http.StatusMovedPermanently ||
+		res.StatusCode == http.StatusFound ||
+		res.StatusCode == http.StatusSeeOther ||
+		res.StatusCode == http.StatusTemporaryRedirect ||
+		res.StatusCode == http.StatusPermanentRedirect {
+		redir := res.Header.Get("Location")
+		if redir != "" {
+			if redir[len(redir)-1] == '/' {
+				fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
+				return createDirResult()
+			}
+			fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
+			return createFileResult()
+		}
+		fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
+		return createFileResult()
+	}
+	if res.StatusCode < 200 || res.StatusCode > 299 {
+		// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
+		fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
+		return createFileResult()
+	}
+
+	fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
+	return createFileResult()
+}
+
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -168,37 +241,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	client := fshttp.NewClient(ctx)

-	var isFile = false
-	if !strings.HasSuffix(u.String(), "/") {
-		// Make a client which doesn't follow redirects so the server
-		// doesn't redirect http://host/dir to http://host/dir/
-		noRedir := *client
-		noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
-			return http.ErrUseLastResponse
-		}
-		// check to see if points to a file
-		req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
-		if err == nil {
-			addHeaders(req, opt)
-			res, err := noRedir.Do(req)
-			err = statusError(res, err)
-			if err == nil {
-				isFile = true
-			}
-		}
-	}
-
-	newRoot := u.String()
-	if isFile {
-		// Point to the parent if this is a file
-		newRoot, _ = path.Split(u.String())
-	} else {
-		if !strings.HasSuffix(newRoot, "/") {
-			newRoot += "/"
-		}
-	}
-
-	u, err = url.Parse(newRoot)
+	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
+	fs.Debugf(nil, "Root: %s", endpoint)
+	u, err = url.Parse(endpoint)
 	if err != nil {
 		return nil, err
 	}
@@ -216,12 +261,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
+
 	if isFile {
+		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile
 	}
+
 	if !strings.HasSuffix(f.endpointURL, "/") {
 		return nil, errors.New("internal error: url doesn't end with /")
 	}
+
 	return f, nil
 }
@@ -297,7 +346,7 @@ func parseName(base *url.URL, name string) (string, error) {
 	}
 	// check it doesn't have URL parameters
 	uStr := u.String()
-	if strings.Index(uStr, "?") >= 0 {
+	if strings.Contains(uStr, "?") {
 		return "", errFoundQuestionMark
 	}
 	// check that this is going back to the same host and scheme
@@ -409,7 +458,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 		return nil, fmt.Errorf("readDir: %w", err)
 	}
 	default:
-		return nil, fmt.Errorf("Can't parse content type %q", contentType)
+		return nil, fmt.Errorf("can't parse content type %q", contentType)
 	}
 	return names, nil
 }
@@ -8,8 +8,10 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"os"
+	"path"
 	"path/filepath"
 	"sort"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -24,10 +26,11 @@ import (
 )

 var (
-	remoteName = "TestHTTP"
-	testPath   = "test"
-	filesPath  = filepath.Join(testPath, "files")
-	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
+	remoteName  = "TestHTTP"
+	testPath    = "test"
+	filesPath   = filepath.Join(testPath, "files")
+	headers     = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
+	lineEndSize = 1
 )

 // prepareServer the test server and return a function to tidy it up afterwards
@@ -35,6 +38,22 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// file server for test/files
 	fileServer := http.FileServer(http.Dir(filesPath))

+	// verify the file path is correct, and also check which line endings
+	// are used to get sizes right ("\n" except on Windows, but even there
+	// we may have "\n" or "\r\n" depending on git crlf setting)
+	fileList, err := ioutil.ReadDir(filesPath)
+	require.NoError(t, err)
+	require.Greater(t, len(fileList), 0)
+	for _, file := range fileList {
+		if !file.IsDir() {
+			data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
+			if strings.HasSuffix(string(data), "\r\n") {
+				lineEndSize = 2
+			}
+			break
+		}
+	}
+
 	// test the headers are there then pass on to fileServer
 	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -91,7 +110,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

 	e = entries[1]
 	assert.Equal(t, "one%.txt", e.Remote())
-	assert.Equal(t, int64(6), e.Size())
+	assert.Equal(t, int64(5+lineEndSize), e.Size())
 	_, ok = e.(*Object)
 	assert.True(t, ok)

@@ -108,7 +127,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
 		_, ok = e.(fs.Directory)
 		assert.True(t, ok)
 	} else {
-		assert.Equal(t, int64(41), e.Size())
+		assert.Equal(t, int64(40+lineEndSize), e.Size())
 		_, ok = e.(*Object)
 		assert.True(t, ok)
 	}
@@ -141,7 +160,7 @@ func TestListSubDir(t *testing.T) {

 	e := entries[0]
 	assert.Equal(t, "three/underthree.txt", e.Remote())
-	assert.Equal(t, int64(9), e.Size())
+	assert.Equal(t, int64(8+lineEndSize), e.Size())
 	_, ok := e.(*Object)
 	assert.True(t, ok)
 }
@@ -154,7 +173,7 @@ func TestNewObject(t *testing.T) {
 	require.NoError(t, err)

 	assert.Equal(t, "four/under four.txt", o.Remote())
-	assert.Equal(t, int64(9), o.Size())
+	assert.Equal(t, int64(8+lineEndSize), o.Size())
 	_, ok := o.(*Object)
 	assert.True(t, ok)
@@ -187,7 +206,11 @@ func TestOpen(t *testing.T) {
 	data, err := ioutil.ReadAll(fd)
 	require.NoError(t, err)
 	require.NoError(t, fd.Close())
-	assert.Equal(t, "beetroot\n", string(data))
+	if lineEndSize == 2 {
+		assert.Equal(t, "beetroot\r\n", string(data))
+	} else {
+		assert.Equal(t, "beetroot\n", string(data))
+	}

 	// Test with range request
 	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -236,7 +259,7 @@ func TestIsAFileSubDir(t *testing.T) {

 	e := entries[0]
 	assert.Equal(t, "underthree.txt", e.Remote())
-	assert.Equal(t, int64(9), e.Size())
+	assert.Equal(t, int64(8+lineEndSize), e.Size())
 	_, ok := e.(*Object)
 	assert.True(t, ok)
 }
@@ -353,3 +376,106 @@ func TestParseCaddy(t *testing.T) {
 		"v1.36-22-g06ea13a-ssh-agentβ/",
 	})
 }
+
+func TestFsNoSlashRoots(t *testing.T) {
+	// Test Fs with roots that does not end with '/', the logic that
+	// decides if url is to be considered a file or directory, based
+	// on result from a HEAD request.
+
+	// Handler for faking HEAD responses with different status codes
+	headCount := 0
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method == "HEAD" {
+			headCount++
+			responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
+			require.NoError(t, err)
+			if strings.HasPrefix(r.URL.String(), "/redirect/") {
+				var redir string
+				if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
+					redir = "/redirected"
+				} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
+					redir = "/redirected/"
+				} else {
+					require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
+				}
+				http.Redirect(w, r, redir, responseCode)
+			} else {
+				http.Error(w, http.StatusText(responseCode), responseCode)
+			}
+		}
+	})
+
+	// Make the test server
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	// Configure the remote
+	configfile.Install()
+	m := configmap.Simple{
+		"type": "http",
+		"url":  ts.URL,
+	}
+
+	// Test
+	for i, test := range []struct {
+		root   string
+		isFile bool
+	}{
+		// 2xx success
+		{"parent/200", true},
+		{"parent/204", true},
+
+		// 3xx redirection Redirect status 301, 302, 303, 307, 308
+		{"redirect/file/301", true},  // Request is redirected to "/redirected"
+		{"redirect/dir/301", false},  // Request is redirected to "/redirected/"
+		{"redirect/file/302", true},  // Request is redirected to "/redirected"
+		{"redirect/dir/302", false},  // Request is redirected to "/redirected/"
+		{"redirect/file/303", true},  // Request is redirected to "/redirected"
+		{"redirect/dir/303", false},  // Request is redirected to "/redirected/"
+
+		{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
+		{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
+		{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
+
+		{"redirect/file/307", true},  // Request is redirected to "/redirected"
+		{"redirect/dir/307", false},  // Request is redirected to "/redirected/"
+		{"redirect/file/308", true},  // Request is redirected to "/redirected"
+		{"redirect/dir/308", false},  // Request is redirected to "/redirected/"
+
+		// 4xx client errors
+		{"parent/403", true},  // Forbidden status (head request blocked)
+		{"parent/404", false}, // Not found status
+	} {
+		for _, noHead := range []bool{false, true} {
+			var isFile bool
+			if noHead {
+				m.Set("no_head", "true")
+				isFile = true
+			} else {
+				m.Set("no_head", "false")
+				isFile = test.isFile
+			}
+			headCount = 0
+			f, err := NewFs(context.Background(), remoteName, test.root, m)
+			if noHead {
+				assert.Equal(t, 0, headCount)
+			} else {
+				assert.Equal(t, 1, headCount)
+			}
+			if isFile {
+				assert.ErrorIs(t, err, fs.ErrorIsFile)
+			} else {
+				assert.NoError(t, err)
+			}
+			var endpoint string
+			if isFile {
+				parent, _ := path.Split(test.root)
+				endpoint = "/" + parent
+			} else {
+				endpoint = "/" + test.root + "/"
+			}
+			what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
+			assert.Equal(t, ts.URL+endpoint, f.String(), what)
+		}
+	}
+}
@@ -7,6 +7,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"encoding/xml"
 	"errors"
 	"fmt"
 	"io"
@@ -931,49 +932,121 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return entries, nil
 }

-// listFileDirFn is called from listFileDir to handle an object.
-type listFileDirFn func(fs.DirEntry) error
+type listStreamTime time.Time

-// List the objects and directories into entries, from a
-// special kind of JottaFolder representing a FileDirLis
-func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
-	pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
-	pathPrefixLength := len(pathPrefix)
-	startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
-	startPathLength := len(startPath)
-	for i := range startFolder.Folders {
-		folder := &startFolder.Folders[i]
-		if !f.validFolder(folder) {
-			return nil
+func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	var v string
+	if err := d.DecodeElement(&v, &start); err != nil {
+		return err
+	}
+	t, err := time.Parse(time.RFC3339, v)
+	if err != nil {
+		return err
+	}
+	*c = listStreamTime(t)
+	return nil
+}
+
+func (c listStreamTime) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
+}
+
+func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
+
+	type stats struct {
+		Folders int `xml:"folders"`
+		Files   int `xml:"files"`
+	}
+	var expected, actual stats
+
+	type xmlFile struct {
+		Path     string         `xml:"path"`
+		Name     string         `xml:"filename"`
+		Checksum string         `xml:"md5"`
+		Size     int64          `xml:"size"`
+		Modified listStreamTime `xml:"modified"`
+		Created  listStreamTime `xml:"created"`
+	}
+
+	type xmlFolder struct {
+		Path string `xml:"path"`
+	}
+
+	addFolder := func(path string) error {
+		return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
+	}
+
+	addFile := func(f *xmlFile) error {
+		return callback(&Object{
+			hasMetaData: true,
+			fs:          filesystem,
+			remote:      filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
+			size:        f.Size,
+			md5:         f.Checksum,
+			modTime:     time.Time(f.Modified),
+		})
+	}
+
+	trimPathPrefix := func(p string) string {
+		p = strings.TrimPrefix(p, trimPrefix)
+		p = strings.TrimPrefix(p, "/")
+		return p
+	}
+
+	uniqueFolders := map[string]bool{}
+	decoder := xml.NewDecoder(r)
+
+	for {
+		t, err := decoder.Token()
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			break
 		}
-		folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
-		folderPathLength := len(folderPath)
-		var remoteDir string
-		if folderPathLength > pathPrefixLength {
-			remoteDir = folderPath[pathPrefixLength+1:]
-			if folderPathLength > startPathLength {
-				d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
-				err := fn(d)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		for i := range folder.Files {
-			file := &folder.Files[i]
-			if f.validFile(file) {
-				remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
-				o, err := f.newObjectWithInfo(ctx, remoteFile, file)
-				if err != nil {
-					return err
-				}
-				err = fn(o)
-				if err != nil {
+		switch se := t.(type) {
+		case xml.StartElement:
+			switch se.Name.Local {
+			case "file":
+				var f xmlFile
+				if err := decoder.DecodeElement(&f, &se); err != nil {
 					return err
 				}
+				f.Path = trimPathPrefix(f.Path)
+				actual.Files++
+				if !uniqueFolders[f.Path] {
+					uniqueFolders[f.Path] = true
+					actual.Folders++
+					if err := addFolder(f.Path); err != nil {
+						return err
+					}
+				}
+				if err := addFile(&f); err != nil {
+					return err
+				}
+			case "folder":
+				var f xmlFolder
+				if err := decoder.DecodeElement(&f, &se); err != nil {
+					return err
+				}
+				f.Path = trimPathPrefix(f.Path)
+				uniqueFolders[f.Path] = true
+				actual.Folders++
+				if err := addFolder(f.Path); err != nil {
+					return err
+				}
+			case "stats":
+				if err := decoder.DecodeElement(&expected, &se); err != nil {
+					return err
+				}
+			}
 		}
 	}
+
+	if expected.Folders != actual.Folders ||
+		expected.Files != actual.Files {
+		return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
+	}
 	return nil
 }
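parseListRStream above decodes the liststream response token by token rather than unmarshalling the whole tree, so memory use stays flat however large the listing is. A standalone sketch of the same xml.Decoder pattern (element names are demo values, not the Jottacloud schema):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// Stream-decode <file> elements one at a time, so an arbitrarily long
// listing never has to fit in memory at once.
func main() {
	const doc = `<list><file><name>a.txt</name></file><file><name>b.txt</name></file></list>`
	type file struct {
		Name string `xml:"name"`
	}
	dec := xml.NewDecoder(strings.NewReader(doc))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "file" {
			var f file
			// DecodeElement consumes exactly this element's subtree.
			if err := dec.DecodeElement(&f, &se); err != nil {
				panic(err)
			}
			fmt.Println(f.Name)
		}
	}
}
```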
@@ -988,12 +1061,27 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		Path:       f.filePath(dir),
 		Parameters: url.Values{},
 	}
-	opts.Parameters.Set("mode", "list")
+	opts.Parameters.Set("mode", "liststream")
+	list := walk.NewListRHelper(callback)

 	var resp *http.Response
-	var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
+		resp, err = f.srv.Call(ctx, &opts)
+		if err != nil {
+			return shouldRetry(ctx, resp, err)
+		}
+
+		// liststream paths are /mountpoint/root/path
+		// so the returned paths should have /mountpoint/root/ trimmed
+		// as the caller is expecting path.
+		trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
+		err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
+			if d.Remote() == dir {
+				return nil
+			}
+			return list.Add(d)
+		})
+		_ = resp.Body.Close()
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1005,10 +1093,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		}
 		return fmt.Errorf("couldn't list files: %w", err)
 	}
-	list := walk.NewListRHelper(callback)
-	err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
-		return list.Add(entry)
-	})
 	if err != nil {
 		return err
 	}
@@ -28,33 +28,57 @@ import (
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "koofr",
		Description: "Koofr",
		Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
			Help: "Choose your storage provider.",
			// NOTE if you add a new provider here, then add it in the
			// setProviderDefaults() function and update options accordingly
			Examples: []fs.OptionExample{{
				Value: "koofr",
				Help:  "Koofr, https://app.koofr.net/",
			}, {
				Value: "digistorage",
				Help:  "Digi Storage, https://storage.rcs-rds.ro/",
			}, {
				Value: "other",
				Help:  "Any other Koofr API compatible storage service",
			}},
		}, {
			Name:     "endpoint",
			Help:     "The Koofr API endpoint to use.",
			Default:  "https://app.koofr.net",
			Provider: "other",
			Required: true,
			Advanced: true,
		}, {
			Name:     "mountid",
			Help:     "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
			Required: false,
			Default:  "",
			Advanced: true,
		}, {
			Name:     "setmtime",
			Help:     "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
			Default:  true,
			Required: true,
			Advanced: true,
		}, {
			Name:     "user",
			Help:     "Your Koofr user name.",
			Help:     "Your user name.",
			Required: true,
		}, {
			Name:       "password",
			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
			Help:       "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
			Provider:   "koofr",
			IsPassword: true,
			Required:   true,
		}, {
			Name:       "password",
			Help:       "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
			Provider:   "digistorage",
			IsPassword: true,
			Required:   true,
		}, {
			Name:       "password",
			Help:       "Your password for rclone (generate one at your service's settings page).",
			Provider:   "other",
			IsPassword: true,
			Required:   true,
		}, {
@@ -71,6 +95,7 @@ func init() {

// Options represent the configuration of the Koofr backend
type Options struct {
	Provider string `config:"provider"`
	Endpoint string `config:"endpoint"`
	MountID  string `config:"mountid"`
	User     string `config:"user"`
@@ -255,13 +280,38 @@ func (f *Fs) fullPath(part string) string {
	return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
func setProviderDefaults(opt *Options) {
	// handle old, provider-less configs
	if opt.Provider == "" {
		if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
			opt.Provider = "koofr"
		} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
			opt.Provider = "digistorage"
		} else {
			opt.Provider = "other"
		}
	}
	// now assign an endpoint
	if opt.Provider == "koofr" {
		opt.Endpoint = "https://app.koofr.net"
	} else if opt.Provider == "digistorage" {
		opt.Endpoint = "https://storage.rcs-rds.ro"
	}
}

// NewFs constructs a new filesystem given a root path and rclone configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	setProviderDefaults(opt)
	return NewFsFromOptions(ctx, name, root, opt)
}

// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
	pass, err := obscure.Reveal(opt.Password)
	if err != nil {
		return nil, err
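A quick illustration of how the migration logic above classifies a pre-provider config; a sketch using the Options struct and setProviderDefaults from this diff, with a hypothetical old remote that only had an endpoint set:

// Hypothetical: an old config that predates the provider option.
opt := &Options{Endpoint: "https://storage.rcs-rds.ro"}
setProviderDefaults(opt)
// opt.Provider is now "digistorage" and opt.Endpoint stays pinned to
// "https://storage.rcs-rds.ro", so old remotes keep working unchanged.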
@@ -13,7 +13,7 @@ import (
	"path/filepath"
	"runtime"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"
	"unicode/utf8"
@@ -1133,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	return err
}

// Wipe hashes before update
o.clearHashCache()

var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
@@ -1295,6 +1298,13 @@ func (o *Object) setMetadata(info os.FileInfo) {
	}
}

// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
	o.fs.objectMetaMu.Lock()
	o.hashes = nil
	o.fs.objectMetaMu.Unlock()
}

// Stat an Object into info
func (o *Object) lstat() error {
	info, err := o.fs.lstat(o.path)
@@ -1306,6 +1316,7 @@ func (o *Object) lstat() error {

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	o.clearHashCache()
	return remove(o.path)
}
@@ -1,6 +1,7 @@
package local

import (
	"bytes"
	"context"
	"io/ioutil"
	"os"
@@ -12,6 +13,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/readers"
@@ -166,3 +168,64 @@ func TestSymlinkError(t *testing.T) {
	_, err := NewFs(context.Background(), "local", "/", m)
	assert.Equal(t, errLinksAndCopyLinks, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
	ctx := context.Background()
	r := fstest.NewRun(t)
	defer r.Finalise()
	const filePath = "file.txt"
	when := time.Now()
	r.WriteFile(filePath, "content", when)
	f := r.Flocal.(*Fs)

	// Get the object
	o, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)

	// Test the hash is as we expect
	md5, err := o.Hash(ctx, hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

	// Reupload it with different contents but same size and timestamp
	var b = bytes.NewBufferString("CONTENT")
	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
	err = o.Update(ctx, b, src)
	require.NoError(t, err)

	// Check the hash is as expected
	md5, err = o.Hash(ctx, hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}

// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
	ctx := context.Background()
	r := fstest.NewRun(t)
	defer r.Finalise()
	const filePath = "file.txt"
	when := time.Now()
	r.WriteFile(filePath, "content", when)
	f := r.Flocal.(*Fs)

	// Get the object
	o, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)

	// Test the hash is as we expect
	md5, err := o.Hash(ctx, hash.MD5)
	require.NoError(t, err)
	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

	// Delete the object
	require.NoError(t, o.Remove(ctx))

	// Test the hash cache is empty
	require.Nil(t, o.(*Object).hashes)

	// Test the hash returns an error
	_, err = o.Hash(ctx, hash.MD5)
	require.Error(t, err)
}
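The two hex constants asserted above are simply the MD5 digests of the test payloads; a one-off sketch to verify them:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	// These match the values asserted in the tests above.
	fmt.Printf("%x\n", md5.Sum([]byte("content"))) // 9a0364b9e99bb480dd25e1f0284c8555
	fmt.Printf("%x\n", md5.Sum([]byte("CONTENT"))) // 45685e95985e20822fb2538a522a5ccf
}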
@@ -3,7 +3,7 @@ package local
import (
	"io/ioutil"
	"os"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"testing"
	"time"
@@ -58,7 +58,7 @@ type UserInfoResponse struct {
	AutoProlong bool  `json:"auto_prolong"`
	Basequota   int64 `json:"basequota"`
	Enabled     bool  `json:"enabled"`
	Expires     int   `json:"expires"`
	Expires     int64 `json:"expires"`
	Prolong     bool  `json:"prolong"`
	Promocodes  struct {
	} `json:"promocodes"`
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
	FileSizeLimit int64 `json:"file_size_limit"`
	Space         struct {
		BytesTotal int64 `json:"bytes_total"`
		BytesUsed  int   `json:"bytes_used"`
		BytesUsed  int64 `json:"bytes_used"`
		Overquota  bool  `json:"overquota"`
	} `json:"space"`
} `json:"cloud"`
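The int-to-int64 change above matters because Go's int is only 32 bits on 32-bit platforms, so byte counts above roughly 2 GiB could not be represented there. A sketch of the decoding that now works on all platforms:

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

func main() {
	// bytes_used for a 3 GiB account exceeds a 32-bit int
	// (max 2147483647), which is why the field is now int64.
	var v struct {
		BytesUsed int64 `json:"bytes_used"`
	}
	data := []byte(`{"bytes_used": 3221225472}`)
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.BytesUsed > math.MaxInt32) // true
}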
@@ -12,7 +12,7 @@ import (
	"sort"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"encoding/hex"
@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}

total := info.Body.Cloud.Space.BytesTotal
used := int64(info.Body.Cloud.Space.BytesUsed)
used := info.Body.Cloud.Space.BytesUsed

usage := &fs.Usage{
	Total: fs.NewUsageValue(total),
@@ -22,7 +22,7 @@ import (
	"io"
	"path"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -11,7 +11,7 @@ import (
	"io/ioutil"
	"path"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
backend/netstorage/netstorage.go (new executable file, 1277 lines; file diff suppressed because it is too large)

backend/netstorage/netstorage_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package netstorage_test

import (
	"testing"

	"github.com/rclone/rclone/backend/netstorage"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestnStorage:",
		NilObject:  (*netstorage.Object)(nil),
	})
}
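To exercise this against a configured remote, rclone's usual integration-test invocation looks like the following (the -remote flag is the project's standard convention rather than something this diff introduces; the remote name comes from Opt above):

go test -v ./backend/netstorage -remote TestnStorage: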
@@ -16,7 +16,7 @@ import (
	"regexp"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/onedrive/api"
@@ -65,9 +65,12 @@ var (
	authPath  = "/common/oauth2/v2.0/authorize"
	tokenPath = "/common/oauth2/v2.0/token"

	scopesWithSitePermission    = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
	scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
		Scopes:       scopesWithSitePermission,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -137,6 +140,26 @@ Note that the chunks will be buffered into memory.`,
	Help:     "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
	Default:  "",
	Advanced: true,
}, {
	Name: "root_folder_id",
	Help: `ID of the root folder.

This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
	Advanced: true,
}, {
	Name: "disable_site_permission",
	Help: `Disable the request for Sites.Read.All permission.

If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization disallows users to consent app permission
request on their own.`,
	Default:  false,
	Advanced: true,
}, {
	Name: "expose_onenote_files",
	Help: `Set to make OneNote files show up in directory listings.
@@ -374,6 +397,12 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
	region, graphURL := getRegionURL(m)

	if config.State == "" {
		disableSitePermission, _ := m.Get("disable_site_permission")
		if disableSitePermission == "true" {
			oauthConfig.Scopes = scopesWithoutSitePermission
		} else {
			oauthConfig.Scopes = scopesWithSitePermission
		}
		oauthConfig.Endpoint = oauth2.Endpoint{
			AuthURL:  authEndpoint[region] + authPath,
			TokenURL: authEndpoint[region] + tokenPath,
@@ -527,6 +556,8 @@ type Options struct {
	ChunkSize               fs.SizeSuffix `config:"chunk_size"`
	DriveID                 string        `config:"drive_id"`
	DriveType               string        `config:"drive_type"`
	RootFolderID            string        `config:"root_folder_id"`
	DisableSitePermission   bool          `config:"disable_site_permission"`
	ExposeOneNoteFiles      bool          `config:"expose_onenote_files"`
	ServerSideAcrossConfigs bool          `config:"server_side_across_configs"`
	ListChunk               int64         `config:"list_chunk"`
@@ -618,6 +649,12 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
	retry := false
	if resp != nil {
		switch resp.StatusCode {
		case 400:
			if apiErr, ok := err.(*api.Error); ok {
				if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
					return false, fserrors.NoRetryError(err)
				}
			}
		case 401:
			if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
				retry = true
@@ -789,6 +826,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
	if opt.DisableSitePermission {
		oauthConfig.Scopes = scopesWithoutSitePermission
	} else {
		oauthConfig.Scopes = scopesWithSitePermission
	}
	oauthConfig.Endpoint = oauth2.Endpoint{
		AuthURL:  authEndpoint[opt.Region] + authPath,
		TokenURL: authEndpoint[opt.Region] + tokenPath,
@@ -826,15 +868,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	})

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
	if err != nil {
		return nil, fmt.Errorf("failed to get root: %w", err)
	var rootID = opt.RootFolderID
	if rootID == "" {
		rootInfo, _, err := f.readMetaDataForPath(ctx, "")
		if err != nil {
			return nil, fmt.Errorf("failed to get root: %w", err)
		}
		rootID = rootInfo.GetID()
	}
	if rootInfo.GetID() == "" {
	if rootID == "" {
		return nil, errors.New("failed to get root: ID was empty")
	}

	f.dirCache = dircache.New(root, rootInfo.GetID(), f)
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
@@ -842,7 +888,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	// Assume it is a file
	newRoot, remote := dircache.SplitPath(root)
	tempF := *f
	tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
	tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
	tempF.root = newRoot
	// Make new Fs which is the parent
	err = tempF.dirCache.FindRoot(ctx, false)
|
||||
func (q *quickXorHash) checkSum() (h [Size]byte) {
|
||||
// Output the data as little endian bytes
|
||||
ph := 0
|
||||
for _, d := range q.data[:len(q.data)-1] {
|
||||
for i := 0; i < len(q.data)-1; i++ {
|
||||
d := q.data[i]
|
||||
_ = h[ph+7] // bounds check
|
||||
h[ph+0] = byte(d >> (8 * 0))
|
||||
h[ph+1] = byte(d >> (8 * 1))
|
||||
|
||||
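The loop above hand-rolls little-endian serialization of each 64-bit word by shifting out one byte at a time. For comparison, a minimal sketch of the equivalent using encoding/binary:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	data := []uint64{0x0123456789abcdef}
	h := make([]byte, 8*len(data))
	for i, d := range data {
		// Writes the least significant byte first, like the shifts above.
		binary.LittleEndian.PutUint64(h[8*i:], d)
	}
	fmt.Printf("% x\n", h) // ef cd ab 89 67 45 23 01
}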
@@ -2,8 +2,6 @@
// object storage system.
package pcloud

// FIXME implement ListR? /listfolder can do recursive lists

// FIXME cleanup returns login required?

// FIXME mime type? Fix overview if implement.
@@ -27,6 +25,7 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -246,7 +245,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
	return nil, err
}

found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
	if item.Name == leaf {
		info = item
		return true
@@ -380,7 +379,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
	found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
		if item.Name == leaf {
			pathIDOut = item.ID
			return true
@@ -446,14 +445,16 @@ type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/listfolder",
		Parameters: url.Values{},
	}
	if recursive {
		opts.Parameters.Set("recursive", "1")
	}
	opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
	// FIXME can do recursive

	var result api.ItemResult
	var resp *http.Response
@@ -465,26 +466,71 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
	if err != nil {
		return found, fmt.Errorf("couldn't list files: %w", err)
	}
	for i := range result.Metadata.Contents {
		item := &result.Metadata.Contents[i]
		if item.IsFolder {
			if filesOnly {
				continue
	var recursiveContents func(is []api.Item, path string)
	recursiveContents = func(is []api.Item, path string) {
		for i := range is {
			item := &is[i]
			if item.IsFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
		} else {
			if directoriesOnly {
				continue
			item.Name = path + f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break
			}
			if recursive {
				recursiveContents(item.Contents, item.Name+"/")
			}
		}
		item.Name = f.opt.Enc.ToStandardName(item.Name)
		if fn(item) {
			found = true
			break
		}
	}
	recursiveContents(result.Metadata.Contents, "")
	return
}

// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.IsFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
			// FIXME more info from dir?
			iErr = callback(d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			iErr = callback(o)
		}
		if iErr != nil {
			return true
		}
		return false
	})
	if err != nil {
		return err
	}
	if iErr != nil {
		return iErr
	}
	return nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -495,36 +541,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.IsFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
			// FIXME more info from dir?
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
		entries = append(entries, o)
		return nil
	})
	return entries, err
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	list := walk.NewListRHelper(callback)
	err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
		return list.Add(o)
	})
	if err != nil {
		return nil, err
		return err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
	return list.Flush()
}

// Creates from the parameters passed in a half finished Object which
@@ -656,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
	opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
	opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
	var resp *http.Response
	var result api.ItemResult
	err = f.pacer.Call(func() (bool, error) {
@@ -1137,7 +1171,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	opts.Parameters.Set("filename", leaf)
	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("nopartial", "1")
	opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
	opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))

	// Special treatment for a 0 length upload. This doesn't work
	// with PUT even with Content-Length set (by setting
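The recursiveContents closure above flattens pCloud's nested /listfolder?recursive=1 reply into full paths by carrying the parent name down as a prefix. The same walk in isolation, with the item type simplified from api.Item:

package main

import "fmt"

type item struct {
	Name     string
	IsFolder bool
	Contents []item
}

func walk(items []item, prefix string, fn func(name string)) {
	for i := range items {
		it := &items[i]
		name := prefix + it.Name
		fn(name)
		if it.IsFolder {
			// Nested entries arrive inside the parent, so recurse
			// with the parent's name as the new prefix.
			walk(it.Contents, name+"/", fn)
		}
	}
}

func main() {
	tree := []item{{Name: "dir", IsFolder: true, Contents: []item{{Name: "a.txt"}}}}
	walk(tree, "", func(name string) { fmt.Println(name) })
	// Output:
	// dir
	// dir/a.txt
}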
@@ -13,7 +13,7 @@ import (
	"hash"
	"io"
	"sort"
	"github.com/rclone/rclone/lib/sync"
	"sync"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"
backend/s3/s3.go (168 lines changed)
@@ -19,7 +19,7 @@ import (
	"sort"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
@@ -58,7 +58,7 @@ import (
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "s3",
		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, RackCorp, SeaweedFS, and Tencent COS",
		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
@@ -84,6 +84,9 @@ func init() {
		}, {
			Value: "IBMCOS",
			Help:  "IBM COS S3",
		}, {
			Value: "LyveCloud",
			Help:  "Seagate Lyve Cloud",
		}, {
			Value: "Minio",
			Help:  "Minio Object Storage",
@@ -102,6 +105,9 @@ func init() {
		}, {
			Value: "StackPath",
			Help:  "StackPath Object Storage",
		}, {
			Value: "Storj",
			Help:  "Storj (S3 Compatible Gateway)",
		}, {
			Value: "TencentCOS",
			Help:  "Tencent Cloud Object Storage (COS)",
@@ -288,7 +294,7 @@ func init() {
		}, {
			Name:     "region",
			Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
			Provider: "!AWS,Alibaba,RackCorp,Scaleway,TencentCOS",
			Provider: "!AWS,Alibaba,RackCorp,Scaleway,Storj,TencentCOS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -597,6 +603,20 @@ func init() {
				Value: "s3.eu-central-1.stackpathstorage.com",
				Help:  "EU Endpoint",
			}},
		}, {
			Name:     "endpoint",
			Help:     "Endpoint of the Shared Gateway.",
			Provider: "Storj",
			Examples: []fs.OptionExample{{
				Value: "gateway.eu1.storjshare.io",
				Help:  "EU1 Shared Gateway",
			}, {
				Value: "gateway.us1.storjshare.io",
				Help:  "US1 Shared Gateway",
			}, {
				Value: "gateway.ap1.storjshare.io",
				Help:  "Asia-Pacific Shared Gateway",
			}},
		}, {
			// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
			Name: "endpoint",
@@ -726,7 +746,7 @@ func init() {
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,RackCorp",
			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,Storj,RackCorp",
			Examples: []fs.OptionExample{{
				Value: "objects-us-east-1.dream.io",
				Help:  "Dream Objects endpoint",
@@ -747,6 +767,18 @@ func init() {
				Value:    "localhost:8333",
				Help:     "SeaweedFS S3 localhost",
				Provider: "SeaweedFS",
			}, {
				Value:    "s3.us-east-1.lyvecloud.seagate.com",
				Help:     "Seagate Lyve Cloud US East 1 (Virginia)",
				Provider: "LyveCloud",
			}, {
				Value:    "s3.us-west-1.lyvecloud.seagate.com",
				Help:     "Seagate Lyve Cloud US West 1 (California)",
				Provider: "LyveCloud",
			}, {
				Value:    "s3.ap-southeast-1.lyvecloud.seagate.com",
				Help:     "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
				Provider: "LyveCloud",
			}, {
				Value:    "s3.wasabisys.com",
				Help:     "Wasabi US East endpoint",
@@ -761,7 +793,11 @@ func init() {
				Provider: "Wasabi",
			}, {
				Value:    "s3.ap-northeast-1.wasabisys.com",
				Help:     "Wasabi AP Northeast endpoint",
				Help:     "Wasabi AP Northeast 1 (Tokyo) endpoint",
				Provider: "Wasabi",
			}, {
				Value:    "s3.ap-northeast-2.wasabisys.com",
				Help:     "Wasabi AP Northeast 2 (Osaka) endpoint",
				Provider: "Wasabi",
			}},
		}, {
@@ -1010,7 +1046,7 @@ func init() {
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
			Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,TencentCOS",
			Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
		}, {
			Name: "acl",
			Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1021,6 +1057,7 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview

Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
			Provider: "!Storj",
			Examples: []fs.OptionExample{{
				Value: "default",
				Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1180,6 +1217,9 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			}, {
				Value: "INTELLIGENT_TIERING",
				Help:  "Intelligent-Tiering storage class",
			}, {
				Value: "GLACIER_IR",
				Help:  "Glacier Instant Retrieval storage class",
			}},
		}, {
			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
@@ -1523,6 +1563,14 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
This is usually set to a CloudFront CDN URL as AWS S3 offers
cheaper egress for data downloaded through the CloudFront network.`,
			Advanced: true,
		}, {
			Name: "use_multipart_etag",
			Help: `Whether to use ETag in multipart uploads for verification

This should be true, false or left unset to use the default for the provider.
`,
			Default:  fs.Tristate{},
			Advanced: true,
		},
	}})
}
@@ -1587,6 +1635,7 @@ type Options struct {
	MemoryPoolUseMmap bool        `config:"memory_pool_use_mmap"`
	DisableHTTP2      bool        `config:"disable_http2"`
	DownloadURL       string      `config:"download_url"`
	UseMultipartEtag  fs.Tristate `config:"use_multipart_etag"`
}

// Fs represents a remote s3 server
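With the Storj provider added above, a remote pointing at the shared gateway might be configured like this (the credential values are placeholders; the option names are the s3 backend's existing keys):

[storj-s3]
type = s3
provider = Storj
access_key_id = PLACEHOLDER
secret_access_key = PLACEHOLDER
endpoint = gateway.eu1.storjshare.io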
@@ -1890,12 +1939,13 @@ func setQuirks(opt *Options) {
	listObjectsV2     = true
	virtualHostStyle  = true
	urlEncodeListings = true
	useMultipartEtag  = true
)
switch opt.Provider {
case "AWS":
	// No quirks
case "Alibaba":
	// No quirks
	useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
case "Ceph":
	listObjectsV2 = false
	virtualHostStyle = false
@@ -1908,13 +1958,18 @@ func setQuirks(opt *Options) {
	listObjectsV2 = false // untested
	virtualHostStyle = false
	urlEncodeListings = false
	useMultipartEtag = false // untested
case "LyveCloud":
	useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
	virtualHostStyle = false
case "Netease":
	listObjectsV2 = false // untested
	urlEncodeListings = false
	useMultipartEtag = false // untested
case "RackCorp":
	// No quirks
	useMultipartEtag = false // untested
case "Scaleway":
	// Scaleway can only have 1000 parts in an upload
	if opt.MaxUploadParts > 1000 {
@@ -1925,23 +1980,32 @@ func setQuirks(opt *Options) {
	listObjectsV2 = false // untested
	virtualHostStyle = false
	urlEncodeListings = false
	useMultipartEtag = false // untested
case "StackPath":
	listObjectsV2 = false // untested
	virtualHostStyle = false
	urlEncodeListings = false
case "Storj":
	// Force chunk size to >= 64 MiB
	if opt.ChunkSize < 64*fs.Mebi {
		opt.ChunkSize = 64 * fs.Mebi
	}
case "TencentCOS":
	listObjectsV2 = false // untested
	listObjectsV2 = false    // untested
	useMultipartEtag = false // untested
case "Wasabi":
	// No quirks
case "Other":
	listObjectsV2 = false
	virtualHostStyle = false
	urlEncodeListings = false
	useMultipartEtag = false
default:
	fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
	listObjectsV2 = false
	virtualHostStyle = false
	urlEncodeListings = false
	useMultipartEtag = false
}

// Path Style vs Virtual Host style
@@ -1963,6 +2027,12 @@ func setQuirks(opt *Options) {
		opt.ListVersion = 1
	}
}

// Set the correct use multipart Etag for error checking if not manually set
if !opt.UseMultipartEtag.Valid {
	opt.UseMultipartEtag.Valid = true
	opt.UseMultipartEtag.Value = useMultipartEtag
}
}

// setRoot changes the root of the Fs
@@ -2063,6 +2133,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	// return an error with an fs which points to the parent
	return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
	f.features.Copy = nil
	f.features.SetTier = false
	f.features.GetTier = false
}
// f.listMultipartUploads()
return f, nil
}
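fs.Tristate lets use_multipart_etag distinguish "unset" from an explicit true or false, so setQuirks can fill in a per-provider default only when the user said nothing. A minimal sketch of the same valid/value pattern (the type here mirrors fs.Tristate rather than importing it):

package main

import "fmt"

// Tristate mirrors the valid/value pair used by fs.Tristate.
type Tristate struct {
	Valid bool
	Value bool
}

func main() {
	var opt Tristate // zero value means "use the provider default"
	providerDefault := true
	if !opt.Valid {
		opt.Valid = true
		opt.Value = providerDefault
	}
	fmt.Println(opt.Value) // true
}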
@@ -3209,9 +3284,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
	if err != nil {
		return err
	}
	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
	}
	o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
	return nil
}
@@ -3241,6 +3313,7 @@ func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *t
	o.storageClass = aws.StringValue(storageClass)
	if lastModified == nil {
		o.lastModified = time.Now()
		fs.Logf(o, "Failed to read last modified")
	} else {
		o.lastModified = *lastModified
	}
@@ -3321,11 +3394,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
	return nil, err
}

size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
	fs.Debugf(o, "Failed to parse content length from string %s, %v", resp.Header.Get("Content-Length"), err)
}
contentLength := &size
contentLength := &resp.ContentLength
if resp.Header.Get("Content-Range") != "" {
	var contentRange = resp.Header.Get("Content-Range")
	slash := strings.IndexRune(contentRange, '/')
@@ -3416,9 +3485,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return nil, err
	}
	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified: %v", err)
	}

	// read size from ContentLength or ContentRange
	size := resp.ContentLength
	if resp.ContentRange != nil {
@@ -3441,7 +3508,7 @@

var warnStreamUpload sync.Once

func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, err error) {
	f := o.fs

	// make concurrency machinery
@@ -3488,7 +3555,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
	return f.shouldRetry(ctx, err)
})
if err != nil {
	return fmt.Errorf("multipart upload failed to initialise: %w", err)
	return etag, fmt.Errorf("multipart upload failed to initialise: %w", err)
}
uid := cout.UploadId

@@ -3517,8 +3584,21 @@
	partsMu sync.Mutex // to protect parts
	parts   []*s3.CompletedPart
	off     int64
	md5sMu  sync.Mutex
	md5s    []byte
)

addMd5 := func(md5binary *[md5.Size]byte, partNum int64) {
	md5sMu.Lock()
	defer md5sMu.Unlock()
	start := partNum * md5.Size
	end := start + md5.Size
	if extend := end - int64(len(md5s)); extend > 0 {
		md5s = append(md5s, make([]byte, extend)...)
	}
	copy(md5s[start:end], (*md5binary)[:])
}

for partNum := int64(1); !finished; partNum++ {
	// Get a block of memory from the pool and token which limits concurrency.
	tokens.Get()
@@ -3548,7 +3628,7 @@
	finished = true
} else if err != nil {
	free()
	return fmt.Errorf("multipart upload failed to read source: %w", err)
	return etag, fmt.Errorf("multipart upload failed to read source: %w", err)
}
buf = buf[:n]

@@ -3561,6 +3641,7 @@

// create checksum of buffer for integrity checking
md5sumBinary := md5.Sum(buf)
addMd5(&md5sumBinary, partNum-1)
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])

err = f.pacer.Call(func() (bool, error) {
@@ -3602,7 +3683,7 @@
}
err = g.Wait()
if err != nil {
	return err
	return etag, err
}

// sort the completed parts by part number
@@ -3623,9 +3704,11 @@
	return f.shouldRetry(ctx, err)
})
if err != nil {
	return fmt.Errorf("multipart upload failed to finalise: %w", err)
	return etag, fmt.Errorf("multipart upload failed to finalise: %w", err)
}
return nil
hashOfHashes := md5.Sum(md5s)
etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
return etag, nil
}

// Update the Object from in with modTime and size
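The etag returned above follows AWS's multipart convention: the MD5 of the concatenated binary per-part MD5s, hex encoded and suffixed with the part count. A standalone sketch of the same computation:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func multipartETag(parts [][]byte) string {
	var md5s []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...) // concatenate the binary part digests
	}
	hashOfHashes := md5.Sum(md5s) // then hash the concatenation
	return fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
}

func main() {
	fmt.Println(multipartETag([][]byte{[]byte("part1"), []byte("part2")}))
}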
@@ -3651,19 +3734,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sum string
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
	hash, err := src.Hash(ctx, hash.MD5)
	if err == nil && matchMd5.MatchString(hash) {
		hashBytes, err := hex.DecodeString(hash)
	md5sumHex, err = src.Hash(ctx, hash.MD5)
	if err == nil && matchMd5.MatchString(md5sumHex) {
		hashBytes, err := hex.DecodeString(md5sumHex)
		if err == nil {
			md5sum = base64.StdEncoding.EncodeToString(hashBytes)
			md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
			if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
				// Set the md5sum as metadata on the object if
				// - a multipart upload
				// - the Etag is not an MD5, eg when using SSE/SSE-C
				// provided checksums aren't disabled
				metadata[metaMD5Hash] = &md5sum
				metadata[metaMD5Hash] = &md5sumBase64
			}
		}
	}
@@ -3678,8 +3762,8 @@
	ContentType: &mimeType,
	Metadata:    metadata,
}
if md5sum != "" {
	req.ContentMD5 = &md5sum
if md5sumBase64 != "" {
	req.ContentMD5 = &md5sumBase64
}
if o.fs.opt.RequesterPays {
	req.RequestPayer = aws.String(s3.RequestPayerRequester)
@@ -3733,8 +3817,9 @@
}

var resp *http.Response // response from PUT
var wantETag string     // Multipart upload Etag to check
if multipart {
	err = o.uploadMultipart(ctx, &req, size, in)
	wantETag, err = o.uploadMultipart(ctx, &req, size, in)
	if err != nil {
		return err
	}
@@ -3796,7 +3881,7 @@
// so make up the object as best we can assuming it got
// uploaded properly. If size < 0 then we need to do the HEAD.
if o.fs.opt.NoHead && size >= 0 {
	o.md5 = md5sum
	o.md5 = md5sumHex
	o.bytes = size
	o.lastModified = time.Now()
	o.meta = req.Metadata
@@ -3814,7 +3899,18 @@

// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
head, err := o.headObject(ctx)
if err != nil {
	return err
}
o.setMetaData(head.ETag, head.ContentLength, head.LastModified, head.Metadata, head.ContentType, head.StorageClass)
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
	gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
	if wantETag != gotETag {
		return fmt.Errorf("multipart upload corrupted: Etag differ: expecting %s but got %s", wantETag, gotETag)
	}
	fs.Debugf(o, "Multipart upload Etag: %s OK", wantETag)
}
return err
}
@@ -4,7 +4,7 @@ import (
	"context"
	"fmt"
	"net/url"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -10,7 +10,7 @@ import (
	"path"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/coreos/go-semver/semver"

@@ -17,7 +17,7 @@ import (
	"regexp"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"sync/atomic"
	"time"
@@ -60,11 +60,13 @@ func init() {
	Help:     "SSH host to connect to.\n\nE.g. \"example.com\".",
	Required: true,
}, {
	Name: "user",
	Help: "SSH username, leave blank for current username, " + currentUser + ".",
	Name:    "user",
	Help:    "SSH username.",
	Default: currentUser,
}, {
	Name: "port",
	Help: "SSH port, leave blank to use default (22).",
	Name:    "port",
	Help:    "SSH port number.",
	Default: 22,
}, {
	Name: "pass",
	Help: "SSH password, leave blank to use ssh-agent.",
@@ -3,7 +3,7 @@

package sftp

import "github.com/rclone/rclone/lib/sync"
import "sync"

// stringLock locks for string IDs passed in
type stringLock struct {

@@ -5,7 +5,7 @@ package sftp

import (
	"fmt"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"testing"
	"time"

@@ -13,7 +13,7 @@ import (
	"fmt"
	"io"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"

	"github.com/rclone/rclone/backend/sharefile/api"
	"github.com/rclone/rclone/fs"
@@ -1,8 +1,8 @@
//go:build !plan9
// +build !plan9

// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
// Package storj provides an interface to Storj decentralized object storage.
package storj

import (
	"context"
@@ -31,16 +31,17 @@ const (
)

var satMap = map[string]string{
	"us-central-1.tardigrade.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
	"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
	"asia-east-1.tardigrade.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
	"us-central-1.storj.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
	"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
	"asia-east-1.storj.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "tardigrade",
		Description: "Tardigrade Decentralized Cloud Storage",
		Name:        "storj",
		Description: "Storj Decentralized Cloud Storage",
		Aliases:     []string{"tardigrade"},
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
			provider, _ := m.Get(fs.ConfigProvider)
@@ -84,10 +85,9 @@ func init() {
		},
		Options: []fs.Option{
			{
				Name:     fs.ConfigProvider,
				Help:     "Choose an authentication method.",
				Required: true,
				Default:  existingProvider,
				Name:    fs.ConfigProvider,
				Help:    "Choose an authentication method.",
				Default: existingProvider,
				Examples: []fs.OptionExample{{
					Value: "existing",
					Help:  "Use an existing access grant.",
@@ -99,23 +99,21 @@ func init() {
			{
				Name:     "access_grant",
				Help:     "Access grant.",
				Required: false,
				Provider: "existing",
			},
			{
				Name:     "satellite_address",
				Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
				Required: false,
				Provider: newProvider,
				Default:  "us-central-1.tardigrade.io",
				Default:  "us-central-1.storj.io",
				Examples: []fs.OptionExample{{
					Value: "us-central-1.tardigrade.io",
					Value: "us-central-1.storj.io",
					Help:  "US Central 1",
				}, {
					Value: "europe-west-1.tardigrade.io",
					Value: "europe-west-1.storj.io",
					Help:  "Europe West 1",
				}, {
					Value: "asia-east-1.tardigrade.io",
					Value: "asia-east-1.storj.io",
					Help:  "Asia East 1",
				},
				},
@@ -123,13 +121,11 @@ func init() {
			{
				Name:     "api_key",
				Help:     "API key.",
				Required: false,
				Provider: newProvider,
			},
			{
				Name:     "passphrase",
				Help:     "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
				Required: false,
				Provider: newProvider,
			},
		},
@@ -145,7 +141,7 @@ type Options struct {
	Passphrase string `config:"passphrase"`
}

// Fs represents a remote to Tardigrade
// Fs represents a remote to Storj
type Fs struct {
	name string // the name of the remote
	root string // root of the filesystem
@@ -163,11 +159,12 @@ var (
	_ fs.Fs          = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Mover       = &Fs{}
)

// NewFs creates a filesystem backed by Tardigrade.
// NewFs creates a filesystem backed by Storj.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
	// Setup filesystem and connection to Tardigrade
	// Setup filesystem and connection to Storj
	root = norm.NFC.String(root)
	root = strings.Trim(root, "/")

@@ -188,24 +185,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
	if f.opts.Access != "" {
		access, err = uplink.ParseAccess(f.opts.Access)
		if err != nil {
			return nil, fmt.Errorf("tardigrade: access: %w", err)
			return nil, fmt.Errorf("storj: access: %w", err)
		}
	}

	if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
		access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
		if err != nil {
			return nil, fmt.Errorf("tardigrade: access: %w", err)
			return nil, fmt.Errorf("storj: access: %w", err)
		}

		serializedAccess, err := access.Serialize()
		if err != nil {
			return nil, fmt.Errorf("tardigrade: access: %w", err)
			return nil, fmt.Errorf("storj: access: %w", err)
		}

		err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
		if err != nil {
			return nil, fmt.Errorf("tardigrade: access: %w", err)
			return nil, fmt.Errorf("storj: access: %w", err)
		}
	}

@@ -237,7 +234,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
	if bucketName != "" && bucketPath != "" {
		_, err = project.StatBucket(ctx, bucketName)
		if err != nil {
			return f, fmt.Errorf("tardigrade: bucket: %w", err)
			return f, fmt.Errorf("storj: bucket: %w", err)
		}

		object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -263,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
	return f, nil
}

// connect opens a connection to Tardigrade.
// connect opens a connection to Storj.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
	fs.Debugf(f, "connecting...")
	defer fs.Debugf(f, "connected: %+v", err)
@@ -274,7 +271,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {

	project, err = cfg.OpenProject(ctx, f.access)
	if err != nil {
		return nil, fmt.Errorf("tardigrade: project: %w", err)
		return nil, fmt.Errorf("storj: project: %w", err)
	}

	return
@@ -683,3 +680,34 @@ func newPrefix(prefix string) string {

	return prefix + "/"
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Move parameters
	srcBucket, srcKey := bucket.Split(srcObj.absolute)
	dstBucket, dstKey := f.absolute(remote)
	options := uplink.MoveObjectOptions{}

	// Do the move
	err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
	if err != nil {
		return nil, fmt.Errorf("rename object failed: %w", err)
	}

	// Read the new object
	return f.NewObject(ctx, remote)
}
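With fs.Mover implemented above, rclone can rename objects server side instead of falling back to copy plus delete. For example (remote and object names illustrative):

rclone moveto storj:bucket/old-name.txt storj:bucket/new-name.txt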
@@ -1,7 +1,7 @@
//go:build !plan9
// +build !plan9

package tardigrade
package storj

import (
	"context"
@@ -18,7 +18,7 @@ import (
	"storj.io/uplink"
)

// Object describes a Tardigrade object
// Object describes a Storj object
type Object struct {
	fs *Fs

@@ -32,7 +32,7 @@ type Object struct {
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}

// newObjectFromUplink creates a new object from a Tardigrade uplink object.
// newObjectFromUplink creates a new object from a Storj uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
	// Attempt to use the modified time from the metadata. Otherwise
	// fallback to the server time.
@@ -1,20 +1,20 @@
//go:build !plan9
// +build !plan9

// Test Tardigrade filesystem interface
package tardigrade_test
// Test Storj filesystem interface
package storj_test

import (
	"testing"

	"github.com/rclone/rclone/backend/tardigrade"
	"github.com/rclone/rclone/backend/storj"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestTardigrade:",
		NilObject:  (*tardigrade.Object)(nil),
		RemoteName: "TestStorj:",
		NilObject:  (*storj.Object)(nil),
	})
}
@@ -1,4 +1,4 @@
//go:build plan9
// +build plan9

package tardigrade
package storj
@@ -23,7 +23,7 @@ import (
	"regexp"
	"strconv"
	"strings"
	"github.com/rclone/rclone/lib/sync"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/sugarsync/api"
@@ -754,22 +754,34 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var containers []swift.Container
	var err error
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(ctx, nil)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("container listing failed: %w", err)
	}
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	var total, objects int64
	for _, c := range containers {
		total += c.Bytes
		objects += c.Count
	if f.rootContainer != "" {
		var container swift.Container
		err = f.pacer.Call(func() (bool, error) {
			container, _, err = f.c.Container(ctx, f.rootContainer)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("container info failed: %w", err)
		}
		total = container.Bytes
		objects = container.Count
	} else {
		var containers []swift.Container
		err = f.pacer.Call(func() (bool, error) {
			containers, err = f.c.ContainersAll(ctx, nil)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("container listing failed: %w", err)
		}
		for _, c := range containers {
			total += c.Bytes
			objects += c.Count
		}
	}
	usage := &fs.Usage{
	usage = &fs.Usage{
		Used:    fs.NewUsageValue(total),   // bytes in use
		Objects: fs.NewUsageValue(objects), // objects in use
	}
@@ -4,7 +4,8 @@ import (
	"context"
	"fmt"
	"io"
-	"github.com/rclone/rclone/lib/sync"
+	"io/ioutil"
+	"sync"
	"time"

	"github.com/rclone/rclone/backend/union/upstream"
@@ -84,6 +85,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			err := o.Update(ctx, readers[i], src, options...)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
+				if len(entries) > 1 {
+					// Drain the input buffer to allow other uploads to continue
+					_, _ = io.Copy(ioutil.Discard, readers[i])
+				}
			}
		} else {
			errs[i] = fs.ErrorNotAFile
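An aside on the drained reader: the union backend fans a single input stream out to several upstreams, so a failed upload must keep consuming its reader, otherwise the shared producer blocks and stalls the remaining uploads. A minimal sketch of the pattern, assuming Go 1.16+ where io.Discard is the modern spelling of the ioutil.Discard used here; the names are illustrative:

    package main

    import (
    	"io"
    	"strings"
    )

    // drainOnError consumes the remainder of r so that a shared producer
    // (for example the writer behind an io.Pipe fan-out) is never blocked
    // by a consumer that gave up early.
    func drainOnError(r io.Reader, uploadErr error, fanout int) {
    	if uploadErr != nil && fanout > 1 {
    		_, _ = io.Copy(io.Discard, r)
    	}
    }

    func main() {
    	r := strings.NewReader("remaining bytes the failed upload never read")
    	drainOnError(r, io.ErrUnexpectedEOF, 2)
    }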
@@ -3,7 +3,7 @@ package policy
import (
	"context"
	"path"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
@@ -3,7 +3,7 @@ package policy
import (
	"context"
	"path"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/backend/union/upstream"
@@ -6,10 +6,11 @@ import (
	"errors"
	"fmt"
	"io"
+	"io/ioutil"
	"path"
	"path/filepath"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/backend/union/policy"
@@ -33,25 +34,21 @@ func init() {
			Help:     "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
			Required: true,
		}, {
-			Name:     "action_policy",
-			Help:     "Policy to choose upstream on ACTION category.",
-			Required: true,
-			Default:  "epall",
+			Name:    "action_policy",
+			Help:    "Policy to choose upstream on ACTION category.",
+			Default: "epall",
		}, {
-			Name:     "create_policy",
-			Help:     "Policy to choose upstream on CREATE category.",
-			Required: true,
-			Default:  "epmfs",
+			Name:    "create_policy",
+			Help:    "Policy to choose upstream on CREATE category.",
+			Default: "epmfs",
		}, {
-			Name:     "search_policy",
-			Help:     "Policy to choose upstream on SEARCH category.",
-			Required: true,
-			Default:  "ff",
+			Name:    "search_policy",
+			Help:    "Policy to choose upstream on SEARCH category.",
+			Default: "ff",
		}, {
-			Name:     "cache_time",
-			Help:     "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
-			Required: true,
-			Default:  120,
+			Name:    "cache_time",
+			Help:    "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
+			Default: 120,
		}},
	}
	fs.Register(fsi)
@@ -490,6 +487,10 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
		}
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
+			if len(upstreams) > 1 {
+				// Drain the input buffer to allow other uploads to continue
+				_, _ = io.Copy(ioutil.Discard, readers[i])
+			}
			return
		}
		objs[i] = u.WrapObject(o)

@@ -4,8 +4,6 @@ import (
	"bytes"
	"context"
	"fmt"
-	"io/ioutil"
-	"os"
	"testing"
	"time"

@@ -20,19 +18,12 @@ import (
)

// MakeTestDirs makes directories in /tmp for testing
-func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
+func MakeTestDirs(t *testing.T, n int) (dirs []string) {
	for i := 1; i <= n; i++ {
-		dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
-		require.NoError(t, err)
+		dir := t.TempDir()
		dirs = append(dirs, dir)
	}
-	clean = func() {
-		for _, dir := range dirs {
-			err := os.RemoveAll(dir)
-			assert.NoError(t, err)
-		}
-	}
-	return dirs, clean
+	return dirs
}

func (f *Fs) TestInternalReadOnly(t *testing.T) {
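Worth noting: t.TempDir registers removal with the testing framework's cleanup machinery, which is why the returned clean function can be deleted at every call site below. A minimal sketch, with hypothetical names:

    package example

    import "testing"

    // Before: callers had to capture a cleanup function and defer it.
    // After: the testing framework removes the directory automatically
    // when the test (and its subtests) complete.
    func TestUsesTempDir(t *testing.T) {
    	dir := t.TempDir() // removed automatically, no defer needed
    	_ = dir
    }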
@@ -95,8 +86,7 @@ func TestMoveCopy(t *testing.T) {
		t.Skip("Skipping as -remote set")
	}
	ctx := context.Background()
-	dirs, clean := MakeTestDirs(t, 1)
-	defer clean()
+	dirs := MakeTestDirs(t, 1)
	fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
	f, err := fs.NewFs(ctx, fsString)
	require.NoError(t, err)

@@ -27,8 +27,7 @@ func TestStandard(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnion"
	fstests.Run(t, &fstests.Opt{
@@ -49,8 +48,7 @@ func TestRO(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
	name := "TestUnionRO"
	fstests.Run(t, &fstests.Opt{
@@ -71,8 +69,7 @@ func TestNC(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
	name := "TestUnionNC"
	fstests.Run(t, &fstests.Opt{
@@ -93,8 +90,7 @@ func TestPolicy1(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy1"
	fstests.Run(t, &fstests.Opt{
@@ -115,8 +111,7 @@ func TestPolicy2(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy2"
	fstests.Run(t, &fstests.Opt{
@@ -137,8 +132,7 @@ func TestPolicy3(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
-	dirs, clean := union.MakeTestDirs(t, 3)
-	defer clean()
+	dirs := union.MakeTestDirs(t, 3)
	upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
	name := "TestUnionPolicy3"
	fstests.Run(t, &fstests.Opt{

@@ -6,10 +6,8 @@ import (
	"fmt"
	"io"
	"math"
-	"path"
-	"path/filepath"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"sync/atomic"
	"time"

@@ -91,7 +89,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
		return nil, err
	}
	f.RootFs = rFs
-	rootString := path.Join(remote, filepath.ToSlash(root))
+	rootString := fspath.JoinRootPath(remote, root)
	myFs, err := cache.Get(ctx, rootString)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err

@@ -6,7 +6,7 @@ import (
	"regexp"
	"strconv"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -21,7 +21,7 @@ import (
	"path"
	"strconv"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/backend/webdav/api"

@@ -19,7 +19,7 @@ import (
	"runtime"
	"sort"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"text/template"
	"time"

@@ -52,6 +52,7 @@ var (
var osarches = []string{
	"windows/386",
	"windows/amd64",
+	"windows/arm64",
	"darwin/amd64",
	"darwin/arm64",
	"linux/386",
@@ -85,6 +86,13 @@ var archFlags = map[string][]string{
	"arm-v7": {"GOARM=7"},
}

+// Map Go architectures to NFPM architectures
+// Any missing are passed straight through
+var goarchToNfpm = map[string]string{
+	"arm":    "arm6",
+	"arm-v7": "arm7",
+}
+
// runEnv - run a shell command with env
func runEnv(args, env []string) error {
	if *debug {
@@ -167,11 +175,15 @@ func buildDebAndRpm(dir, version, goarch string) []string {
	pkgVersion := version[1:]
	pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
	pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
+	nfpmArch, ok := goarchToNfpm[goarch]
+	if !ok {
+		nfpmArch = goarch
+	}

	// Make nfpm.yaml from the template
	substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
		"Version": pkgVersion,
-		"Arch":    goarch,
+		"Arch":    nfpmArch,
	})

	// build them
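The nfpm architecture fix above is the standard comma-ok map lookup with a passthrough default; in isolation it looks like this (the helper name is hypothetical):

    package main

    import "fmt"

    // goarchToNfpm maps Go architecture names to the names nfpm expects;
    // anything absent from the map passes straight through unchanged.
    var goarchToNfpm = map[string]string{
    	"arm":    "arm6",
    	"arm-v7": "arm7",
    }

    func nfpmArchFor(goarch string) string {
    	if arch, ok := goarchToNfpm[goarch]; ok {
    		return arch
    	}
    	return goarch
    }

    func main() {
    	fmt.Println(nfpmArchFor("arm-v7")) // arm7
    	fmt.Println(nfpmArchFor("amd64"))  // amd64 (passthrough)
    }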
@@ -377,7 +389,7 @@ func compileArch(version, goos, goarch, dir string) bool {
	artifacts := []string{buildZip(dir)}
	// build a .deb and .rpm if appropriate
	if goos == "linux" {
-		artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
+		artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
	}
	if *copyAs != "" {
		for _, artifact := range artifacts {

@@ -24,6 +24,7 @@ docs = [
    "overview.md",
    "flags.md",
    "docker.md",
+   "bisync.md",

    # Keep these alphabetical by full name
    "fichier.md",
@@ -52,6 +53,7 @@ docs = [
    "mailru.md",
    "mega.md",
    "memory.md",
+   "netstorage.md",
    "azureblob.md",
    "onedrive.md",
    "opendrive.md",
@@ -63,8 +65,9 @@ docs = [
    "putio.md",
    "seafile.md",
    "sftp.md",
+   "storj.md",
    "sugarsync.md",
-   "tardigrade.md",
+   "tardigrade.md", # stub only to redirect to storj.md
    "uptobox.md",
    "union.md",
    "webdav.md",

@@ -41,7 +41,7 @@ You can discover what commands a backend implements by using
    rclone backend help <backendname>

You can also discover information about the backend using (see
-[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
+[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

    rclone backend features remote:
@@ -55,7 +55,7 @@ Pass arguments to the backend by placing them on the end of the line
    rclone backend cleanup remote:path file1 file2 file3

Note to run these commands on a running backend then see
-[backend/command](/rc/#backend/command) in the rc docs.
+[backend/command](/rc/#backend-command) in the rc docs.
`,
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 1e6, command, args)
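As a usage illustration (the remote name and arguments are placeholders), running a backend command against an already-running rclone over the remote control looks like:

    rclone rc backend/command command=noop fs=remote: -a path1 -a path2 -o echo=yes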
@@ -149,7 +149,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
-[backend/command](/rc/#backend/command).
+[backend/command](/rc/#backend-command).

`, name)
	for _, cmd := range cmds {

@@ -19,7 +19,7 @@ import (
	"runtime/pprof"
	"strconv"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -9,7 +9,7 @@ import (
	"io"
	"os"
	"path"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"sync/atomic"
	"time"

@@ -168,7 +168,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
	host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

	// Create options
-	options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
+	options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
	fs.Debugf(f, "Mounting with options: %q", options)

	// Serve the mount point in the background returning error to errChan

@@ -10,11 +10,17 @@
package cmount

import (
+	"runtime"
	"testing"

+	"github.com/rclone/rclone/fstest/testy"
	"github.com/rclone/rclone/vfs/vfstest"
)

func TestMount(t *testing.T) {
+	// Disable tests under macOS and the CI since they are locking up
+	if runtime.GOOS == "darwin" {
+		testy.SkipUnreliable(t)
+	}
	vfstest.RunTests(t, false, mount)
}
cmd/help.go
@@ -165,7 +165,7 @@ func runRoot(cmd *cobra.Command, args []string) {
// setupRootCommand sets default usage, help, and error handling for
// the root command.
//
-// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
+// Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go
func setupRootCommand(rootCmd *cobra.Command) {
	ci := fs.GetConfig(context.Background())
	// Add global flags
@@ -329,12 +329,29 @@ func showBackend(name string) {
	if opt.IsPassword {
		fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
	}
+	fmt.Printf("Properties:\n\n")
	fmt.Printf("- Config: %s\n", opt.Name)
	fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
+	if opt.Provider != "" {
+		fmt.Printf("- Provider: %s\n", opt.Provider)
+	}
	fmt.Printf("- Type: %s\n", opt.Type())
-	fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
+	defaultValue := opt.GetValue()
+	// Default value and Required are related: Required means option must
+	// have a value, but if there is a default then a value does not have
+	// to be explicitly set and then Required makes no difference.
+	if defaultValue != "" {
+		fmt.Printf("- Default: %s\n", quoteString(defaultValue))
+	} else {
+		fmt.Printf("- Required: %v\n", opt.Required)
+	}
+	// List examples / possible choices
	if len(opt.Examples) > 0 {
-		fmt.Printf("- Examples:\n")
+		if opt.Exclusive {
+			fmt.Printf("- Choices:\n")
+		} else {
+			fmt.Printf("- Examples:\n")
+		}
		for _, ex := range opt.Examples {
			fmt.Printf(" - %s\n", quoteString(ex.Value))
			for _, line := range strings.Split(ex.Help, "\n") {
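To make the change concrete, an option with a default now renders roughly like this in the generated docs, while an option without one shows Required instead (the option name and values below are hypothetical):

    Properties:

    - Config: chunk_size
    - Env Var: RCLONE_EXAMPLE_CHUNK_SIZE
    - Type: SizeSuffix
    - Default: 8Mi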
@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error

	f := VFS.Fs()
	fs.Debugf(f, "Mounting on %q", mountpoint)
-	c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
+	c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
	if err != nil {
		return nil, nil, err
	}

@@ -12,7 +12,7 @@ import (
	"os"
	"path/filepath"
	"sort"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"
)

@@ -25,11 +25,10 @@ func init() {
// mountOptions configures the options from the command line flags
//
// man mount.fuse for more info and note the -o flag for other options
-func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
-	device := f.Name() + ":" + f.Root()
+func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
	mountOpts = &fuse.MountOptions{
		AllowOther: fsys.opt.AllowOther,
-		FsName: device,
+		FsName: opt.DeviceName,
		Name: "rclone",
		DisableXAttrs: true,
		Debug: fsys.opt.DebugFUSE,
@@ -120,7 +119,7 @@ func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
	if runtime.GOOS == "darwin" {
		opts = append(opts,
			// VolumeName sets the volume name shown in Finder.
-			fmt.Sprintf("volname=%s", device),
+			fmt.Sprintf("volname=%s", opt.VolumeName),

			// NoAppleXattr makes OSXFUSE disallow extended attributes with the
			// prefix "com.apple.". This disables persistent Finder state and
@@ -167,7 +166,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
	//mOpts.Debug = mountlib.DebugFUSE

	//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
-	mountOpts := mountOptions(fsys, f)
+	mountOpts := mountOptions(fsys, f, opt)

	// FIXME fill out
	opts := fusefs.Options{

@@ -7,7 +7,7 @@ import (
	"os"
	"runtime"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/cmd"
@@ -40,6 +40,7 @@ type Options struct {
	ExtraOptions []string
	ExtraFlags []string
	AttrTimeout time.Duration // how long the kernel caches attribute for
+	DeviceName string
	VolumeName string
	NoAppleDouble bool
	NoAppleXattr bool
@@ -125,6 +126,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
+	flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
	// Windows and OSX
	flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
	// OSX only
@@ -235,6 +237,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
		return nil, err
	}
	m.SetVolumeName(m.MountOpt.VolumeName)
+	m.SetDeviceName(m.MountOpt.DeviceName)

	// Start background task if --daemon is specified
	if m.MountOpt.Daemon {

@@ -5,7 +5,7 @@ import (
	"errors"
	"log"
	"sort"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -16,11 +16,16 @@ import (
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/fstest/testy"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRc(t *testing.T) {
+	// Disable tests under macOS and the CI since they are locking up
+	if runtime.GOOS == "darwin" {
+		testy.SkipUnreliable(t)
+	}
	ctx := context.Background()
	configfile.Install()
	mount := rc.Calls.Get("mount/mount")
@@ -30,19 +35,14 @@ func TestRc(t *testing.T) {
	getMountTypes := rc.Calls.Get("mount/types")
	assert.NotNil(t, getMountTypes)

-	localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
-	require.NoError(t, err)
-	defer func() { _ = os.RemoveAll(localDir) }()
-	err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
+	localDir := t.TempDir()
+	err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
	require.NoError(t, err)

-	mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
-	require.NoError(t, err)
+	mountPoint := t.TempDir()
	if runtime.GOOS == "windows" {
		// Windows requires the mount point not to exist
		require.NoError(t, os.RemoveAll(mountPoint))
-	} else {
-		defer func() { _ = os.RemoveAll(mountPoint) }()
	}

	out, err := getMountTypes.Fn(ctx, nil)

@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
	if vol == "" {
-		vol = m.Fs.Name() + ":" + m.Fs.Root()
+		vol = fs.ConfigString(m.Fs)
	}
	m.MountOpt.SetVolumeName(vol)
}
@@ -102,3 +102,11 @@ func (o *Options) SetVolumeName(vol string) {
	}
	o.VolumeName = vol
}
+
+// SetDeviceName with sensible default
+func (m *MountPoint) SetDeviceName(dev string) {
+	if dev == "" {
+		dev = fs.ConfigString(m.Fs)
+	}
+	m.MountOpt.DeviceName = dev
+}
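Usage-wise, the new flag wired up above can be exercised as follows (the remote and mountpoint are placeholders); when --devname is omitted, the device name defaults to the canonical remote:path string, just as the volume name does:

    rclone mount remote:path /path/to/mountpoint --devname mydevice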
@@ -5,7 +5,7 @@ import (
	"context"
	"fmt"
	"path"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"

@@ -6,7 +6,7 @@ import (
	"bytes"
	"fmt"
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/fs"

@@ -3,7 +3,7 @@ package rcd
import (
	"context"
	"log"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

	sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
	"github.com/rclone/rclone/cmd"
[Binary image file changed; not shown. Before: 20 KiB, after: 1.4 KiB]
[Binary image file changed; not shown. Before: 5.6 KiB, after: 724 B]
@@ -23,6 +23,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/testy"
	"github.com/rclone/rclone/lib/file"

	"github.com/stretchr/testify/assert"

@@ -303,6 +304,10 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
}

func testMountAPI(t *testing.T, sockAddr string) {
+	// Disable tests under macOS and linux in the CI since they are locking up
+	if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
+		testy.SkipUnreliable(t)
+	}
	if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
		t.Skip("Test requires working mount command")
	}

@@ -10,7 +10,7 @@ import (
	"path/filepath"
	"reflect"
	"sort"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"

@@ -14,7 +14,7 @@ import (
	"os"
	"os/user"
	"strconv"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"
	"time"

	"github.com/rclone/rclone/cmd"

@@ -16,7 +16,10 @@ import (
)

// Help describes the options for the serve package
-var Help = `--template allows a user to specify a custom markup template for http
+var Help = `
+#### Template
+
+--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:
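For illustration, the flag described in this help text is used as follows (the remote and template path are placeholders):

    rclone serve http remote:path --template /path/to/custom_template.html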
@@ -23,7 +23,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
	flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
	flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
	flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
-	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
+	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")

@@ -5,7 +5,7 @@ package restic

import (
	"strings"
-	"github.com/rclone/rclone/lib/sync"
+	"sync"

	"github.com/rclone/rclone/fs"
)

@@ -16,6 +16,7 @@ TestFichier:
TestFTP:
TestGoogleCloudStorage:
TestHubic:
+TestNetStorage:
TestOneDrive:
TestPcloud:
TestQingStor:
Some files were not shown because too many files have changed in this diff.