mirror of
https://github.com/rclone/rclone.git
synced 2026-01-07 19:13:19 +00:00
Compare commits
5 Commits
fix-7973-n
...
fix-7337-s
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3fa5a424a9 | ||
|
|
9fb0afad88 | ||
|
|
2f9c2cf75e | ||
|
|
1ac18e5765 | ||
|
|
3e8cee148a |
4
.github/workflows/build.yml
vendored
4
.github/workflows/build.yml
vendored
@@ -124,7 +124,7 @@ jobs:
|
||||
sudo modprobe fuse
|
||||
sudo chmod 666 /dev/fuse
|
||||
sudo chown root:$USER /etc/fuse.conf
|
||||
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
|
||||
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
|
||||
- name: Install Libraries on macOS
|
||||
@@ -237,7 +237,7 @@ jobs:
|
||||
id: setup-go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '>=1.23.0-rc.1'
|
||||
go-version: '>=1.22.0-rc.1'
|
||||
check-latest: true
|
||||
cache: false
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ linters:
|
||||
- stylecheck
|
||||
- unused
|
||||
- misspell
|
||||
- gocritic
|
||||
#- prealloc
|
||||
#- maligned
|
||||
disable-all: true
|
||||
@@ -99,11 +98,3 @@ linters-settings:
|
||||
# Only enable the checks performed by the staticcheck stand-alone tool,
|
||||
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
|
||||
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
- appendAssign
|
||||
- captLocal
|
||||
- commentFormatting
|
||||
- exitAfterDefer
|
||||
- ifElseChain
|
||||
- singleCaseSwitch
|
||||
|
||||
@@ -209,7 +209,7 @@ altogether with an HTML report and test retries then from the
|
||||
project root:
|
||||
|
||||
go install github.com/rclone/rclone/fstest/test_all
|
||||
test_all -backends drive
|
||||
test_all -backend drive
|
||||
|
||||
### Full integration testing
|
||||
|
||||
@@ -508,7 +508,7 @@ You'll need to modify the following files
|
||||
- `backend/s3/s3.go`
|
||||
- Add the provider to `providerOption` at the top of the file
|
||||
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
||||
- Exclude your provider from generic config questions (eg `region` and `endpoint).
|
||||
- Exclude your provider from genric config questions (eg `region` and `endpoint).
|
||||
- Add the provider to the `setQuirks` function - see the documentation there.
|
||||
- `docs/content/s3.md`
|
||||
- Add the provider at the top of the page.
|
||||
|
||||
@@ -21,8 +21,6 @@ Current active maintainers of rclone are:
|
||||
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
|
||||
| Hideo Aoyama | @boukendesho | snap packaging |
|
||||
| nielash | @nielash | bisync |
|
||||
| Dan McArdle | @dmcardle | gitannex |
|
||||
| Sam Harrison | @childish-sambino | filescom |
|
||||
|
||||
**This is a work in progress Draft**
|
||||
|
||||
|
||||
@@ -55,14 +55,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
|
||||
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
|
||||
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
|
||||
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
|
||||
* FTP [:page_facing_up:](https://rclone.org/ftp/)
|
||||
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
|
||||
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
|
||||
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
@@ -96,7 +93,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
|
||||
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
|
||||
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
|
||||
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
|
||||
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
|
||||
* put.io [:page_facing_up:](https://rclone.org/putio/)
|
||||
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
@@ -105,7 +101,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
|
||||
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
|
||||
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
|
||||
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
|
||||
@@ -23,8 +23,8 @@ func prepare(t *testing.T, root string) {
|
||||
configfile.Install()
|
||||
|
||||
// Configure the remote
|
||||
config.FileSetValue(remoteName, "type", "alias")
|
||||
config.FileSetValue(remoteName, "remote", root)
|
||||
config.FileSet(remoteName, "type", "alias")
|
||||
config.FileSet(remoteName, "remote", root)
|
||||
}
|
||||
|
||||
func TestNewFS(t *testing.T) {
|
||||
|
||||
@@ -17,9 +17,7 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/dropbox"
|
||||
_ "github.com/rclone/rclone/backend/fichier"
|
||||
_ "github.com/rclone/rclone/backend/filefabric"
|
||||
_ "github.com/rclone/rclone/backend/filescom"
|
||||
_ "github.com/rclone/rclone/backend/ftp"
|
||||
_ "github.com/rclone/rclone/backend/gofile"
|
||||
_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
||||
_ "github.com/rclone/rclone/backend/googlephotos"
|
||||
_ "github.com/rclone/rclone/backend/hasher"
|
||||
@@ -41,7 +39,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
|
||||
_ "github.com/rclone/rclone/backend/pcloud"
|
||||
_ "github.com/rclone/rclone/backend/pikpak"
|
||||
_ "github.com/rclone/rclone/backend/pixeldrain"
|
||||
_ "github.com/rclone/rclone/backend/premiumizeme"
|
||||
_ "github.com/rclone/rclone/backend/protondrive"
|
||||
_ "github.com/rclone/rclone/backend/putio"
|
||||
|
||||
@@ -2094,6 +2094,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
return 0, nil
|
||||
}
|
||||
md5sum := m.Sum(nil)
|
||||
transactionalMD5 := md5sum[:]
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
var binaryBlockID [8]byte // block counter as LSB first 8 bytes
|
||||
@@ -2116,7 +2117,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
}
|
||||
options := blockblob.StageBlockOptions{
|
||||
// Specify the transactional md5 for the body, to be validated by the service.
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
|
||||
}
|
||||
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
|
||||
if err != nil {
|
||||
|
||||
@@ -1035,10 +1035,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
|
||||
return fmt.Errorf("update: unable to create file: %w", createErr)
|
||||
}
|
||||
} else if size != o.Size() {
|
||||
} else {
|
||||
// Resize the file if needed
|
||||
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
|
||||
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
|
||||
if size != o.Size() {
|
||||
if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
|
||||
return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -42,11 +42,11 @@ func TestTimestampIsZero(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTimestampEqual(t *testing.T) {
|
||||
assert.False(t, emptyT.Equal(emptyT)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||
assert.False(t, emptyT.Equal(emptyT))
|
||||
assert.False(t, t0.Equal(emptyT))
|
||||
assert.False(t, emptyT.Equal(t0))
|
||||
assert.False(t, t0.Equal(t1))
|
||||
assert.False(t, t1.Equal(t0))
|
||||
assert.True(t, t0.Equal(t0)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||
assert.True(t, t1.Equal(t1)) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid dupArg: suspicious method call with the same argument and receiver
|
||||
assert.True(t, t0.Equal(t0))
|
||||
assert.True(t, t1.Equal(t1))
|
||||
}
|
||||
|
||||
22
backend/cache/cache.go
vendored
22
backend/cache/cache.go
vendored
@@ -409,16 +409,18 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||
}
|
||||
} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||
if err != nil {
|
||||
decPass = opt.PlexPassword
|
||||
}
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||
} else {
|
||||
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||
if err != nil {
|
||||
decPass = opt.PlexPassword
|
||||
}
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
18
backend/cache/cache_internal_test.go
vendored
18
backend/cache/cache_internal_test.go
vendored
@@ -417,7 +417,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
if runInstance.rootIsCrypt {
|
||||
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
|
||||
require.NoError(t, err)
|
||||
expectedSize++ // FIXME newline gets in, likely test data issue
|
||||
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
|
||||
} else {
|
||||
data2 = []byte("test content")
|
||||
}
|
||||
@@ -850,8 +850,8 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
fstest.Initialise()
|
||||
remoteExists := false
|
||||
for _, s := range config.GetRemotes() {
|
||||
if s.Name == remote {
|
||||
for _, s := range config.FileSections() {
|
||||
if s == remote {
|
||||
remoteExists = true
|
||||
}
|
||||
}
|
||||
@@ -875,12 +875,12 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
cacheRemote := remote
|
||||
if !remoteExists {
|
||||
localRemote := remote + "-local"
|
||||
config.FileSetValue(localRemote, "type", "local")
|
||||
config.FileSetValue(localRemote, "nounc", "true")
|
||||
config.FileSet(localRemote, "type", "local")
|
||||
config.FileSet(localRemote, "nounc", "true")
|
||||
m.Set("type", "cache")
|
||||
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
|
||||
} else {
|
||||
remoteType := config.GetValue(remote, "type")
|
||||
remoteType := config.FileGet(remote, "type")
|
||||
if remoteType == "" {
|
||||
t.Skipf("skipped due to invalid remote type for %v", remote)
|
||||
return nil, nil
|
||||
@@ -891,14 +891,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
m.Set("password", cryptPassword1)
|
||||
m.Set("password2", cryptPassword2)
|
||||
}
|
||||
remoteRemote := config.GetValue(remote, "remote")
|
||||
remoteRemote := config.FileGet(remote, "remote")
|
||||
if remoteRemote == "" {
|
||||
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
|
||||
return nil, nil
|
||||
}
|
||||
remoteRemoteParts := strings.Split(remoteRemote, ":")
|
||||
remoteWrapping := remoteRemoteParts[0]
|
||||
remoteType := config.GetValue(remoteWrapping, "type")
|
||||
remoteType := config.FileGet(remoteWrapping, "type")
|
||||
if remoteType != "cache" {
|
||||
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
|
||||
return nil, nil
|
||||
@@ -1192,7 +1192,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
func (r *run) cleanSize(t *testing.T, size int64) int64 {
|
||||
if r.rootIsCrypt {
|
||||
denominator := int64(65536 + 16)
|
||||
size -= 32
|
||||
size = size - 32
|
||||
quotient := size / denominator
|
||||
remainder := size % denominator
|
||||
return (quotient*65536 + remainder - 16)
|
||||
|
||||
10
backend/cache/handle.go
vendored
10
backend/cache/handle.go
vendored
@@ -208,7 +208,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||
|
||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||
chunkStart -= offset
|
||||
chunkStart = chunkStart - offset
|
||||
r.queueOffset(chunkStart)
|
||||
found := false
|
||||
|
||||
@@ -327,7 +327,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
|
||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||
chunkStart -= int64(r.cacheFs().opt.ChunkSize)
|
||||
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
||||
}
|
||||
r.queueOffset(chunkStart)
|
||||
|
||||
@@ -415,8 +415,10 @@ func (w *worker) run() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||
continue
|
||||
} else {
|
||||
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||
|
||||
@@ -987,7 +987,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
||||
}
|
||||
}
|
||||
|
||||
if o.main == nil && len(o.chunks) == 0 {
|
||||
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
|
||||
// Scanning hasn't found data chunks with conforming names.
|
||||
if f.useMeta || quickScan {
|
||||
// Metadata is required but absent and there are no chunks.
|
||||
|
||||
@@ -38,7 +38,6 @@ import (
|
||||
const (
|
||||
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
|
||||
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
|
||||
chunkStreams = 0 // Streams to use for reading
|
||||
|
||||
bufferSize = 8388608
|
||||
heuristicBytes = 1048576
|
||||
@@ -1363,7 +1362,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
|
||||
}
|
||||
}
|
||||
// Get a chunkedreader for the wrapped object
|
||||
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
|
||||
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
|
||||
// Get file handle
|
||||
var file io.Reader
|
||||
if offset != 0 {
|
||||
|
||||
@@ -329,7 +329,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
|
||||
for _, runeValue := range plaintext {
|
||||
dir += int(runeValue)
|
||||
}
|
||||
dir %= 256
|
||||
dir = dir % 256
|
||||
|
||||
// We'll use this number to store in the result filename...
|
||||
var result bytes.Buffer
|
||||
@@ -450,7 +450,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
if pos >= 26 {
|
||||
pos -= 6
|
||||
}
|
||||
pos -= thisdir
|
||||
pos = pos - thisdir
|
||||
if pos < 0 {
|
||||
pos += 52
|
||||
}
|
||||
@@ -888,7 +888,7 @@ func (fh *decrypter) fillBuffer() (err error) {
|
||||
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
|
||||
// Zero out the bad block and continue
|
||||
for i := range (*fh.buf)[:n] {
|
||||
fh.buf[i] = 0
|
||||
(*fh.buf)[i] = 0
|
||||
}
|
||||
}
|
||||
fh.bufIndex = 0
|
||||
|
||||
@@ -2219,7 +2219,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
case in <- job:
|
||||
default:
|
||||
overflow = append(overflow, job)
|
||||
wg.Done()
|
||||
wg.Add(-1)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3965,7 +3965,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return "", nil
|
||||
|
||||
@@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
oldToken = strings.TrimSpace(oldToken)
|
||||
if ok && oldToken != "" && oldToken[0] != '{' {
|
||||
fs.Infof(name, "Converting token to new format")
|
||||
newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
||||
|
||||
@@ -1,901 +0,0 @@
|
||||
// Package filescom provides an interface to the Files.com
|
||||
// object storage system.
|
||||
package filescom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
files_sdk "github.com/Files-com/files-sdk-go/v3"
|
||||
"github.com/Files-com/files-sdk-go/v3/bundle"
|
||||
"github.com/Files-com/files-sdk-go/v3/file"
|
||||
file_migration "github.com/Files-com/files-sdk-go/v3/filemigration"
|
||||
"github.com/Files-com/files-sdk-go/v3/folder"
|
||||
"github.com/Files-com/files-sdk-go/v3/session"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
/*
|
||||
Run of rclone info
|
||||
stringNeedsEscaping = []rune{
|
||||
'/', '\x00'
|
||||
}
|
||||
maxFileLength = 512 // for 1 byte unicode characters
|
||||
maxFileLength = 512 // for 2 byte unicode characters
|
||||
maxFileLength = 512 // for 3 byte unicode characters
|
||||
maxFileLength = 512 // for 4 byte unicode characters
|
||||
canWriteUnnormalized = true
|
||||
canReadUnnormalized = true
|
||||
canReadRenormalized = true
|
||||
canStream = true
|
||||
*/
|
||||
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
|
||||
folderNotEmpty = "processing-failure/folder-not-empty"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "filescom",
|
||||
Description: "Files.com",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "site",
|
||||
Help: "Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).",
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "The username used to authenticate with Files.com.",
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "The password used to authenticate with Files.com.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "api_key",
|
||||
Help: "The API key used to authenticate with Files.com.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeRightCrLfHtVt |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Site string `config:"site"`
|
||||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
APIKey string `config:"api_key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote files.com server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
fileClient *file.Client // the connection to the file API
|
||||
folderClient *folder.Client // the connection to the folder API
|
||||
migrationClient *file_migration.Client // the connection to the file migration API
|
||||
bundleClient *bundle.Client // the connection to the bundle API
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
}
|
||||
|
||||
// Object describes a files object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64 // size of the object
|
||||
crc32 string // CRC32 of the object content
|
||||
md5 string // MD5 of the object content
|
||||
mimeType string // Content-Type of the object
|
||||
modTime time.Time // modification time of the object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("files root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Encode remote and turn it into an absolute path in the share
|
||||
func (f *Fs) absPath(remote string) string {
|
||||
return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if apiErr, ok := err.(files_sdk.ResponseError); ok {
|
||||
for _, e := range retryErrorCodes {
|
||||
if apiErr.HttpCode == e {
|
||||
fs.Debugf(nil, "Retrying API error %v", err)
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *files_sdk.File, err error) {
|
||||
params := files_sdk.FileFindParams{
|
||||
Path: f.absPath(path),
|
||||
}
|
||||
|
||||
var file files_sdk.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
file, err = f.fileClient.Find(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &file, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
config, err := newClientConfig(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
fileClient: &file.Client{Config: config},
|
||||
folderClient: &folder.Client{Config: config},
|
||||
migrationClient: &file_migration.Client{Config: config},
|
||||
bundleClient: &bundle.Client{Config: config},
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ReadMimeType: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if f.root != "" {
|
||||
info, err := f.readMetaDataForPath(ctx, "")
|
||||
if err == nil && !info.IsDir() {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
}
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
|
||||
if opt.Site != "" {
|
||||
if strings.Contains(opt.Site, ".") {
|
||||
config.EndpointOverride = opt.Site
|
||||
} else {
|
||||
config.Subdomain = opt.Site
|
||||
}
|
||||
|
||||
_, err = url.ParseRequestURI(config.Endpoint())
|
||||
if err != nil {
|
||||
err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
config = config.Init().SetCustomClient(fshttp.NewClient(ctx))
|
||||
|
||||
if opt.APIKey != "" {
|
||||
config.APIKey = opt.APIKey
|
||||
return
|
||||
}
|
||||
|
||||
if opt.Username == "" {
|
||||
err = errors.New("username not found")
|
||||
return
|
||||
}
|
||||
if opt.Password == "" {
|
||||
err = errors.New("password not found")
|
||||
return
|
||||
}
|
||||
opt.Password, err = obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sessionClient := session.Client{Config: config}
|
||||
params := files_sdk.SessionCreateParams{
|
||||
Username: opt.Username,
|
||||
Password: opt.Password,
|
||||
}
|
||||
|
||||
thisSession, err := sessionClient.Create(params, files_sdk.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = fmt.Errorf("couldn't create session: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
config.SessionId = thisSession.Id
|
||||
return
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *files_sdk.File) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
var err error
|
||||
if file != nil {
|
||||
err = o.setMetaData(file)
|
||||
} else {
|
||||
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
var it *folder.Iter
|
||||
params := files_sdk.FolderListForParams{
|
||||
Path: f.absPath(dir),
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
it, err = f.folderClient.ListFor(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
|
||||
for it.Next() {
|
||||
item := ptr(it.File())
|
||||
remote := f.opt.Enc.ToStandardPath(item.DisplayName)
|
||||
remote = path.Join(dir, remote)
|
||||
if remote == dir {
|
||||
continue
|
||||
}
|
||||
|
||||
if item.IsDir() {
|
||||
d := fs.NewDir(remote, item.ModTime())
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
}
|
||||
err = it.Err()
|
||||
if files_sdk.IsNotExist(err) {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Returns the object and error.
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, err error) {
|
||||
// Create the directory for the object if it doesn't exist
|
||||
err = f.mkParentDir(ctx, remote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Temporary Object under construction
|
||||
o = &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned.
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
fs := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return fs, fs.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
func (f *Fs) mkdir(ctx context.Context, path string) error {
|
||||
if path == "" || path == "." {
|
||||
return nil
|
||||
}
|
||||
|
||||
params := files_sdk.FolderCreateParams{
|
||||
Path: path,
|
||||
MkdirParents: ptr(true),
|
||||
}
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.folderClient.Create(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if files_sdk.IsExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Make the parent directory of remote
|
||||
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
|
||||
return f.mkdir(ctx, path.Dir(f.absPath(remote)))
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.mkdir(ctx, f.absPath(dir))
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
o := Object{
|
||||
fs: f,
|
||||
remote: dir,
|
||||
}
|
||||
return o.SetModTime(ctx, modTime)
|
||||
}
|
||||
|
||||
// purgeCheck removes the root directory, if check is set then it
|
||||
// refuses to do so if it has anything in
|
||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
path := f.absPath(dir)
|
||||
if path == "" || path == "." {
|
||||
return errors.New("can't purge root directory")
|
||||
}
|
||||
|
||||
params := files_sdk.FileDeleteParams{
|
||||
Path: path,
|
||||
Recursive: ptr(!check),
|
||||
}
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
err := f.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||
// Allow for eventual consistency deletion of child objects.
|
||||
if isFolderNotEmpty(err) {
|
||||
return true, err
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
if files_sdk.IsNotExist(err) {
|
||||
return fs.ErrorDirNotFound
|
||||
} else if isFolderNotEmpty(err) {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
return fmt.Errorf("rmdir failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir deletes the root folder
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, true)
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dstObj fs.Object, err error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
err = srcObj.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
srcPath := srcObj.fs.absPath(srcObj.remote)
|
||||
dstPath := f.absPath(remote)
|
||||
if strings.EqualFold(srcPath, dstPath) {
|
||||
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, err = f.createObject(ctx, remote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
params := files_sdk.FileCopyParams{
|
||||
Path: srcPath,
|
||||
Destination: dstPath,
|
||||
Overwrite: ptr(true),
|
||||
}
|
||||
|
||||
var action files_sdk.FileAction
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
action, err = f.fileClient.Copy(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = f.waitForAction(ctx, action, "copy")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = dstObj.SetModTime(ctx, srcObj.modTime)
|
||||
return
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
|
||||
|
||||
// move a file or folder
|
||||
func (f *Fs) move(ctx context.Context, src *Fs, srcRemote string, dstRemote string) (info *files_sdk.File, err error) {
|
||||
// Move the object
|
||||
params := files_sdk.FileMoveParams{
|
||||
Path: src.absPath(srcRemote),
|
||||
Destination: f.absPath(dstRemote),
|
||||
}
|
||||
|
||||
var action files_sdk.FileAction
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
action, err = f.fileClient.Move(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.waitForAction(ctx, action, "move")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info, err = f.readMetaDataForPath(ctx, dstRemote)
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) waitForAction(ctx context.Context, action files_sdk.FileAction, operation string) (err error) {
|
||||
var migration files_sdk.FileMigration
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
migration, err = f.migrationClient.Wait(action, func(migration files_sdk.FileMigration) {
|
||||
// noop
|
||||
}, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err == nil && migration.Status != "completed" {
|
||||
return fmt.Errorf("%v did not complete successfully: %v", operation, migration.Status)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, err := f.createObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
info, err := f.move(ctx, srcObj.fs, srcObj.remote, dstObj.remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dstObj.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Check if destination exists
|
||||
_, err = f.readMetaDataForPath(ctx, dstRemote)
|
||||
if err == nil {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Create temporary object
|
||||
dstObj, err := f.createObject(ctx, dstRemote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Do the move
|
||||
_, err = f.move(ctx, srcFs, srcRemote, dstObj.remote)
|
||||
return
|
||||
}
|
||||
|
||||
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (url string, err error) {
|
||||
params := files_sdk.BundleCreateParams{
|
||||
Paths: []string{f.absPath(remote)},
|
||||
}
|
||||
if expire < fs.DurationOff {
|
||||
params.ExpiresAt = ptr(time.Now().Add(time.Duration(expire)))
|
||||
}
|
||||
|
||||
var bundle files_sdk.Bundle
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
bundle, err = f.bundleClient.Create(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
url = bundle.Url
|
||||
return
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet(hash.CRC32, hash.MD5)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
switch t {
|
||||
case hash.CRC32:
|
||||
if o.crc32 == "" {
|
||||
return "", nil
|
||||
}
|
||||
return fmt.Sprintf("%08s", o.crc32), nil
|
||||
case hash.MD5:
|
||||
return o.md5, nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(file *files_sdk.File) error {
|
||||
o.modTime = file.ModTime()
|
||||
|
||||
if !file.IsDir() {
|
||||
o.size = file.Size
|
||||
o.crc32 = file.Crc32
|
||||
o.md5 = file.Md5
|
||||
o.mimeType = file.MimeType
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
file, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
||||
if err != nil {
|
||||
if files_sdk.IsNotExist(err) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
if file.IsDir() {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
return o.setMetaData(file)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||
params := files_sdk.FileUpdateParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
ProvidedMtime: &modTime,
|
||||
}
|
||||
|
||||
var file files_sdk.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
file, err = o.fs.fileClient.Update(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.setMetaData(&file)
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Offset and Count for range download
|
||||
var offset, count int64
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size - offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
params := files_sdk.FileDownloadParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
headers := &http.Header{}
|
||||
headers.Set("Range", fmt.Sprintf("bytes=%v-%v", offset, offset+count-1))
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.fileClient.Download(
|
||||
params,
|
||||
files_sdk.WithContext(ctx),
|
||||
files_sdk.RequestHeadersOption(headers),
|
||||
files_sdk.ResponseBodyOption(func(closer io.ReadCloser) error {
|
||||
in = closer
|
||||
return err
|
||||
}),
|
||||
)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a pointer to t - useful for returning pointers to constants
|
||||
func ptr[T any](t T) *T {
|
||||
return &t
|
||||
}
|
||||
|
||||
func isFolderNotEmpty(err error) bool {
|
||||
var re files_sdk.ResponseError
|
||||
ok := errors.As(err, &re)
|
||||
return ok && re.Type == folderNotEmpty
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
//
|
||||
// The new object may have been created if an error is returned.
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
uploadOpts := []file.UploadOption{
|
||||
file.UploadWithContext(ctx),
|
||||
file.UploadWithReader(in),
|
||||
file.UploadWithDestinationPath(o.fs.absPath(o.remote)),
|
||||
file.UploadWithProvidedMtime(src.ModTime(ctx)),
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Upload(uploadOpts...)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
params := files_sdk.FileDeleteParams{
|
||||
Path: o.fs.absPath(o.remote),
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err := o.fs.fileClient.Delete(params, files_sdk.WithContext(ctx))
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
)
|
||||
@@ -1,17 +0,0 @@
|
||||
// Test Files filesystem interface
|
||||
package filescom_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/filescom"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFilesCom:",
|
||||
NilObject: (*filescom.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -85,7 +85,7 @@ to an encrypted one. Cannot be used in combination with implicit FTPS.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "concurrency",
|
||||
Help: strings.ReplaceAll(`Maximum number of FTP simultaneous connections, 0 for unlimited.
|
||||
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
|
||||
|
||||
Note that setting this is very likely to cause deadlocks so it should
|
||||
be used with care.
|
||||
@@ -99,7 +99,7 @@ maximum of |--checkers| and |--transfers|.
|
||||
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
|
||||
--check-first| or |--checkers 1 --transfers 1|.
|
||||
|
||||
`, "|", "`"),
|
||||
`, "|", "`", -1),
|
||||
Default: 0,
|
||||
Advanced: true,
|
||||
}, {
|
||||
|
||||
@@ -1,311 +0,0 @@
|
||||
// Package api has type definitions for gofile
|
||||
//
|
||||
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// 2017-05-03T07:26:10-07:00
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the
|
||||
// gofile API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Error is returned from gofile when things go wrong
|
||||
type Error struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q", e.Status)
|
||||
return out
|
||||
}
|
||||
|
||||
// IsError returns true if there is an error
|
||||
func (e Error) IsError() bool {
|
||||
return e.Status != "ok"
|
||||
}
|
||||
|
||||
// Err returns err if not nil, or e if IsError or nil
|
||||
func (e Error) Err(err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.IsError() {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
ItemTypeFolder = "folder"
|
||||
ItemTypeFile = "file"
|
||||
)
|
||||
|
||||
// Item describes a folder or a file as returned by /contents
|
||||
type Item struct {
|
||||
ID string `json:"id"`
|
||||
ParentFolder string `json:"parentFolder"`
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Code string `json:"code"`
|
||||
CreateTime int64 `json:"createTime"`
|
||||
ModTime int64 `json:"modTime"`
|
||||
Link string `json:"link"`
|
||||
MD5 string `json:"md5"`
|
||||
MimeType string `json:"mimetype"`
|
||||
ChildrenCount int `json:"childrenCount"`
|
||||
DirectLinks map[string]*DirectLink `json:"directLinks"`
|
||||
//Public bool `json:"public"`
|
||||
//ServerSelected string `json:"serverSelected"`
|
||||
//Thumbnail string `json:"thumbnail"`
|
||||
//DownloadCount int `json:"downloadCount"`
|
||||
//TotalDownloadCount int64 `json:"totalDownloadCount"`
|
||||
//TotalSize int64 `json:"totalSize"`
|
||||
//ChildrenIDs []string `json:"childrenIds"`
|
||||
Children map[string]*Item `json:"children"`
|
||||
}
|
||||
|
||||
// ToNativeTime converts a go time to a native time
|
||||
func ToNativeTime(t time.Time) int64 {
|
||||
return t.Unix()
|
||||
}
|
||||
|
||||
// FromNativeTime converts native time to a go time
|
||||
func FromNativeTime(t int64) time.Time {
|
||||
return time.Unix(t, 0)
|
||||
}
|
||||
|
||||
// DirectLink describes a direct link to a file so it can be
|
||||
// downloaded by third parties.
|
||||
type DirectLink struct {
|
||||
ExpireTime int64 `json:"expireTime"`
|
||||
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
|
||||
DomainsAllowed []any `json:"domainsAllowed"`
|
||||
Auth []any `json:"auth"`
|
||||
IsReqLink bool `json:"isReqLink"`
|
||||
DirectLink string `json:"directLink"`
|
||||
}
|
||||
|
||||
// Contents is returned from the /contents call
|
||||
type Contents struct {
|
||||
Error
|
||||
Data struct {
|
||||
Item
|
||||
} `json:"data"`
|
||||
Metadata Metadata `json:"metadata"`
|
||||
}
|
||||
|
||||
// Metadata is returned when paging is in use
|
||||
type Metadata struct {
|
||||
TotalCount int `json:"totalCount"`
|
||||
TotalPages int `json:"totalPages"`
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"pageSize"`
|
||||
HasNextPage bool `json:"hasNextPage"`
|
||||
}
|
||||
|
||||
// AccountsGetID is the result of /accounts/getid
|
||||
type AccountsGetID struct {
|
||||
Error
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// Stats of storage and traffic
|
||||
type Stats struct {
|
||||
FolderCount int64 `json:"folderCount"`
|
||||
FileCount int64 `json:"fileCount"`
|
||||
Storage int64 `json:"storage"`
|
||||
TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
|
||||
TrafficReqDownloaded int64 `json:"trafficReqDownloaded"`
|
||||
TrafficWebDownloaded int64 `json:"trafficWebDownloaded"`
|
||||
}
|
||||
|
||||
// AccountsGet is the result of /accounts/{id}
|
||||
type AccountsGet struct {
|
||||
Error
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Email string `json:"email"`
|
||||
Tier string `json:"tier"`
|
||||
PremiumType string `json:"premiumType"`
|
||||
Token string `json:"token"`
|
||||
RootFolder string `json:"rootFolder"`
|
||||
SubscriptionProvider string `json:"subscriptionProvider"`
|
||||
SubscriptionEndDate int `json:"subscriptionEndDate"`
|
||||
SubscriptionLimitDirectTraffic int64 `json:"subscriptionLimitDirectTraffic"`
|
||||
SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"`
|
||||
StatsCurrent Stats `json:"statsCurrent"`
|
||||
// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// CreateFolderRequest is the input to /contents/createFolder
|
||||
type CreateFolderRequest struct {
|
||||
ParentFolderID string `json:"parentFolderId"`
|
||||
FolderName string `json:"folderName"`
|
||||
ModTime int64 `json:"modTime,omitempty"`
|
||||
}
|
||||
|
||||
// CreateFolderResponse is the output from /contents/createFolder
|
||||
type CreateFolderResponse struct {
|
||||
Error
|
||||
Data Item `json:"data"`
|
||||
}
|
||||
|
||||
// DeleteRequest is the input to DELETE /contents
|
||||
type DeleteRequest struct {
|
||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
||||
}
|
||||
|
||||
// DeleteResponse is the input to DELETE /contents
|
||||
type DeleteResponse struct {
|
||||
Error
|
||||
Data map[string]Error
|
||||
}
|
||||
|
||||
// Server is an upload server
|
||||
type Server struct {
|
||||
Name string `json:"name"`
|
||||
Zone string `json:"zone"`
|
||||
}
|
||||
|
||||
// String returns a string representation of the Server
|
||||
func (s *Server) String() string {
|
||||
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
|
||||
}
|
||||
|
||||
// Root returns the root URL for the server
|
||||
func (s *Server) Root() string {
|
||||
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
|
||||
}
|
||||
|
||||
// URL returns the upload URL for the server
|
||||
func (s *Server) URL() string {
|
||||
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
|
||||
}
|
||||
|
||||
// ServersResponse is the output from /servers
|
||||
type ServersResponse struct {
|
||||
Error
|
||||
Data struct {
|
||||
Servers []Server `json:"servers"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// UploadResponse is returned by POST /contents/uploadfile
|
||||
type UploadResponse struct {
|
||||
Error
|
||||
Data Item `json:"data"`
|
||||
}
|
||||
|
||||
// DirectLinksRequest specifies the parameters for the direct link
|
||||
type DirectLinksRequest struct {
|
||||
ExpireTime int64 `json:"expireTime,omitempty"`
|
||||
SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
|
||||
DomainsAllowed []any `json:"domainsAllowed,omitempty"`
|
||||
Auth []any `json:"auth,omitempty"`
|
||||
}
|
||||
|
||||
// DirectLinksResult is returned from POST /contents/{id}/directlinks
|
||||
type DirectLinksResult struct {
|
||||
Error
|
||||
Data struct {
|
||||
ExpireTime int64 `json:"expireTime"`
|
||||
SourceIpsAllowed []any `json:"sourceIpsAllowed"`
|
||||
DomainsAllowed []any `json:"domainsAllowed"`
|
||||
Auth []any `json:"auth"`
|
||||
IsReqLink bool `json:"isReqLink"`
|
||||
ID string `json:"id"`
|
||||
DirectLink string `json:"directLink"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
|
||||
//
|
||||
// The Value of the attribute to define :
|
||||
// For Attribute "name" : The name of the content (file or folder)
|
||||
// For Attribute "description" : The description displayed on the download page (folder only)
|
||||
// For Attribute "tags" : A comma-separated list of tags (folder only)
|
||||
// For Attribute "public" : either true or false (folder only)
|
||||
// For Attribute "expiry" : A unix timestamp of the expiration date (folder only)
|
||||
// For Attribute "password" : The password to set (folder only)
|
||||
type UpdateItemRequest struct {
|
||||
Attribute string `json:"attribute"`
|
||||
Value any `json:"attributeValue"`
|
||||
}
|
||||
|
||||
// UpdateItemResponse is returned by PUT /contents/{id}/update
|
||||
type UpdateItemResponse struct {
|
||||
Error
|
||||
Data Item `json:"data"`
|
||||
}
|
||||
|
||||
// MoveRequest is the input to /contents/move
|
||||
type MoveRequest struct {
|
||||
FolderID string `json:"folderId"`
|
||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
||||
}
|
||||
|
||||
// MoveResponse is returned by POST /contents/move
|
||||
type MoveResponse struct {
|
||||
Error
|
||||
Data map[string]struct {
|
||||
Error
|
||||
Item `json:"data"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// CopyRequest is the input to /contents/copy
|
||||
type CopyRequest struct {
|
||||
FolderID string `json:"folderId"`
|
||||
ContentsID string `json:"contentsId"` // comma separated list of IDs
|
||||
}
|
||||
|
||||
// CopyResponse is returned by POST /contents/copy
|
||||
type CopyResponse struct {
|
||||
Error
|
||||
Data map[string]struct {
|
||||
Error
|
||||
Item `json:"data"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// UploadServerStatus is returned when fetching the root of an upload server
|
||||
type UploadServerStatus struct {
|
||||
Error
|
||||
Data struct {
|
||||
Server string `json:"server"`
|
||||
Test string `json:"test"`
|
||||
} `json:"data"`
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,17 +0,0 @@
|
||||
// Test Gofile filesystem interface
|
||||
package gofile_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/gofile"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestGoFile:",
|
||||
NilObject: (*gofile.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func (ik *ImageKit) URL(params URLParam) (string, error) {
|
||||
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
|
||||
var path = strings.Replace(resultURL, endpoint, "", 1)
|
||||
|
||||
path += expires
|
||||
path = path + expires
|
||||
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
|
||||
mac.Write([]byte(path))
|
||||
signature := hex.EncodeToString(mac.Sum(nil))
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
//go:build darwin && cgo
|
||||
|
||||
// Package local provides a filesystem interface
|
||||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/go-darwin/apfs"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// # This is stored with the remote path given
|
||||
//
|
||||
// # It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't clone - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: failed to read metadata: %w", err)
|
||||
}
|
||||
|
||||
// Create destination
|
||||
dstObj := f.newObject(remote)
|
||||
err = dstObj.mkdirAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = Clone(srcObj.path, f.localPath(remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(remote, "server-side cloned!")
|
||||
|
||||
// Set metadata if --metadata is in use
|
||||
if meta != nil {
|
||||
err = dstObj.writeMetadata(meta)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy: failed to set metadata: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// Clone uses APFS cloning if possible, otherwise falls back to copying (with full metadata preservation)
|
||||
// note that this is closely related to unix.Clonefile(src, dst, unix.CLONE_NOFOLLOW) but not 100% identical
|
||||
// https://opensource.apple.com/source/copyfile/copyfile-173.40.2/copyfile.c.auto.html
|
||||
func Clone(src, dst string) error {
|
||||
state := apfs.CopyFileStateAlloc()
|
||||
defer func() {
|
||||
if err := apfs.CopyFileStateFree(state); err != nil {
|
||||
fs.Errorf(dst, "free state error: %v", err)
|
||||
}
|
||||
}()
|
||||
cloned, err := apfs.CopyFile(src, dst, state, apfs.COPYFILE_CLONE)
|
||||
fs.Debugf(dst, "isCloned: %v, error: %v", cloned, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Copier = &Fs{}
|
||||
)
|
||||
@@ -32,11 +32,9 @@ import (
|
||||
)
|
||||
|
||||
// Constants
|
||||
const (
|
||||
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
)
|
||||
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
||||
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
||||
|
||||
// timeType allows the user to choose what exactly ModTime() returns
|
||||
type timeType = fs.Enum[timeTypeChoices]
|
||||
@@ -80,46 +78,41 @@ supported by all file systems) under the "user.*" prefix.
|
||||
Metadata is supported on files and directories.
|
||||
`,
|
||||
},
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Default: false,
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows.",
|
||||
Default: false,
|
||||
Advanced: runtime.GOOS != "windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names.",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "links",
|
||||
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "l",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: `Don't warn about skipped symlinks.
|
||||
|
||||
This flag disables warning messages on skipped symlinks or junction
|
||||
points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
|
||||
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
||||
|
||||
@@ -129,12 +122,11 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu
|
||||
|
||||
So rclone now always reads the link.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "unicode_normalization",
|
||||
Help: `Apply unicode NFC normalization to paths and filenames.
|
||||
|
||||
This flag can be used to normalize file names into unicode NFC form
|
||||
that are read from the local filesystem.
|
||||
@@ -148,12 +140,11 @@ some OSes.
|
||||
|
||||
Note that rclone compares filenames with unicode normalization in the sync
|
||||
routine so this flag shouldn't normally be used.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: `Don't check to see if the files change during upload.
|
||||
|
||||
Normally rclone checks the size and modification time of files as they
|
||||
are being uploaded and aborts with a message which starts "can't copy -
|
||||
@@ -184,96 +175,68 @@ directory listing (where the initial stat value comes from on Windows)
|
||||
and when stat is called on them directly. Other copy tools always use
|
||||
the direct stat value and setting this flag will disable that.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "case_sensitive",
|
||||
Help: `Force the filesystem to report itself as case sensitive.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_sensitive",
|
||||
Help: `Force the filesystem to report itself as case sensitive.
|
||||
|
||||
Normally the local backend declares itself as case insensitive on
|
||||
Windows/macOS and case sensitive for everything else. Use this flag
|
||||
to override the default choice.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "case_insensitive",
|
||||
Help: `Force the filesystem to report itself as case insensitive.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_insensitive",
|
||||
Help: `Force the filesystem to report itself as case insensitive.
|
||||
|
||||
Normally the local backend declares itself as case insensitive on
|
||||
Windows/macOS and case sensitive for everything else. Use this flag
|
||||
to override the default choice.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_clone",
|
||||
Help: `Disable reflink cloning for server-side copies.
|
||||
|
||||
Normally, for local-to-local transfers, rclone will "clone" the file when
|
||||
possible, and fall back to "copying" only when cloning is not supported.
|
||||
|
||||
Cloning creates a shallow copy (or "reflink") which initially shares blocks with
|
||||
the original file. Unlike a "hardlink", the two files are independent and
|
||||
neither will affect the other if subsequently modified.
|
||||
|
||||
Cloning is usually preferable to copying, as it is much faster and is
|
||||
deduplicated by default (i.e. having two identical files does not consume more
|
||||
storage than having just one.) However, for use cases where data redundancy is
|
||||
preferable, --local-no-clone can be used to disable cloning and force "deep" copies.
|
||||
|
||||
Currently, cloning is only supported when using APFS on macOS (support for other
|
||||
platforms may be added in the future.)`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_preallocate",
|
||||
Help: `Disable preallocation of disk space for transferred files.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_preallocate",
|
||||
Help: `Disable preallocation of disk space for transferred files.
|
||||
|
||||
Preallocation of disk space helps prevent filesystem fragmentation.
|
||||
However, some virtual filesystem layers (such as Google Drive File
|
||||
Stream) may incorrectly set the actual file size equal to the
|
||||
preallocated space, causing checksum and file size checks to fail.
|
||||
Use this flag to disable preallocation.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_sparse",
|
||||
Help: `Disable sparse files for multi-thread downloads.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_sparse",
|
||||
Help: `Disable sparse files for multi-thread downloads.
|
||||
|
||||
On Windows platforms rclone will make sparse files when doing
|
||||
multi-thread downloads. This avoids long pauses on large files where
|
||||
the OS zeros the file. However sparse files may be undesirable as they
|
||||
cause disk fragmentation and can be slow to work with.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "no_set_modtime",
|
||||
Help: `Disable setting modtime.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_set_modtime",
|
||||
Help: `Disable setting modtime.
|
||||
|
||||
Normally rclone updates modification time of files after they are done
|
||||
uploading. This can cause permissions issues on Linux platforms when
|
||||
the user rclone is running as does not own the file uploaded, such as
|
||||
when copying to a CIFS mount owned by another user. If this option is
|
||||
enabled, rclone will no longer update the modtime after copying a file.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "time_type",
|
||||
Help: `Set what kind of time is returned.
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "time_type",
|
||||
Help: `Set what kind of time is returned.
|
||||
|
||||
Normally rclone does all operations on the mtime or Modification time.
|
||||
|
||||
@@ -292,29 +255,27 @@ will silently replace it with the modification time which all OSes support.
|
||||
Note that setting the time will still set the modified time so this is
|
||||
only useful for reading.
|
||||
`,
|
||||
Default: mTime,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: mTime.String(),
|
||||
Help: "The last modification time.",
|
||||
}, {
|
||||
Value: aTime.String(),
|
||||
Help: "The last access time.",
|
||||
}, {
|
||||
Value: bTime.String(),
|
||||
Help: "The creation time.",
|
||||
}, {
|
||||
Value: cTime.String(),
|
||||
Help: "The last status change time.",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.OS,
|
||||
},
|
||||
},
|
||||
Default: mTime,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: mTime.String(),
|
||||
Help: "The last modification time.",
|
||||
}, {
|
||||
Value: aTime.String(),
|
||||
Help: "The last access time.",
|
||||
}, {
|
||||
Value: bTime.String(),
|
||||
Help: "The creation time.",
|
||||
}, {
|
||||
Value: cTime.String(),
|
||||
Help: "The last status change time.",
|
||||
}},
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.OS,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
@@ -335,7 +296,6 @@ type Options struct {
|
||||
NoSetModTime bool `config:"no_set_modtime"`
|
||||
TimeType timeType `config:"time_type"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
NoClone bool `config:"no_clone"`
|
||||
}
|
||||
|
||||
// Fs represents a local filesystem rooted at root
|
||||
@@ -424,10 +384,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
}
|
||||
if opt.NoClone {
|
||||
// Disable server-side copy when --local-no-clone is set
|
||||
f.features.Copy = nil
|
||||
}
|
||||
|
||||
// Check to see if this points to a file
|
||||
fi, err := f.lstat(f.root)
|
||||
|
||||
@@ -2,7 +2,6 @@ package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
@@ -73,12 +72,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i
|
||||
value, ok := m[key]
|
||||
if ok {
|
||||
var err error
|
||||
parsed, err := strconv.ParseInt(value, base, 0)
|
||||
result64, err := strconv.ParseInt(value, base, 64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
|
||||
ok = false
|
||||
}
|
||||
result = int(parsed)
|
||||
result = int(result64)
|
||||
}
|
||||
return result, ok
|
||||
}
|
||||
@@ -129,14 +128,9 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
||||
}
|
||||
mode, hasMode := o.parseMetadataInt(m, "mode", 8)
|
||||
if hasMode {
|
||||
if mode >= 0 {
|
||||
umode := uint(mode)
|
||||
if umode <= math.MaxUint32 {
|
||||
err = os.Chmod(o.path, os.FileMode(umode))
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change permissions: %w", err)
|
||||
}
|
||||
}
|
||||
err = os.Chmod(o.path, os.FileMode(mode))
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change permissions: %w", err)
|
||||
}
|
||||
}
|
||||
// FIXME not parsing rdev yet
|
||||
|
||||
@@ -923,7 +923,9 @@ func (f *Fs) netStorageStatRequest(ctx context.Context, URL string, directory bo
|
||||
entrywanted := (directory && files[i].Type == "dir") ||
|
||||
(!directory && files[i].Type != "dir")
|
||||
if entrywanted {
|
||||
files[0], files[i] = files[i], files[0]
|
||||
filestamp := files[0]
|
||||
files[0] = files[i]
|
||||
files[i] = filestamp
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
|
||||
@@ -1927,7 +1927,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return shareURL, nil
|
||||
}
|
||||
|
||||
const cnvFailMsg = "Don't know how to convert share link to direct link - returning the link as is"
|
||||
cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
|
||||
directURL := ""
|
||||
segments := strings.Split(shareURL, "/")
|
||||
switch f.driveType {
|
||||
|
||||
@@ -58,10 +58,12 @@ func populateSSECustomerKeys(opt *Options) error {
|
||||
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
|
||||
if opt.SSECustomerKeySha256 == "" {
|
||||
opt.SSECustomerKeySha256 = sha256Checksum
|
||||
} else if opt.SSECustomerKeySha256 != sha256Checksum {
|
||||
return fmt.Errorf("the computed SHA256 checksum "+
|
||||
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
|
||||
sha256Checksum, opt.SSECustomerKeySha256)
|
||||
} else {
|
||||
if opt.SSECustomerKeySha256 != sha256Checksum {
|
||||
return fmt.Errorf("the computed SHA256 checksum "+
|
||||
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
|
||||
sha256Checksum, opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
if opt.SSECustomerAlgorithm == "" {
|
||||
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
|
||||
|
||||
@@ -148,7 +148,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
|
||||
}
|
||||
md5sumBinary := m.Sum([]byte{})
|
||||
w.addMd5(&md5sumBinary, int64(chunkNumber))
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
// Object storage requires 1 <= PartNumber <= 10000
|
||||
ossPartNumber := chunkNumber + 1
|
||||
@@ -279,7 +279,7 @@ func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
|
||||
if extend := end - int64(len(w.md5s)); extend > 0 {
|
||||
w.md5s = append(w.md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(w.md5s[start:end], (*md5binary))
|
||||
copy(w.md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
|
||||
@@ -109,37 +109,6 @@ type Hashes struct {
|
||||
SHA256 string `json:"sha256"`
|
||||
}
|
||||
|
||||
// FileTruncateResponse is the response from /file_truncate
|
||||
type FileTruncateResponse struct {
|
||||
Error
|
||||
}
|
||||
|
||||
// FileCloseResponse is the response from /file_close
|
||||
type FileCloseResponse struct {
|
||||
Error
|
||||
}
|
||||
|
||||
// FileOpenResponse is the response from /file_open
|
||||
type FileOpenResponse struct {
|
||||
Error
|
||||
Fileid int64 `json:"fileid"`
|
||||
FileDescriptor int64 `json:"fd"`
|
||||
}
|
||||
|
||||
// FileChecksumResponse is the response from /file_checksum
|
||||
type FileChecksumResponse struct {
|
||||
Error
|
||||
MD5 string `json:"md5"`
|
||||
SHA1 string `json:"sha1"`
|
||||
SHA256 string `json:"sha256"`
|
||||
}
|
||||
|
||||
// FilePWriteResponse is the response from /file_pwrite
|
||||
type FilePWriteResponse struct {
|
||||
Error
|
||||
Bytes int64 `json:"bytes"`
|
||||
}
|
||||
|
||||
// UploadFileResponse is the response from /uploadfile
|
||||
type UploadFileResponse struct {
|
||||
Error
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -147,8 +146,7 @@ we have to rely on user password authentication for it.`,
|
||||
Help: "Your pcloud password.",
|
||||
IsPassword: true,
|
||||
Advanced: true,
|
||||
},
|
||||
}...),
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -163,16 +161,15 @@ type Options struct {
|
||||
|
||||
// Fs represents a remote pcloud
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
ts *oauthutil.TokenSource // the token source, used to create new clients
|
||||
srv *rest.Client // the connection to the server
|
||||
cleanupSrv *rest.Client // the connection used for the cleanup method
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
cleanupSrv *rest.Client // the connection used for the cleanup method
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
}
|
||||
|
||||
// Object describes a pcloud object
|
||||
@@ -320,7 +317,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
ts: ts,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
@@ -330,7 +326,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
if !canCleanup {
|
||||
f.features.CleanUp = nil
|
||||
@@ -338,7 +333,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath(ctx, "")
|
||||
return err
|
||||
})
|
||||
@@ -380,56 +375,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
client, err := f.newSingleConnClient(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create client: %w", err)
|
||||
}
|
||||
// init an empty file
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve src: %w", err)
|
||||
}
|
||||
openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
|
||||
writer := &writerAt{
|
||||
ctx: ctx,
|
||||
client: client,
|
||||
fs: f,
|
||||
size: size,
|
||||
remote: remote,
|
||||
fd: openResult.FileDescriptor,
|
||||
fileID: openResult.Fileid,
|
||||
}
|
||||
|
||||
return writer, nil
|
||||
}
|
||||
|
||||
// Create a new http client, accepting keep-alive headers, limited to single connection.
|
||||
// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection.
|
||||
// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
|
||||
// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
|
||||
func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
|
||||
baseClient := fshttp.NewClient(ctx)
|
||||
baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||
t.MaxConnsPerHost = 1
|
||||
t.DisableKeepAlives = false
|
||||
})
|
||||
// Set our own http client in the context
|
||||
ctx = oauthutil.Context(ctx, baseClient)
|
||||
// create a new oauth client, re-use the token source
|
||||
oAuthClient := oauth2.NewClient(ctx, f.ts)
|
||||
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
@@ -1149,42 +1094,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileID := fileIDtoNumber(o.id)
|
||||
filename = o.fs.opt.Enc.FromStandardName(filename)
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/copyfile",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fileid", fileID)
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("toname", filename)
|
||||
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
|
||||
opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))
|
||||
|
||||
result := &api.ItemResult{}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("update mtime: copyfile: %w", err)
|
||||
}
|
||||
if err := o.setMetaData(&result.Metadata); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
// Pcloud doesn't have a way of doing this so returning this
|
||||
// error will cause the file to be re-uploaded to set the time.
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
|
||||
@@ -1,216 +0,0 @@
|
||||
package pcloud
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/pcloud/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
|
||||
type writerAt struct {
|
||||
ctx context.Context
|
||||
client *rest.Client
|
||||
fs *Fs
|
||||
size int64
|
||||
remote string
|
||||
fd int64
|
||||
fileID int64
|
||||
}
|
||||
|
||||
// Close implements WriterAt.Close.
|
||||
func (c *writerAt) Close() error {
|
||||
// close fd
|
||||
if _, err := c.fileClose(c.ctx); err != nil {
|
||||
return fmt.Errorf("close fd: %w", err)
|
||||
}
|
||||
|
||||
// Avoiding race conditions: Depending on the tcp connection, there might be
|
||||
// caching issues when checking the size immediately after write.
|
||||
// Hence we try avoiding them by checking the resulting size on a different connection.
|
||||
if c.size < 0 {
|
||||
// Without knowing the size, we cannot do size checks.
|
||||
// Falling back to a sleep of 1s for sake of hope.
|
||||
time.Sleep(1 * time.Second)
|
||||
return nil
|
||||
}
|
||||
sizeOk := false
|
||||
sizeLastSeen := int64(0)
|
||||
for retry := 0; retry < 5; retry++ {
|
||||
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
|
||||
obj, err := c.fs.NewObject(c.ctx, c.remote)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get uploaded obj: %w", err)
|
||||
}
|
||||
sizeLastSeen = obj.Size()
|
||||
if obj.Size() == c.size {
|
||||
sizeOk = true
|
||||
break
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
if !sizeOk {
|
||||
return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteAt implements fs.WriteAt.
|
||||
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
|
||||
contentLength := len(buffer)
|
||||
|
||||
inSHA1Bytes := sha1.Sum(buffer)
|
||||
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
|
||||
|
||||
// get target hash
|
||||
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
outSHA1 := outChecksum.SHA1
|
||||
|
||||
if outSHA1 == "" || inSHA1 == "" {
|
||||
return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1)
|
||||
}
|
||||
|
||||
// check hash of buffer, skip if fits
|
||||
if inSHA1 == outSHA1 {
|
||||
return contentLength, nil
|
||||
}
|
||||
|
||||
// upload buffer with offset if necessary
|
||||
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return contentLength, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
|
||||
func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_open",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
filename = srcFs.opt.Enc.FromStandardName(filename)
|
||||
opts.Parameters.Set("name", filename)
|
||||
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
||||
opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE
|
||||
|
||||
result := &api.FileOpenResponse{}
|
||||
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open new file descriptor: %w", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_checksum, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
|
||||
func (c *writerAt) fileChecksum(
|
||||
ctx context.Context,
|
||||
offset, count int64,
|
||||
) (*api.FileChecksumResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_checksum",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
|
||||
|
||||
result := &api.FileChecksumResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_pwrite, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
|
||||
func (c *writerAt) filePWrite(
|
||||
ctx context.Context,
|
||||
offset int64,
|
||||
buf []byte,
|
||||
) (*api.FilePWriteResponse, error) {
|
||||
contentLength := int64(len(buf))
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_pwrite",
|
||||
Body: bytes.NewReader(buf),
|
||||
ContentLength: &contentLength,
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
Close: false,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Connection": "keep-alive",
|
||||
},
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
|
||||
|
||||
result := &api.FilePWriteResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Call pcloud file_close, see [API Doc.]
|
||||
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
|
||||
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/file_close",
|
||||
Parameters: url.Values{},
|
||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||
Close: true,
|
||||
}
|
||||
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
|
||||
|
||||
result := &api.FileCloseResponse{}
|
||||
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
|
||||
err = result.Error.Update(err)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("close file descriptor: %w", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -347,7 +347,7 @@ func calcGcid(r io.Reader, size int64) (string, error) {
|
||||
calcBlockSize := func(j int64) int64 {
|
||||
var psize int64 = 0x40000
|
||||
for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
|
||||
psize <<= 1
|
||||
psize = psize << 1
|
||||
}
|
||||
return psize
|
||||
}
|
||||
|
||||
@@ -37,11 +37,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/rclone/rclone/backend/pikpak/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
@@ -71,8 +70,8 @@ const (
|
||||
taskWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
rootURL = "https://api-drive.mypikpak.com"
|
||||
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = manager.DefaultUploadConcurrency
|
||||
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
defaultUploadConcurrency = s3manager.DefaultUploadConcurrency
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -1191,33 +1190,32 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
|
||||
|
||||
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
|
||||
p := resumable.Params
|
||||
endpoint := strings.Join(strings.Split(p.Endpoint, ".")[1:], ".") // "mypikpak.com"
|
||||
|
||||
// Create a credentials provider
|
||||
creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)
|
||||
|
||||
cfg, err := awsconfig.LoadDefaultConfig(ctx,
|
||||
awsconfig.WithCredentialsProvider(creds),
|
||||
awsconfig.WithRegion("pikpak"))
|
||||
cfg := &aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken),
|
||||
Region: aws.String("pikpak"),
|
||||
Endpoint: &endpoint,
|
||||
}
|
||||
sess, err := session.NewSession(cfg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
partSize := chunksize.Calculator(name, size, s3manager.MaxUploadParts, f.opt.ChunkSize)
|
||||
|
||||
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||
o.BaseEndpoint = aws.String("https://mypikpak.com/")
|
||||
})
|
||||
partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)
|
||||
|
||||
// Create an uploader with custom options
|
||||
uploader := manager.NewUploader(client, func(u *manager.Uploader) {
|
||||
// Create an uploader with the session and custom options
|
||||
uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
|
||||
u.PartSize = int64(partSize)
|
||||
u.Concurrency = f.opt.UploadConcurrency
|
||||
})
|
||||
// Perform an upload
|
||||
_, err = uploader.Upload(ctx, &s3.PutObjectInput{
|
||||
// Upload input parameters
|
||||
uParams := &s3manager.UploadInput{
|
||||
Bucket: &p.Bucket,
|
||||
Key: &p.Key,
|
||||
Body: in,
|
||||
})
|
||||
}
|
||||
// Perform an upload
|
||||
_, err = uploader.UploadWithContext(ctx, uParams)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,397 +0,0 @@
|
||||
package pixeldrain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// FilesystemPath is the object which is returned from the pixeldrain API when
// running the stat command on a path. It includes the node information for all
// the members of the path and for all the children of the requested directory.
type FilesystemPath struct {
	Path      []FilesystemNode `json:"path"`
	BaseIndex int              `json:"base_index"`
	Children  []FilesystemNode `json:"children"`
}

// Base returns the base node of the path, this is the node that the path points
// to
func (fsp *FilesystemPath) Base() FilesystemNode {
	return fsp.Path[fsp.BaseIndex]
}

// FilesystemNode is a single node in the pixeldrain filesystem. Usually part of
// a Path or Children slice. The Node is also returned as response from update
// commands, if requested
type FilesystemNode struct {
	Type      string    `json:"type"`
	Path      string    `json:"path"`
	Name      string    `json:"name"`
	Created   time.Time `json:"created"`
	Modified  time.Time `json:"modified"`
	ModeOctal string    `json:"mode_octal"`

	// File params
	FileSize  int64  `json:"file_size"`
	FileType  string `json:"file_type"`
	SHA256Sum string `json:"sha256_sum"`

	// ID is only filled in when the file/directory is publicly shared
	ID string `json:"id,omitempty"`
}

// ChangeLog is a log of changes that happened in a filesystem. Changes returned
// from the API are on chronological order from old to new. A change log can be
// requested for any directory or file, but change logging needs to be enabled
// with the update API before any log entries will be made. Changes are logged
// for 24 hours after logging was enabled. Each time a change log is requested
// the timer is reset to 24 hours.
type ChangeLog []ChangeLogEntry

// ChangeLogEntry is a single entry in a directory's change log. It contains the
// time at which the change occurred. The path relative to the requested
// directory and the action that was performed (update, move or delete). In
// case of a move operation the new path of the file is stored in the path_new
// field
type ChangeLogEntry struct {
	Time    time.Time `json:"time"`
	Path    string    `json:"path"`
	PathNew string    `json:"path_new"`
	Action  string    `json:"action"`
	Type    string    `json:"type"`
}

// UserInfo contains information about the logged in user
type UserInfo struct {
	Username         string           `json:"username"`
	Subscription     SubscriptionType `json:"subscription"`
	StorageSpaceUsed int64            `json:"storage_space_used"`
}

// SubscriptionType contains information about a subscription type. It's not the
// active subscription itself, only the properties of the subscription. Like the
// perks and cost
type SubscriptionType struct {
	Name         string `json:"name"`
	StorageSpace int64  `json:"storage_space"`
}

// APIError is the error type returned by the pixeldrain API
type APIError struct {
	StatusCode string `json:"value"`
	Message    string `json:"message"`
}

// Error implements the error interface. The machine-readable status code is
// used as the error string, the human-readable Message field is ignored here.
func (e APIError) Error() string { return e.StatusCode }

// Generalized errors which are caught in our own handlers and translated to
// more specific errors from the fs package.
var (
	errNotFound             = errors.New("pd api: path not found")
	errExists               = errors.New("pd api: node already exists")
	errAuthenticationFailed = errors.New("pd api: authentication failed")
)
|
||||
|
||||
func apiErrorHandler(resp *http.Response) (err error) {
|
||||
var e APIError
|
||||
if err = json.NewDecoder(resp.Body).Decode(&e); err != nil {
|
||||
return fmt.Errorf("failed to parse error json: %w", err)
|
||||
}
|
||||
|
||||
// We close the body here so that the API handlers can be sure that the
|
||||
// response body is not still open when an error was returned
|
||||
if err = resp.Body.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close resp body: %w", err)
|
||||
}
|
||||
|
||||
if e.StatusCode == "path_not_found" {
|
||||
return errNotFound
|
||||
} else if e.StatusCode == "directory_not_empty" {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
} else if e.StatusCode == "node_already_exists" {
|
||||
return errExists
|
||||
} else if e.StatusCode == "authentication_failed" {
|
||||
return errAuthenticationFailed
|
||||
} else if e.StatusCode == "permission_denied" {
|
||||
return fs.ErrorPermissionDenied
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
// retryErrorCodes is the set of HTTP status codes which are considered
// transient and therefore worth retrying.
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
}

// shouldRetry returns a boolean as to whether this resp and err deserve to be
// retried. It returns the err as a convenience so it can be used as the return
// value in the pacer function
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// A cancelled/expired context must never be retried
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
|
||||
|
||||
// paramsFromMetadata turns the fs.Metadata into instructions the pixeldrain API
|
||||
// can understand.
|
||||
func paramsFromMetadata(meta fs.Metadata) (params url.Values) {
|
||||
params = make(url.Values)
|
||||
|
||||
if modified, ok := meta["mtime"]; ok {
|
||||
params.Set("modified", modified)
|
||||
}
|
||||
if created, ok := meta["btime"]; ok {
|
||||
params.Set("created", created)
|
||||
}
|
||||
if mode, ok := meta["mode"]; ok {
|
||||
params.Set("mode", mode)
|
||||
}
|
||||
if shared, ok := meta["shared"]; ok {
|
||||
params.Set("shared", shared)
|
||||
}
|
||||
if loggingEnabled, ok := meta["logging_enabled"]; ok {
|
||||
params.Set("logging_enabled", loggingEnabled)
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
|
||||
// nodeToObject converts a single FilesystemNode API response to an object. The
// node is usually a single element from a directory listing
func (f *Fs) nodeToObject(node FilesystemNode) (o *Object) {
	// Trim the path prefix. The path prefix is hidden from rclone during all
	// operations. Saving it here would confuse rclone a lot. So instead we
	// strip it here and add it back for every API request we need to perform
	node.Path = strings.TrimPrefix(node.Path, f.pathPrefix)
	return &Object{fs: f, base: node}
}

// nodeToDirectory converts a FilesystemNode to an fs.Directory entry, with the
// path prefix stripped for the same reason as in nodeToObject.
func (f *Fs) nodeToDirectory(node FilesystemNode) fs.DirEntry {
	return fs.NewDir(strings.TrimPrefix(node.Path, f.pathPrefix), node.Modified).SetID(node.ID)
}
|
||||
|
||||
func (f *Fs) escapePath(p string) (out string) {
|
||||
// Add the path prefix, encode all the parts and combine them together
|
||||
var parts = strings.Split(f.pathPrefix+p, "/")
|
||||
for i := range parts {
|
||||
parts[i] = url.PathEscape(parts[i])
|
||||
}
|
||||
return strings.Join(parts, "/")
|
||||
}
|
||||
|
||||
// put uploads the contents of body to the given path, applying the metadata as
// API parameters. Parent directories are created by the server on demand.
// Returns the resulting node as reported by the API.
func (f *Fs) put(
	ctx context.Context,
	path string,
	body io.Reader,
	meta fs.Metadata,
	options []fs.OpenOption,
) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(meta)

	// Tell the server to automatically create parent directories if they don't
	// exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "PUT",
				Path:       f.escapePath(path),
				Body:       body,
				Parameters: params,
				Options:    options,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// read opens the file at path for reading. The returned ReadCloser is the raw
// HTTP response body; the caller is responsible for closing it.
func (f *Fs) read(ctx context.Context, path string, options []fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &rest.Opts{
			Method:  "GET",
			Path:    f.escapePath(path),
			Options: options,
		})
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}
|
||||
|
||||
// stat requests the node information for path, including all path members and
// the children of the directory if path points to one.
func (f *Fs) stat(ctx context.Context, path string) (fsp FilesystemPath, err error) {
	return fsp, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(path),
				// To receive node info from the pixeldrain API you need to add the
				// ?stat query. Without it pixeldrain will return the file contents
				// in the URL points to a file
				Parameters: url.Values{"stat": []string{""}},
			},
			nil,
			&fsp,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// changeLog fetches the change log entries for the filesystem root between the
// start and end times. Change logging must have been enabled beforehand via
// the update API for entries to be present.
func (f *Fs) changeLog(ctx context.Context, start, end time.Time) (changeLog ChangeLog, err error) {
	return changeLog, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				Path:   f.escapePath(""),
				Parameters: url.Values{
					"change_log": []string{""},
					"start":      []string{start.Format(time.RFC3339Nano)},
					"end":        []string{end.Format(time.RFC3339Nano)},
				},
			},
			nil,
			&changeLog,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// update applies the given metadata fields to the node at path using the
// "update" action and returns the updated node as reported by the API.
func (f *Fs) update(ctx context.Context, path string, fields fs.Metadata) (node FilesystemNode, err error) {
	var params = paramsFromMetadata(fields)
	params.Set("action", "update")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(path),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// mkdir creates the directory dir, including any missing parent directories
// (the "mkdirall" action).
func (f *Fs) mkdir(ctx context.Context, dir string) (err error) {
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:          "POST",
				Path:            f.escapePath(dir),
				MultipartParams: url.Values{"action": []string{"mkdirall"}},
				NoResponse:      true,
			},
			nil,
			nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// errIncompatibleSourceFS is returned by rename when the source remote is not
// a pixeldrain filesystem sharing the same root directory.
var errIncompatibleSourceFS = errors.New("source filesystem is not the same as target")

// Renames a file on the server side. Can be used for both directories and files
func (f *Fs) rename(ctx context.Context, src fs.Fs, from, to string, meta fs.Metadata) (node FilesystemNode, err error) {
	srcFs, ok := src.(*Fs)
	if !ok {
		// This is not a pixeldrain FS, can't move
		return node, errIncompatibleSourceFS
	} else if srcFs.opt.RootFolderID != f.opt.RootFolderID {
		// Path is not in the same root dir, can't move
		return node, errIncompatibleSourceFS
	}

	var params = paramsFromMetadata(meta)
	params.Set("action", "rename")

	// The target is always in our own filesystem so here we use our
	// own pathPrefix
	params.Set("target", f.pathPrefix+to)

	// Create parent directories if the parent directory of the file
	// does not exist yet
	params.Set("make_parents", "true")

	return node, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "POST",
				// Important: We use the source FS path prefix here
				Path:            srcFs.escapePath(from),
				MultipartParams: params,
			},
			nil,
			&node,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// delete removes the node at path. If recursive is true all child files are
// removed as well, otherwise the server refuses to delete non-empty
// directories.
func (f *Fs) delete(ctx context.Context, path string, recursive bool) (err error) {
	var params url.Values
	if recursive {
		// Tell the server to recursively delete all child files
		params = url.Values{"recursive": []string{"true"}}
	}

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method:     "DELETE",
				Path:       f.escapePath(path),
				Parameters: params,
				NoResponse: true,
			},
			nil, nil,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
|
||||
// userInfo fetches information about the currently authenticated user from the
// /user endpoint.
func (f *Fs) userInfo(ctx context.Context) (user UserInfo, err error) {
	return user, f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(
			ctx,
			&rest.Opts{
				Method: "GET",
				// The default RootURL points at the filesystem endpoint. We can't
				// use that to request user information. So here we override it to
				// the user endpoint
				RootURL: f.opt.APIURL + "/user",
			},
			nil,
			&user,
		)
		return shouldRetry(ctx, resp, err)
	})
}
|
||||
@@ -1,567 +0,0 @@
|
||||
// Package pixeldrain provides an interface to the Pixeldrain object storage
|
||||
// system.
|
||||
package pixeldrain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
const (
	// timeFormat is the format used for all timestamps sent to and parsed
	// from the pixeldrain API
	timeFormat = time.RFC3339Nano
	// pacer settings for rate limiting API calls
	minSleep      = pacer.MinSleep(10 * time.Millisecond)
	maxSleep      = pacer.MaxSleep(1 * time.Second)
	decayConstant = pacer.DecayConstant(2) // bigger for slower decay, exponential
)
|
||||
|
||||
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pixeldrain",
		Description: "Pixeldrain Filesystem",
		NewFs:       NewFs,
		Config:      nil,
		Options: []fs.Option{{
			Name: "api_key",
			Help: "API key for your pixeldrain account.\n" +
				"Found on https://pixeldrain.com/user/api_keys.",
			Sensitive: true,
		}, {
			Name: "root_folder_id",
			Help: "Root of the filesystem to use.\n\n" +
				"Set to 'me' to use your personal filesystem. " +
				"Set to a shared directory ID to use a shared directory.",
			Default: "me",
		}, {
			Name: "api_url",
			Help: "The API endpoint to connect to. In the vast majority of cases it's fine to leave\n" +
				"this at default. It is only intended to be changed for testing purposes.",
			Default:  "https://pixeldrain.com/api",
			Advanced: true,
			Required: true,
		}},
		// Metadata keys supported by the backend, see paramsFromMetadata for
		// how they are translated into API parameters
		MetadataInfo: &fs.MetadataInfo{
			System: map[string]fs.MetadataHelp{
				"mode": {
					Help:    "File mode",
					Type:    "octal, unix style",
					Example: "755",
				},
				"mtime": {
					Help:    "Time of last modification",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
				"btime": {
					Help:    "Time of file birth (creation)",
					Type:    "RFC 3339",
					Example: timeFormat,
				},
			},
			Help: "Pixeldrain supports file modes and creation times.",
		},
	})
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`
	RootFolderID string `config:"root_folder_id"`
	APIURL       string `config:"api_url"`
}

// Fs represents a remote box
type Fs struct {
	name     string       // name of this remote, as given to NewFS
	root     string       // the path we are working on, as given to NewFS
	opt      Options      // parsed options
	features *fs.Features // optional features
	srv      *rest.Client // the connection to the server
	pacer    *fs.Pacer    // rate limiter for API calls
	loggedIn bool         // if the user is authenticated

	// Pathprefix is the directory we're working in. The pathPrefix is stripped
	// from every API response containing a path. The pathPrefix always begins
	// and ends with a slash for concatenation convenience
	pathPrefix string
}

// Object describes a pixeldrain file
type Object struct {
	fs   *Fs            // what this object is part of
	base FilesystemNode // the node this object references
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(apiErrorHandler),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(minSleep, maxSleep, decayConstant)),
	}
	f.features = (&fs.Features{
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ReadMetadata:            true,
		WriteMetadata:           true,
	}).Fill(ctx, f)

	// Set the path prefix. This is the path to the root directory on the
	// server. We add it to each request and strip it from each response because
	// rclone does not want to see it
	f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"

	// The root URL equates to https://pixeldrain.com/api/filesystem during
	// normal operation. API handlers need to manually add the pathPrefix to
	// each request
	f.srv.SetRoot(opt.APIURL + "/filesystem")

	// If using an APIKey, set the Authorization header
	if len(opt.APIKey) > 0 {
		f.srv.SetUserPass("", opt.APIKey)

		// Check if credentials are correct
		user, err := f.userInfo(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get user data: %w", err)
		}

		f.loggedIn = true

		fs.Infof(f,
			"Logged in as '%s', subscription '%s', storage limit %d",
			user.Username, user.Subscription.Name, user.Subscription.StorageSpace,
		)
	}

	// The personal filesystem is only reachable with credentials
	if !f.loggedIn && opt.RootFolderID == "me" {
		return nil, errors.New("authentication required: the 'me' directory can only be accessed while logged in")
	}

	// Satisfy TestFsIsFile. This test expects that we throw an error if the
	// filesystem root is a file
	fsp, err := f.stat(ctx, "")
	if err != errNotFound && err != nil {
		// It doesn't matter if the root directory does not exist, as long as it
		// is not a file. This is what the test dictates
		return f, err
	} else if err == nil && fsp.Base().Type == "file" {
		// The filesystem root is a file, rclone wants us to set the root to the
		// parent directory
		f.root = path.Dir(f.root)
		f.pathPrefix = "/" + path.Join(opt.RootFolderID, f.root) + "/"
		return f, fs.ErrorIsFile
	}

	return f, nil
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
fsp, err := f.stat(ctx, dir)
|
||||
if err == errNotFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if fsp.Base().Type == "file" {
|
||||
return nil, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
entries = make(fs.DirEntries, len(fsp.Children))
|
||||
for i := range fsp.Children {
|
||||
if fsp.Children[i].Type == "dir" {
|
||||
entries[i] = f.nodeToDirectory(fsp.Children[i])
|
||||
} else {
|
||||
entries[i] = f.nodeToObject(fsp.Children[i])
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
fsp, err := f.stat(ctx, remote)
|
||||
if err == errNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if fsp.Base().Type == "dir" {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return f.nodeToObject(fsp.Base()), nil
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned.
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get object metadata")
|
||||
}
|
||||
|
||||
// Overwrite the mtime if it was not already set in the metadata
|
||||
if _, ok := meta["mtime"]; !ok {
|
||||
if meta == nil {
|
||||
meta = make(fs.Metadata)
|
||||
}
|
||||
meta["mtime"] = src.ModTime(ctx).Format(timeFormat)
|
||||
}
|
||||
|
||||
node, err := f.put(ctx, src.Remote(), in, meta, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to put object: %w", err)
|
||||
}
|
||||
|
||||
return f.nodeToObject(node), nil
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	err = f.mkdir(ctx, dir)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	} else if err == errExists {
		// Spec says we do not return an error if the directory already exists
		return nil
	}
	return err
}
|
||||
|
||||
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	// Non-recursive delete: the server refuses to remove non-empty dirs
	err = f.delete(ctx, dir, false)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("pixeldrain root '%s'", f.root) }

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration { return time.Millisecond }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return hash.Set(hash.SHA256) }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }
|
||||
|
||||
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
	// Recursive delete removes the directory and all its children in one call
	err = f.delete(ctx, dir, true)
	if err == errNotFound {
		return fs.ErrorDirNotFound
	}
	return err
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
// This is not a pixeldrain object. Can't move
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
node, err := f.rename(ctx, srcObj.fs, srcObj.base.Path, remote, fs.GetConfig(ctx).MetadataSet)
|
||||
if err == errIncompatibleSourceFS {
|
||||
return nil, fs.ErrorCantMove
|
||||
} else if err == errNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return f.nodeToObject(node), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
_, err = f.rename(ctx, src, srcRemote, dstRemote, nil)
|
||||
if err == errIncompatibleSourceFS {
|
||||
return fs.ErrorCantDirMove
|
||||
} else if err == errNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
} else if err == errExists {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	// If the bucket ID is not /me we need to explicitly enable change logging
	// for this directory or file
	if f.pathPrefix != "/me/" {
		_, err := f.update(ctx, "", fs.Metadata{"logging_enabled": "true"})
		if err != nil {
			// Best effort: log the failure but still start polling
			fs.Errorf(f, "Failed to set up change logging for path '%s': %s", f.pathPrefix, err)
		}
	}

	// The actual polling loop runs in its own goroutine
	go f.changeNotify(ctx, notify, newInterval)
}
|
||||
// changeNotify is the polling loop started by ChangeNotify. It repeatedly
// fetches the change log for the window since the last poll and forwards each
// change to the notify callback. It exits when newInterval is closed.
func (f *Fs) changeNotify(ctx context.Context, notify func(string, fs.EntryType), newInterval <-chan time.Duration) {
	// The first value on newInterval sets the initial polling interval
	var ticker = time.NewTicker(<-newInterval)
	var lastPoll = time.Now()

	for {
		select {
		case dur, ok := <-newInterval:
			if !ok {
				// Channel closed: stop polling and release the ticker
				ticker.Stop()
				return
			}

			fs.Debugf(f, "Polling changes at an interval of %s", dur)
			ticker.Reset(dur)

		case t := <-ticker.C:
			// Fetch all changes since the previous successful poll
			clog, err := f.changeLog(ctx, lastPoll, t)
			if err != nil {
				fs.Errorf(f, "Failed to get change log for path '%s': %s", f.pathPrefix, err)
				continue
			}

			for i := range clog {
				fs.Debugf(f, "Path '%s' (%s) changed (%s) in directory '%s'",
					clog[i].Path, clog[i].Type, clog[i].Action, f.pathPrefix)

				if clog[i].Type == "dir" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryDirectory)
				} else if clog[i].Type == "file" {
					notify(strings.TrimPrefix(clog[i].Path, "/"), fs.EntryObject)
				}
			}

			lastPoll = t
		}
	}
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Put already supports streaming so we just use that
	return f.Put(ctx, in, src, options...)
}
|
||||
|
||||
// DirSetModTime sets the mtime metadata on a directory
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) (err error) {
	_, err = f.update(ctx, dir, fs.Metadata{"mtime": modTime.Format(timeFormat)})
	return err
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	// Sharing is toggled via the "shared" metadata field; unlink disables it
	fsn, err := f.update(ctx, remote, fs.Metadata{"shared": strconv.FormatBool(!unlink)})
	if err != nil {
		return "", err
	}
	// The node ID is only set when the node is publicly shared
	if fsn.ID != "" {
		return strings.Replace(f.opt.APIURL, "/api", "/d/", 1) + fsn.ID, nil
	}
	return "", nil
}
|
||||
|
||||
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	user, err := f.userInfo(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}

	usage = &fs.Usage{Used: fs.NewUsageValue(user.StorageSpaceUsed)}

	// A negative storage space means the subscription has no fixed limit
	if user.Subscription.StorageSpace > -1 {
		usage.Total = fs.NewUsageValue(user.Subscription.StorageSpace)
	}

	return usage, nil
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
|
||||
_, err = o.fs.update(ctx, o.base.Path, fs.Metadata{"mtime": modTime.Format(timeFormat)})
|
||||
if err == nil {
|
||||
o.base.Modified = modTime
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
return o.fs.read(ctx, o.base.Path, options)
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// If existing is set then it updates the object rather than creating a new one.
|
||||
//
|
||||
// The new object may have been created if an error is returned.
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
// Copy the parameters and update the object
|
||||
o.base.Modified = src.ModTime(ctx)
|
||||
o.base.FileSize = src.Size()
|
||||
o.base.SHA256Sum, _ = src.Hash(ctx, hash.SHA256)
|
||||
_, err = o.fs.Put(ctx, in, o, options...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return o.fs.delete(ctx, o.base.Path, false)
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Hash returns the SHA-256 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.SHA256 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.base.SHA256Sum, nil
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing whether this object storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.base.Path
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.base.Path
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.base.Modified
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.base.FileSize
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if known, or "" if not
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.base.FileType
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
return fs.Metadata{
|
||||
"mode": o.base.ModeOctal,
|
||||
"mtime": o.base.Modified.Format(timeFormat),
|
||||
"btime": o.base.Created.Format(timeFormat),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify that all the interfaces are implemented correctly
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Info = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.DirEntry = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.Metadataer = (*Object)(nil)
|
||||
)
|
||||
@@ -1,18 +0,0 @@
|
||||
// Test pixeldrain filesystem interface
|
||||
package pixeldrain_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/pixeldrain"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestPixeldrain:",
|
||||
NilObject: (*pixeldrain.Object)(nil),
|
||||
SkipInvalidUTF8: true, // Pixeldrain throws an error on invalid utf-8
|
||||
})
|
||||
}
|
||||
@@ -13,8 +13,7 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
)
|
||||
|
||||
// flags
|
||||
@@ -83,18 +82,15 @@ func main() {
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
)
|
||||
import "github.com/aws/aws-sdk-go/service/s3"
|
||||
`)
|
||||
|
||||
genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
|
||||
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
|
||||
genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
|
||||
genSetFrom(new(types.ObjectVersion), new(types.DeleteMarkerEntry))
|
||||
genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
|
||||
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
|
||||
genSetFrom(new(types.Object), new(types.ObjectVersion))
|
||||
genSetFrom(new(s3.Object), new(s3.ObjectVersion))
|
||||
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
|
||||
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
|
||||
genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
|
||||
|
||||
1059
backend/s3/s3.go
1059
backend/s3/s3.go
File diff suppressed because it is too large
Load Diff
@@ -5,17 +5,15 @@ import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -60,16 +58,6 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
|
||||
// "tier" - read only
|
||||
// "btime" - read only
|
||||
}
|
||||
// Cloudflare insists on decompressing `Content-Encoding: gzip` unless
|
||||
// `Cache-Control: no-transform` is supplied. This is a deviation from
|
||||
// AWS but we fudge the tests here rather than breaking peoples
|
||||
// expectations of what Cloudflare does.
|
||||
//
|
||||
// This can always be overridden by using
|
||||
// `--header-upload "Cache-Control: no-transform"`
|
||||
if f.opt.Provider == "Cloudflare" {
|
||||
metadata["cache-control"] = "no-transform"
|
||||
}
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata)
|
||||
defer func() {
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
@@ -143,20 +131,20 @@ func TestVersionLess(t *testing.T) {
|
||||
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
|
||||
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
|
||||
for n, test := range []struct {
|
||||
a, b *types.ObjectVersion
|
||||
a, b *s3.ObjectVersion
|
||||
want bool
|
||||
}{
|
||||
{a: nil, b: nil, want: true},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
|
||||
{a: nil, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t2}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1}, b: &types.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
|
||||
{a: &types.ObjectVersion{Key: &key2, LastModified: &t1}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &types.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
|
||||
{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
|
||||
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
|
||||
} {
|
||||
got := versionLess(test.a, test.b)
|
||||
assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))
|
||||
@@ -169,24 +157,24 @@ func TestMergeDeleteMarkers(t *testing.T) {
|
||||
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
|
||||
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
|
||||
for n, test := range []struct {
|
||||
versions []types.ObjectVersion
|
||||
markers []types.DeleteMarkerEntry
|
||||
want []types.ObjectVersion
|
||||
versions []*s3.ObjectVersion
|
||||
markers []*s3.DeleteMarkerEntry
|
||||
want []*s3.ObjectVersion
|
||||
}{
|
||||
{
|
||||
versions: []types.ObjectVersion{},
|
||||
markers: []types.DeleteMarkerEntry{},
|
||||
want: []types.ObjectVersion{},
|
||||
versions: []*s3.ObjectVersion{},
|
||||
markers: []*s3.DeleteMarkerEntry{},
|
||||
want: []*s3.ObjectVersion{},
|
||||
},
|
||||
{
|
||||
versions: []types.ObjectVersion{
|
||||
versions: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
markers: []types.DeleteMarkerEntry{},
|
||||
want: []types.ObjectVersion{
|
||||
markers: []*s3.DeleteMarkerEntry{},
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
@@ -194,14 +182,14 @@ func TestMergeDeleteMarkers(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
versions: []types.ObjectVersion{},
|
||||
markers: []types.DeleteMarkerEntry{
|
||||
versions: []*s3.ObjectVersion{},
|
||||
markers: []*s3.DeleteMarkerEntry{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
want: []types.ObjectVersion{
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
@@ -210,7 +198,7 @@ func TestMergeDeleteMarkers(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
versions: []types.ObjectVersion{
|
||||
versions: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t2,
|
||||
@@ -220,13 +208,13 @@ func TestMergeDeleteMarkers(t *testing.T) {
|
||||
LastModified: &t2,
|
||||
},
|
||||
},
|
||||
markers: []types.DeleteMarkerEntry{
|
||||
markers: []*s3.DeleteMarkerEntry{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t1,
|
||||
},
|
||||
},
|
||||
want: []types.ObjectVersion{
|
||||
want: []*s3.ObjectVersion{
|
||||
{
|
||||
Key: &key1,
|
||||
LastModified: &t2,
|
||||
@@ -411,23 +399,22 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
// quirk is set correctly
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.rootBucket,
|
||||
ACL: types.BucketCannedACL(f.opt.BucketACL),
|
||||
ACL: stringPointerOrNil(f.opt.BucketACL),
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
|
||||
LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint),
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
LocationConstraint: &f.opt.LocationConstraint,
|
||||
}
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.CreateBucket(ctx, &req)
|
||||
_, err := f.c.CreateBucketWithContext(ctx, &req)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
var errString string
|
||||
var awsError smithy.APIError
|
||||
if err == nil {
|
||||
errString = "No Error"
|
||||
} else if errors.As(err, &awsError) {
|
||||
errString = awsError.ErrorCode()
|
||||
} else if awsErr, ok := err.(awserr.Error); ok {
|
||||
errString = awsErr.Code()
|
||||
} else {
|
||||
assert.Fail(t, "Unknown error %T %v", err, err)
|
||||
}
|
||||
|
||||
@@ -4,14 +4,12 @@ package s3
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
|
||||
@@ -56,16 +54,20 @@ func TestAWSDualStackOption(t *testing.T) {
|
||||
// test enabled
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
opt.UseDualStack = true
|
||||
s3Conn, err := s3Connection(ctx, opt, client)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
|
||||
s3Conn, _, _ := s3Connection(ctx, opt, client)
|
||||
if !strings.Contains(s3Conn.Endpoint, "dualstack") {
|
||||
t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
{
|
||||
// test default case
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
s3Conn, err := s3Connection(ctx, opt, client)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
|
||||
s3Conn, _, _ := s3Connection(ctx, opt, client)
|
||||
if strings.Contains(s3Conn.Endpoint, "dualstack") {
|
||||
t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,10 +2,7 @@
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
)
|
||||
import "github.com/aws/aws-sdk-go/service/s3"
|
||||
|
||||
// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {
|
||||
@@ -30,7 +27,6 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
@@ -45,8 +41,8 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
|
||||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
|
||||
// setFrom_typesObjectVersion_typesDeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_typesObjectVersion_typesDeleteMarkerEntry(a *types.ObjectVersion, b *types.DeleteMarkerEntry) {
|
||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
|
||||
a.IsLatest = b.IsLatest
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
@@ -64,11 +60,10 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
}
|
||||
|
||||
// setFrom_typesObject_typesObjectVersion copies matching elements from a to b
|
||||
func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
|
||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ETag = b.ETag
|
||||
a.Key = b.Key
|
||||
@@ -76,6 +71,7 @@ func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVers
|
||||
a.Owner = b.Owner
|
||||
a.RestoreStatus = b.RestoreStatus
|
||||
a.Size = b.Size
|
||||
a.StorageClass = b.StorageClass
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b
|
||||
@@ -86,7 +82,6 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.Expires = b.Expires
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
@@ -101,9 +96,8 @@ func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipa
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Key = b.Key
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
@@ -117,6 +111,7 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
@@ -137,7 +132,6 @@ func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipar
|
||||
func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.CopySource = b.CopySource
|
||||
a.Key = b.Key
|
||||
a.CopySourceIfMatch = b.CopySourceIfMatch
|
||||
a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
|
||||
a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch
|
||||
@@ -147,6 +141,7 @@ func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput,
|
||||
a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
|
||||
a.Key = b.Key
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
@@ -171,7 +166,6 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
|
||||
a.ETag = b.ETag
|
||||
a.Expiration = b.Expiration
|
||||
a.Expires = b.Expires
|
||||
a.ExpiresString = b.ExpiresString
|
||||
a.LastModified = b.LastModified
|
||||
a.Metadata = b.Metadata
|
||||
a.MissingMeta = b.MissingMeta
|
||||
@@ -189,14 +183,12 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
|
||||
a.StorageClass = b.StorageClass
|
||||
a.VersionId = b.VersionId
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Key = b.Key
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
@@ -210,6 +202,7 @@ func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipart
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
@@ -239,7 +232,6 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentLength = b.ContentLength
|
||||
a.ContentType = b.ContentType
|
||||
a.Expires = b.Expires
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
@@ -254,9 +246,8 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
|
||||
|
||||
// setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Key = b.Key
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
@@ -270,6 +261,7 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
@@ -11,9 +10,6 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
)
|
||||
|
||||
// URL parameters that need to be added to the signature
|
||||
@@ -40,17 +36,10 @@ var s3ParamsToSign = map[string]struct{}{
|
||||
"response-content-encoding": {},
|
||||
}
|
||||
|
||||
// Implement HTTPSignerV4 interface
|
||||
type v2Signer struct {
|
||||
opt *Options
|
||||
}
|
||||
|
||||
// SignHTTP signs requests using v2 auth.
|
||||
// sign signs requests using v2 auth
|
||||
//
|
||||
// Cobbled together from goamz and aws-sdk-go.
|
||||
//
|
||||
// Bodged up to compile with AWS SDK v2
|
||||
func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
|
||||
// Cobbled together from goamz and aws-sdk-go
|
||||
func sign(AccessKey, SecretKey string, req *http.Request) {
|
||||
// Set date
|
||||
date := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("Date", date)
|
||||
@@ -118,12 +107,11 @@ func (v2 *v2Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r
|
||||
|
||||
// Make signature
|
||||
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
|
||||
hash := hmac.New(sha1.New, []byte(v2.opt.SecretAccessKey))
|
||||
hash := hmac.New(sha1.New, []byte(SecretKey))
|
||||
_, _ = hash.Write([]byte(payload))
|
||||
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
|
||||
base64.StdEncoding.Encode(signature, hash.Sum(nil))
|
||||
|
||||
// Set signature in request
|
||||
req.Header.Set("Authorization", "AWS "+v2.opt.AccessKeyID+":"+string(signature))
|
||||
return nil
|
||||
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func getAuthorizationToken(ctx context.Context, srv *rest.Client, user, password
|
||||
// This is only going to be http errors here
|
||||
return "", fmt.Errorf("failed to authenticate: %w", err)
|
||||
}
|
||||
if len(result.Errors) > 0 {
|
||||
if result.Errors != nil && len(result.Errors) > 0 {
|
||||
return "", errors.New(strings.Join(result.Errors, ", "))
|
||||
}
|
||||
if result.Token == "" {
|
||||
|
||||
@@ -344,7 +344,7 @@ cost of using more memory.
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "connections",
|
||||
Help: strings.ReplaceAll(`Maximum number of SFTP simultaneous connections, 0 for unlimited.
|
||||
Help: strings.Replace(`Maximum number of SFTP simultaneous connections, 0 for unlimited.
|
||||
|
||||
Note that setting this is very likely to cause deadlocks so it should
|
||||
be used with care.
|
||||
@@ -358,7 +358,7 @@ maximum of |--checkers| and |--transfers|.
|
||||
So for |connections 3| you'd use |--checkers 2 --transfers 2
|
||||
--check-first| or |--checkers 1 --transfers 1|.
|
||||
|
||||
`, "|", "`"),
|
||||
`, "|", "`", -1),
|
||||
Default: 0,
|
||||
Advanced: true,
|
||||
}, {
|
||||
|
||||
@@ -1410,6 +1410,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return
|
||||
}
|
||||
|
||||
// min returns the smallest of x, y
|
||||
func min(x, y int64) int64 {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
// Get the segments for a large object
|
||||
//
|
||||
// It returns the names of the segments and the container that they live in
|
||||
|
||||
@@ -903,7 +903,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Backward compatible to old config
|
||||
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
|
||||
for i := 0; i < len(opt.Remotes)-1; i++ {
|
||||
opt.Remotes[i] += ":ro"
|
||||
opt.Remotes[i] = opt.Remotes[i] + ":ro"
|
||||
}
|
||||
opt.Upstreams = opt.Remotes
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -96,12 +95,6 @@ func TestMoveCopy(t *testing.T) {
|
||||
fLocal := unionFs.upstreams[0].Fs
|
||||
fMemory := unionFs.upstreams[1].Fs
|
||||
|
||||
if runtime.GOOS == "darwin" {
|
||||
// need to disable as this test specifically tests a local that can't Copy
|
||||
f.Features().Disable("Copy")
|
||||
fLocal.Features().Disable("Copy")
|
||||
}
|
||||
|
||||
t.Run("Features", func(t *testing.T) {
|
||||
assert.NotNil(t, f.Features().Move)
|
||||
assert.Nil(t, f.Features().Copy)
|
||||
|
||||
@@ -159,9 +159,7 @@ Set to 0 to disable chunked uploading.
|
||||
Help: "Exclude ownCloud mounted storages",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
},
|
||||
fshttp.UnixSocketConfig,
|
||||
},
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -179,7 +177,6 @@ type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
|
||||
ExcludeShares bool `config:"owncloud_exclude_shares"`
|
||||
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
|
||||
UnixSocket string `config:"unix_socket"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -461,12 +458,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
precision: fs.ModTimeNotSupported,
|
||||
}
|
||||
|
||||
var client *http.Client
|
||||
if opt.UnixSocket == "" {
|
||||
client = fshttp.NewClient(ctx)
|
||||
} else {
|
||||
client = fshttp.NewClientWithUnixSocket(ctx, opt.UnixSocket)
|
||||
}
|
||||
client := fshttp.NewClient(ctx)
|
||||
if opt.Vendor == "sharepoint-ntlm" {
|
||||
// Disable transparent HTTP/2 support as per https://golang.org/pkg/net/http/ ,
|
||||
// otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED'
|
||||
@@ -643,7 +635,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
odrvcookie.NewRenew(12*time.Hour, func() {
|
||||
spCookies, err := spCk.Cookies(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "could not renew cookies: %s", err.Error())
|
||||
fs.Errorf("could not renew cookies: %s", err.Error())
|
||||
return
|
||||
}
|
||||
f.srv.SetCookie(&spCookies.FedAuth, &spCookies.RtFa)
|
||||
|
||||
@@ -296,14 +296,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
//request object meta info
|
||||
if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {
|
||||
|
||||
} else if info.ResourceType == "file" {
|
||||
rootDir := path.Dir(root)
|
||||
if rootDir == "." {
|
||||
rootDir = ""
|
||||
} else {
|
||||
if info.ResourceType == "file" {
|
||||
rootDir := path.Dir(root)
|
||||
if rootDir == "." {
|
||||
rootDir = ""
|
||||
}
|
||||
f.setRoot(rootDir)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
f.setRoot(rootDir)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ var osarches = []string{
|
||||
"plan9/386",
|
||||
"plan9/amd64",
|
||||
"solaris/amd64",
|
||||
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
|
||||
"js/wasm",
|
||||
}
|
||||
|
||||
// Special environment flags for a given arch
|
||||
|
||||
@@ -41,9 +41,7 @@ docs = [
|
||||
"combine.md",
|
||||
"dropbox.md",
|
||||
"filefabric.md",
|
||||
"filescom.md",
|
||||
"ftp.md",
|
||||
"gofile.md",
|
||||
"googlecloudstorage.md",
|
||||
"drive.md",
|
||||
"googlephotos.md",
|
||||
@@ -71,7 +69,6 @@ docs = [
|
||||
"swift.md",
|
||||
"pcloud.md",
|
||||
"pikpak.md",
|
||||
"pixeldrain.md",
|
||||
"premiumizeme.md",
|
||||
"protondrive.md",
|
||||
"putio.md",
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test the sizes in the rclone binary of each backend by compiling
|
||||
rclone with and without the backend and measuring the difference.
|
||||
|
||||
Run with no arguments to test all backends or a supply a list of
|
||||
backends to test.
|
||||
"""
|
||||
|
||||
all_backends = "backend/all/all.go"
|
||||
|
||||
# compile command which is more or less like the production builds
|
||||
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')
|
||||
|
||||
def read_backends():
|
||||
"""
|
||||
Reads the backends file, returning a list of backends and the original file
|
||||
"""
|
||||
with open(all_backends) as fd:
|
||||
orig_all = fd.read()
|
||||
# find the backends
|
||||
backends = []
|
||||
for line in orig_all.split("\n"):
|
||||
match = match_backend.search(line)
|
||||
if match:
|
||||
backends.append(match.group(1))
|
||||
return backends, orig_all
|
||||
|
||||
def write_all(orig_all, backend):
|
||||
"""
|
||||
Write the all backends file without the backend given
|
||||
"""
|
||||
with open(all_backends, "w") as fd:
|
||||
for line in orig_all.split("\n"):
|
||||
match = re.search(r'"github.com/rclone/rclone/backend/(.*?)"', line)
|
||||
# Comment out line matching backend
|
||||
if match and match.group(1) == backend:
|
||||
line = "// " + line
|
||||
fd.write(line+"\n")
|
||||
|
||||
def compile():
|
||||
"""
|
||||
Compile the binary, returning the size
|
||||
"""
|
||||
subprocess.check_call(compile_command)
|
||||
return os.stat("rclone").st_size
|
||||
|
||||
def main():
|
||||
# change directory to the one with this script in
|
||||
os.chdir(os.path.dirname(os.path.abspath(__file__)))
|
||||
# change directory to the main rclone source
|
||||
os.chdir("..")
|
||||
|
||||
to_test = sys.argv[1:]
|
||||
backends, orig_all = read_backends()
|
||||
if len(to_test) == 0:
|
||||
to_test = backends
|
||||
# Compile with all backends
|
||||
ref = compile()
|
||||
print(f"Total binary size {ref/1024/1024:.3f} MiB")
|
||||
print("Backend,Size MiB")
|
||||
for test_backend in to_test:
|
||||
write_all(orig_all, test_backend)
|
||||
new_size = compile()
|
||||
print(f"{test_backend},{(ref-new_size)/1024/1024:.3f}")
|
||||
# restore all file
|
||||
with open(all_backends, "w") as fd:
|
||||
fd.write(orig_all)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -46,7 +46,8 @@ func printValue(what string, uv *int64, isSize bool) {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "about remote:",
|
||||
Short: `Get quota information from the remote.`,
|
||||
Long: `Prints quota information about a remote to standard
|
||||
Long: `
|
||||
` + "`rclone about`" + ` prints quota information about a remote to standard
|
||||
output. The output is typically used, free, quota and trash contents.
|
||||
|
||||
E.g. Typical output from ` + "`rclone about remote:`" + ` is:
|
||||
|
||||
@@ -25,7 +25,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "authorize",
|
||||
Short: `Remote authorization.`,
|
||||
Long: `Remote authorization. Used to authorize a remote or headless
|
||||
Long: `
|
||||
Remote authorization. Used to authorize a remote or headless
|
||||
rclone from a machine with a browser - use as instructed by
|
||||
rclone config.
|
||||
|
||||
|
||||
@@ -31,7 +31,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "backend <command> remote:path [opts] <args>",
|
||||
Short: `Run a backend-specific command.`,
|
||||
Long: `This runs a backend-specific command. The commands themselves (except
|
||||
Long: `
|
||||
This runs a backend-specific command. The commands themselves (except
|
||||
for "help" and "features") are defined by the backends and you should
|
||||
see the backend docs for definitions.
|
||||
|
||||
|
||||
@@ -1304,7 +1304,7 @@ func touchFiles(ctx context.Context, dateStr string, f fs.Fs, dir, glob string)
|
||||
return files, fmt.Errorf("invalid date %q: %w", dateStr, err)
|
||||
}
|
||||
|
||||
matcher, firstErr := filter.GlobPathToRegexp(glob, false)
|
||||
matcher, firstErr := filter.GlobToRegexp(glob, false)
|
||||
if firstErr != nil {
|
||||
return files, fmt.Errorf("invalid glob %q", glob)
|
||||
}
|
||||
@@ -1742,8 +1742,8 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
|
||||
b.path2, "{path2/}",
|
||||
b.replaceHex(b.path1), "{path1/}",
|
||||
b.replaceHex(b.path2), "{path2/}",
|
||||
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path1, slash, "/"), "/"), "{path1}", // fix windows-specific issue
|
||||
"//?/" + strings.TrimSuffix(strings.ReplaceAll(b.path2, slash, "/"), "/"), "{path2}",
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
|
||||
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
|
||||
strings.TrimSuffix(b.path2, slash), "{path2}",
|
||||
b.workDir, "{workdir}",
|
||||
|
||||
@@ -107,10 +107,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
|
||||
}
|
||||
if cryptHash != underlyingHash {
|
||||
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
|
||||
fs.Debugf(src, "%s", err.Error())
|
||||
fs.Debugf(src, err.Error())
|
||||
// using same error msg as CheckFn so integration tests match
|
||||
err = fmt.Errorf("%v differ", hashType)
|
||||
fs.Errorf(src, "%s", err.Error())
|
||||
fs.Errorf(src, err.Error())
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
|
||||
@@ -62,41 +62,42 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
|
||||
b.setHashType(ci)
|
||||
}
|
||||
|
||||
// Checks and Warnings
|
||||
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) ///nolint:govet
|
||||
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
|
||||
ci.CheckSum = false
|
||||
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
|
||||
} else if b.opt.Compare.Checksum && !ci.CheckSum {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
|
||||
}
|
||||
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
|
||||
}
|
||||
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
|
||||
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
|
||||
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
|
||||
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
|
||||
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Size = true
|
||||
ci.CheckSum = false
|
||||
b.opt.Compare.Checksum = false
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
|
||||
// note: --checksum will still affect the internal sync calls
|
||||
}
|
||||
}
|
||||
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
|
||||
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
}
|
||||
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
|
||||
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
|
||||
}
|
||||
|
||||
notSupported := func(label string, value bool, opt *bool) {
|
||||
if value {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
|
||||
*opt = false
|
||||
}
|
||||
}
|
||||
@@ -123,13 +124,13 @@ func sizeDiffers(a, b int64) bool {
|
||||
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
|
||||
if a == "" || b == "" {
|
||||
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
|
||||
}
|
||||
return false
|
||||
}
|
||||
if ht1 != ht2 {
|
||||
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -151,7 +152,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
|
||||
return
|
||||
}
|
||||
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
|
||||
b.opt.Compare.SlowHashSyncOnly = false
|
||||
b.opt.Compare.NoSlowHash = true
|
||||
ci.CheckSum = false
|
||||
@@ -159,7 +160,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
|
||||
}
|
||||
|
||||
if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
|
||||
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
|
||||
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
|
||||
b.opt.Compare.Modtime = true
|
||||
@@ -167,25 +168,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
|
||||
ci.CheckSum = false
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType1 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType1 != hash.None {
|
||||
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
|
||||
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
|
||||
}
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType1 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType2 != hash.None {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
|
||||
b.opt.Compare.Checksum = false
|
||||
ci.CheckSum = false
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
@@ -232,7 +233,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
b.opt.Compare.Checksum = true
|
||||
CompareFlag.Checksum = true
|
||||
default:
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,14 +285,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
|
||||
}
|
||||
if o.Size() < 0 {
|
||||
downloadHashWarn.Do(func() {
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
|
||||
})
|
||||
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
|
||||
return hashVal, hash.ErrUnsupported
|
||||
}
|
||||
|
||||
firstDownloadHash.Do(func() {
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
|
||||
})
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
|
||||
defer func() {
|
||||
|
||||
@@ -190,48 +190,50 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
|
||||
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
|
||||
ds.deleted++
|
||||
d |= deltaDeleted
|
||||
} else if !now.isDir(file) {
|
||||
} else {
|
||||
// skip dirs here, as we only care if they are new/deleted, not newer/older
|
||||
whatchanged := []string{}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
|
||||
if now.getSize(file) > old.getSize(file) {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
|
||||
d |= deltaLarger
|
||||
} else {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
|
||||
d |= deltaSmaller
|
||||
if !now.isDir(file) {
|
||||
whatchanged := []string{}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
|
||||
if now.getSize(file) > old.getSize(file) {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
|
||||
d |= deltaLarger
|
||||
} else {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
|
||||
d |= deltaSmaller
|
||||
}
|
||||
s = now.getSize(file)
|
||||
}
|
||||
s = now.getSize(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
|
||||
if old.beforeOther(now, file) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
|
||||
d |= deltaOlder
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
|
||||
if old.beforeOther(now, file) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
|
||||
d |= deltaOlder
|
||||
}
|
||||
t = now.getTime(file)
|
||||
}
|
||||
t = now.getTime(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
|
||||
d |= deltaHash
|
||||
h = now.getHash(file)
|
||||
if b.opt.Compare.Checksum {
|
||||
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
|
||||
d |= deltaHash
|
||||
h = now.getHash(file)
|
||||
}
|
||||
}
|
||||
// concat changes and print log
|
||||
if d.is(deltaModified) {
|
||||
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
|
||||
b.indent(msg, file, summary)
|
||||
}
|
||||
}
|
||||
// concat changes and print log
|
||||
if d.is(deltaModified) {
|
||||
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
|
||||
b.indent(msg, file, summary)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -43,10 +43,8 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
|
||||
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
|
||||
|
||||
// TZ defines time zone used in listings
|
||||
var (
|
||||
TZ = time.UTC
|
||||
tzLocal = false
|
||||
)
|
||||
var TZ = time.UTC
|
||||
var tzLocal = false
|
||||
|
||||
// fileInfo describes a file
|
||||
type fileInfo struct {
|
||||
@@ -85,7 +83,7 @@ func (ls *fileList) has(file string) bool {
|
||||
}
|
||||
_, found := ls.info[file]
|
||||
if !found {
|
||||
// try unquoting
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
_, found = ls.info[file]
|
||||
}
|
||||
@@ -95,7 +93,7 @@ func (ls *fileList) has(file string) bool {
|
||||
func (ls *fileList) get(file string) *fileInfo {
|
||||
info, found := ls.info[file]
|
||||
if !found {
|
||||
// try unquoting
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
info = ls.info[fmt.Sprint(file)]
|
||||
}
|
||||
@@ -422,7 +420,7 @@ func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
|
||||
|
||||
func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
|
||||
var fulllisting *fileList
|
||||
dirsonly := newFileList()
|
||||
var dirsonly = newFileList()
|
||||
var err error
|
||||
|
||||
if !b.opt.CreateEmptySrcDirs {
|
||||
@@ -452,6 +450,24 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
|
||||
// Need to use the other fs's precision (if lower) when copying
|
||||
// Note: we need to use Truncate rather than Round so that After() is reliable.
|
||||
// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC)
|
||||
func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
|
||||
DestPrecision := dst.Precision()
|
||||
|
||||
// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
|
||||
if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
|
||||
DestPrecision = fs.UnWrapFs(dst).Precision()
|
||||
}
|
||||
|
||||
if Modtime.After(Modtime.Truncate(DestPrecision)) {
|
||||
return Modtime.Truncate(DestPrecision)
|
||||
}
|
||||
return Modtime
|
||||
}
|
||||
|
||||
// modifyListing will modify the listing based on the results of the sync
|
||||
func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
|
||||
queue := queues.copy2to1
|
||||
@@ -517,13 +533,13 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
|
||||
|
||||
// build src winners list
|
||||
if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
|
||||
srcWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
|
||||
srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
|
||||
prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
|
||||
}
|
||||
|
||||
// build dst winners list
|
||||
if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
|
||||
dstWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
|
||||
dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
|
||||
prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
|
||||
}
|
||||
|
||||
@@ -607,7 +623,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
|
||||
}
|
||||
if srcNewName != "" { // if it was renamed and not deleted
|
||||
srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
|
||||
dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
|
||||
dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
|
||||
}
|
||||
if srcNewName != srcOldName {
|
||||
srcList.remove(srcOldName)
|
||||
|
||||
@@ -39,8 +39,8 @@ func (b *bisyncRun) indent(tag, file, msg string) {
|
||||
tag = Color(terminal.BlueFg, tag)
|
||||
}
|
||||
msg = Color(terminal.MagentaFg, msg)
|
||||
msg = strings.ReplaceAll(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"))
|
||||
msg = strings.ReplaceAll(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"))
|
||||
msg = strings.Replace(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"), -1)
|
||||
msg = strings.Replace(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"), -1)
|
||||
file = Color(terminal.CyanFg, escapePath(file, false))
|
||||
logf(nil, "- %-18s%-43s - %s", tag, msg, file)
|
||||
}
|
||||
|
||||
@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
finaliseOnce.Do(func() {
|
||||
if atexit.Signalled() {
|
||||
if b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
|
||||
b.InGracefulShutdown = true
|
||||
if b.SyncCI != nil {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
|
||||
b.SyncCI.MaxTransfer = 1
|
||||
b.SyncCI.MaxDuration = 1 * time.Second
|
||||
b.SyncCI.CutoffMode = fs.CutoffModeSoft
|
||||
gracePeriod := 30 * time.Second // TODO: flag to customize this?
|
||||
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
|
||||
b.CancelSync()
|
||||
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
|
||||
}
|
||||
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
// we haven't started to sync yet, so we're good.
|
||||
// no need to worry about the listing files, as we haven't overwritten them yet.
|
||||
b.CleanupCompleted = true
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
if !b.CleanupCompleted {
|
||||
if !b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
|
||||
}
|
||||
markFailed(b.listing1)
|
||||
markFailed(b.listing2)
|
||||
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
b.critical = false
|
||||
}
|
||||
if err == nil {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
|
||||
if b.critical {
|
||||
if b.retryable && b.opt.Resilient {
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
|
||||
} else {
|
||||
if bilib.FileExists(b.listing1) {
|
||||
_ = os.Rename(b.listing1, b.listing1+"-err")
|
||||
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
_ = os.Rename(b.listing2, b.listing2+"-err")
|
||||
}
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
|
||||
}
|
||||
return ErrBisyncAborted
|
||||
}
|
||||
if b.abort && !b.InGracefulShutdown {
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
|
||||
}
|
||||
if err == nil {
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
|
||||
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
|
||||
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
|
||||
if opt.CheckSync != CheckSyncFalse {
|
||||
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
|
||||
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid."))
|
||||
}
|
||||
b.revertToOldListings()
|
||||
} else {
|
||||
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
fs.Infof(nil, "Building Path1 and Path2 listings")
|
||||
ls1, ls2, err = b.makeMarchListing(fctx)
|
||||
if err != nil || accounting.Stats(fctx).Errored() {
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
@@ -476,8 +476,10 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
|
||||
if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
|
||||
b.indent("ERROR", file, "Path1 file not found in Path2")
|
||||
ok = false
|
||||
} else if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
|
||||
ok = false
|
||||
} else {
|
||||
if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, file := range files2.list {
|
||||
@@ -567,7 +569,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
|
||||
|
||||
func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
|
||||
if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
|
||||
err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
|
||||
err = fmt.Errorf(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
|
||||
return err
|
||||
}
|
||||
// need to test our BackupDirs too, as sync will be fooled by our --files-from filters
|
||||
@@ -623,7 +625,7 @@ func (b *bisyncRun) checkSyntax() error {
|
||||
|
||||
func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
|
||||
if b.DebugName != "" && b.DebugName == nametocheck {
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
|
||||
prettyprint(result, "writing result", fs.LogLevelDebug)
|
||||
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
|
||||
once.Do(func() {
|
||||
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
|
||||
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
|
||||
if winningPath > 0 {
|
||||
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
|
||||
} else {
|
||||
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
|
||||
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined."))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
// and either flag is sufficient without the other.
|
||||
func (b *bisyncRun) setResyncDefaults() {
|
||||
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
|
||||
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
|
||||
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
}
|
||||
if b.opt.ResyncMode != PreferNone {
|
||||
|
||||
@@ -20,7 +20,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "cachestats source:",
|
||||
Short: `Print cache stats for a remote`,
|
||||
Long: `Print cache stats for a remote in JSON format
|
||||
Long: `
|
||||
Print cache stats for a remote in JSON format
|
||||
`,
|
||||
Hidden: true,
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -39,7 +39,8 @@ var commandDefinition = &cobra.Command{
|
||||
Use: "cat remote:path",
|
||||
Short: `Concatenates any files and sends them to stdout.`,
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`Sends any files to standard output.
|
||||
Long: strings.ReplaceAll(`
|
||||
rclone cat sends any files to standard output.
|
||||
|
||||
You can use it like this to output a single file
|
||||
|
||||
|
||||
@@ -138,7 +138,8 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "check source:path dest:path",
|
||||
Short: `Checks the files in the source and destination match.`,
|
||||
Long: strings.ReplaceAll(`Checks the files in the source and destination match. It compares
|
||||
Long: strings.ReplaceAll(`
|
||||
Checks the files in the source and destination match. It compares
|
||||
sizes and hashes (MD5 or SHA1) and logs a report of files that don't
|
||||
match. It doesn't alter the source or destination.
|
||||
|
||||
|
||||
@@ -26,7 +26,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "checksum <hash> sumfile dst:path",
|
||||
Short: `Checks the files in the destination against a SUM file.`,
|
||||
Long: strings.ReplaceAll(`Checks that hashsums of destination files match the SUM file.
|
||||
Long: strings.ReplaceAll(`
|
||||
Checks that hashsums of destination files match the SUM file.
|
||||
It compares hashes (MD5, SHA1, etc) and logs a report of files which
|
||||
don't match. It doesn't alter the file system.
|
||||
|
||||
|
||||
@@ -16,7 +16,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "cleanup remote:path",
|
||||
Short: `Clean up the remote if possible.`,
|
||||
Long: `Clean up the remote if possible. Empty the trash or delete old file
|
||||
Long: `
|
||||
Clean up the remote if possible. Empty the trash or delete old file
|
||||
versions. Not supported by all remotes.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -116,7 +116,7 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
|
||||
if !fi.InActive() {
|
||||
err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
|
||||
err = fs.CountError(err)
|
||||
log.Fatal(err.Error())
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
// Limit transfers to this file
|
||||
err := fi.AddFile(fileName)
|
||||
|
||||
@@ -268,7 +268,8 @@ as a readable demonstration.
|
||||
var configCreateCommand = &cobra.Command{
|
||||
Use: "create name type [key value]*",
|
||||
Short: `Create a new remote with name, type and options.`,
|
||||
Long: strings.ReplaceAll(`Create a new remote of |name| with |type| and options. The options
|
||||
Long: strings.ReplaceAll(`
|
||||
Create a new remote of |name| with |type| and options. The options
|
||||
should be passed in pairs of |key| |value| or as |key=value|.
|
||||
|
||||
For example, to make a swift remote of name myremote using auto config
|
||||
@@ -333,7 +334,8 @@ func init() {
|
||||
var configUpdateCommand = &cobra.Command{
|
||||
Use: "update name [key value]+",
|
||||
Short: `Update options in an existing remote.`,
|
||||
Long: strings.ReplaceAll(`Update an existing remote's options. The options should be passed in
|
||||
Long: strings.ReplaceAll(`
|
||||
Update an existing remote's options. The options should be passed in
|
||||
pairs of |key| |value| or as |key=value|.
|
||||
|
||||
For example, to update the env_auth field of a remote of name myremote
|
||||
@@ -377,7 +379,8 @@ var configDeleteCommand = &cobra.Command{
|
||||
var configPasswordCommand = &cobra.Command{
|
||||
Use: "password name [key value]+",
|
||||
Short: `Update password in an existing remote.`,
|
||||
Long: strings.ReplaceAll(`Update an existing remote's password. The password
|
||||
Long: strings.ReplaceAll(`
|
||||
Update an existing remote's password. The password
|
||||
should be passed in pairs of |key| |password| or as |key=password|.
|
||||
The |password| should be passed in in clear (unobscured).
|
||||
|
||||
@@ -432,7 +435,8 @@ func argsToMap(args []string) (out rc.Params, err error) {
|
||||
var configReconnectCommand = &cobra.Command{
|
||||
Use: "reconnect remote:",
|
||||
Short: `Re-authenticates user with remote.`,
|
||||
Long: `This reconnects remote: passed in to the cloud storage system.
|
||||
Long: `
|
||||
This reconnects remote: passed in to the cloud storage system.
|
||||
|
||||
To disconnect the remote use "rclone config disconnect".
|
||||
|
||||
@@ -452,7 +456,8 @@ This normally means going through the interactive oauth flow again.
|
||||
var configDisconnectCommand = &cobra.Command{
|
||||
Use: "disconnect remote:",
|
||||
Short: `Disconnects user from remote`,
|
||||
Long: `This disconnects the remote: passed in to the cloud storage system.
|
||||
Long: `
|
||||
This disconnects the remote: passed in to the cloud storage system.
|
||||
|
||||
This normally means revoking the oauth token.
|
||||
|
||||
@@ -484,7 +489,8 @@ func init() {
|
||||
var configUserInfoCommand = &cobra.Command{
|
||||
Use: "userinfo remote:",
|
||||
Short: `Prints info about logged in user of remote.`,
|
||||
Long: `This prints the details of the person logged in to the cloud storage
|
||||
Long: `
|
||||
This prints the details of the person logged in to the cloud storage
|
||||
system.
|
||||
`,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
|
||||
@@ -26,7 +26,8 @@ var commandDefinition = &cobra.Command{
|
||||
Use: "copy source:path dest:path",
|
||||
Short: `Copy files from source to dest, skipping identical files.`,
|
||||
// Note: "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`Copy the source to the destination. Does not transfer files that are
|
||||
Long: strings.ReplaceAll(`
|
||||
Copy the source to the destination. Does not transfer files that are
|
||||
identical on source and destination, testing by size and modification
|
||||
time or MD5SUM. Doesn't delete files from the destination. If you
|
||||
want to also delete files from destination, to make it match source,
|
||||
|
||||
@@ -17,7 +17,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "copyto source:path dest:path",
|
||||
Short: `Copy files from source to dest, skipping identical files.`,
|
||||
Long: `If source:path is a file or directory then it copies it to a file or
|
||||
Long: `
|
||||
If source:path is a file or directory then it copies it to a file or
|
||||
directory named dest:path.
|
||||
|
||||
This can be used to upload single files to other than their current
|
||||
|
||||
@@ -36,7 +36,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "copyurl https://example.com dest:path",
|
||||
Short: `Copy the contents of the URL supplied content to dest:path.`,
|
||||
Long: strings.ReplaceAll(`Download a URL's content and copy it to the destination without saving
|
||||
Long: strings.ReplaceAll(`
|
||||
Download a URL's content and copy it to the destination without saving
|
||||
it in temporary storage.
|
||||
|
||||
Setting |--auto-filename| will attempt to automatically determine the
|
||||
|
||||
@@ -23,9 +23,10 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "cryptcheck remote:path cryptedremote:path",
|
||||
Short: `Cryptcheck checks the integrity of an encrypted remote.`,
|
||||
Long: `Checks a remote against a [crypted](/crypt/) remote. This is the equivalent
|
||||
of running rclone [check](/commands/rclone_check/), but able to check the
|
||||
checksums of the encrypted remote.
|
||||
Long: `
|
||||
rclone cryptcheck checks a remote against a [crypted](/crypt/) remote.
|
||||
This is the equivalent of running rclone [check](/commands/rclone_check/),
|
||||
but able to check the checksums of the encrypted remote.
|
||||
|
||||
For it to work the underlying remote of the cryptedremote must support
|
||||
some kind of checksum.
|
||||
@@ -103,7 +104,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
|
||||
}
|
||||
if cryptHash != underlyingHash {
|
||||
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
|
||||
fs.Errorf(src, "%s", err.Error())
|
||||
fs.Errorf(src, err.Error())
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
|
||||
@@ -26,8 +26,9 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "cryptdecode encryptedremote: encryptedfilename",
|
||||
Short: `Cryptdecode returns unencrypted file names.`,
|
||||
Long: `Returns unencrypted file names when provided with a list of encrypted file
|
||||
names. List limit is 10 items.
|
||||
Long: `
|
||||
rclone cryptdecode returns unencrypted file names when provided with
|
||||
a list of encrypted file names. List limit is 10 items.
|
||||
|
||||
If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file names.
|
||||
|
||||
|
||||
@@ -27,7 +27,9 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "dedupe [mode] remote:path",
|
||||
Short: `Interactively find duplicate filenames and delete/rename them.`,
|
||||
Long: `By default ` + "`dedupe`" + ` interactively finds files with duplicate
|
||||
Long: `
|
||||
|
||||
By default ` + "`dedupe`" + ` interactively finds files with duplicate
|
||||
names and offers to delete all but one or rename them to be
|
||||
different. This is known as deduping by name.
|
||||
|
||||
|
||||
@@ -25,7 +25,8 @@ var commandDefinition = &cobra.Command{
|
||||
Use: "delete remote:path",
|
||||
Short: `Remove the files in path.`,
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`Remove the files in path. Unlike [purge](/commands/rclone_purge/) it
|
||||
Long: strings.ReplaceAll(`
|
||||
Remove the files in path. Unlike [purge](/commands/rclone_purge/) it
|
||||
obeys include/exclude filters so can be used to selectively delete files.
|
||||
|
||||
|rclone delete| only deletes files but leaves the directory structure
|
||||
|
||||
@@ -18,7 +18,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "deletefile remote:path",
|
||||
Short: `Remove a single file from remote.`,
|
||||
Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
|
||||
Long: `
|
||||
Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
|
||||
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
|
||||
it will always be removed.
|
||||
`,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Package genautocomplete provides the completion command.
|
||||
// Package genautocomplete provides the genautocomplete command.
|
||||
package genautocomplete
|
||||
|
||||
import (
|
||||
@@ -13,7 +13,8 @@ func init() {
|
||||
var completionDefinition = &cobra.Command{
|
||||
Use: "completion [shell]",
|
||||
Short: `Output completion script for a given shell.`,
|
||||
Long: `Generates a shell completion script for rclone.
|
||||
Long: `
|
||||
Generates a shell completion script for rclone.
|
||||
Run with ` + "`--help`" + ` to list the supported shells.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -15,11 +15,12 @@ func init() {
|
||||
var bashCommandDefinition = &cobra.Command{
|
||||
Use: "bash [output_file]",
|
||||
Short: `Output bash completion script for rclone.`,
|
||||
Long: `Generates a bash shell autocompletion script for rclone.
|
||||
Long: `
|
||||
Generates a bash shell autocompletion script for rclone.
|
||||
|
||||
By default, when run without any arguments,
|
||||
|
||||
rclone completion bash
|
||||
rclone genautocomplete bash
|
||||
|
||||
the generated script will be written to
|
||||
|
||||
|
||||
@@ -15,12 +15,13 @@ func init() {
|
||||
var fishCommandDefinition = &cobra.Command{
|
||||
Use: "fish [output_file]",
|
||||
Short: `Output fish completion script for rclone.`,
|
||||
Long: `Generates a fish autocompletion script for rclone.
|
||||
Long: `
|
||||
Generates a fish autocompletion script for rclone.
|
||||
|
||||
This writes to /etc/fish/completions/rclone.fish by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion fish
|
||||
sudo rclone genautocomplete fish
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
@@ -15,7 +15,8 @@ func init() {
|
||||
var powershellCommandDefinition = &cobra.Command{
|
||||
Use: "powershell [output_file]",
|
||||
Short: `Output powershell completion script for rclone.`,
|
||||
Long: `Generate the autocompletion script for powershell.
|
||||
Long: `
|
||||
Generate the autocompletion script for powershell.
|
||||
|
||||
To load completions in your current shell session:
|
||||
|
||||
|
||||
@@ -15,12 +15,13 @@ func init() {
|
||||
var zshCommandDefinition = &cobra.Command{
|
||||
Use: "zsh [output_file]",
|
||||
Short: `Output zsh completion script for rclone.`,
|
||||
Long: `Generates a zsh autocompletion script for rclone.
|
||||
Long: `
|
||||
Generates a zsh autocompletion script for rclone.
|
||||
|
||||
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion zsh
|
||||
sudo rclone genautocomplete zsh
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
@@ -30,19 +30,12 @@ type frontmatter struct {
|
||||
Title string
|
||||
Description string
|
||||
Source string
|
||||
Aliases []string
|
||||
Annotations map[string]string
|
||||
}
|
||||
|
||||
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
|
||||
title: "{{ .Title }}"
|
||||
description: "{{ .Description }}"
|
||||
{{- if .Aliases }}
|
||||
aliases:
|
||||
{{- range $value := .Aliases }}
|
||||
- {{ $value }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- end }}
|
||||
@@ -53,7 +46,8 @@ aliases:
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "gendocs output_directory",
|
||||
Short: `Output markdown docs for rclone to the directory supplied.`,
|
||||
Long: `This produces markdown docs for the rclone commands to the directory
|
||||
Long: `
|
||||
This produces markdown docs for the rclone commands to the directory
|
||||
supplied. These are in a format suitable for hugo to render into the
|
||||
rclone.org website.`,
|
||||
Annotations: map[string]string{
|
||||
@@ -88,37 +82,23 @@ rclone.org website.`,
|
||||
// Look up name => details for prepender
|
||||
type commandDetails struct {
|
||||
Short string
|
||||
Aliases []string
|
||||
Annotations map[string]string
|
||||
}
|
||||
var commands = map[string]commandDetails{}
|
||||
var addCommandDetails func(root *cobra.Command, parentAliases []string)
|
||||
addCommandDetails = func(root *cobra.Command, parentAliases []string) {
|
||||
var aliases []string
|
||||
var addCommandDetails func(root *cobra.Command)
|
||||
addCommandDetails = func(root *cobra.Command) {
|
||||
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
|
||||
var aliases []string
|
||||
for _, p := range parentAliases {
|
||||
aliases = append(aliases, p+" "+root.Name())
|
||||
for _, v := range root.Aliases {
|
||||
aliases = append(aliases, p+" "+v)
|
||||
}
|
||||
}
|
||||
for _, v := range root.Aliases {
|
||||
if root.HasParent() {
|
||||
aliases = append(aliases, root.Parent().CommandPath()+" "+v)
|
||||
} else {
|
||||
aliases = append(aliases, v)
|
||||
}
|
||||
}
|
||||
commands[name] = commandDetails{
|
||||
Short: root.Short,
|
||||
Aliases: aliases,
|
||||
Annotations: root.Annotations,
|
||||
}
|
||||
aliases = append(aliases, root.Aliases...)
|
||||
for _, c := range root.Commands() {
|
||||
addCommandDetails(c, aliases)
|
||||
addCommandDetails(c)
|
||||
}
|
||||
}
|
||||
addCommandDetails(cmd.Root, []string{})
|
||||
addCommandDetails(cmd.Root)
|
||||
|
||||
// markup for the docs files
|
||||
prepender := func(filename string) string {
|
||||
@@ -129,12 +109,8 @@ rclone.org website.`,
|
||||
Title: strings.ReplaceAll(base, "_", " "),
|
||||
Description: commands[name].Short,
|
||||
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
|
||||
Aliases: []string{},
|
||||
Annotations: map[string]string{},
|
||||
}
|
||||
for _, v := range commands[name].Aliases {
|
||||
data.Aliases = append(data.Aliases, "/commands/"+strings.ReplaceAll(v, " ", "_")+"/")
|
||||
}
|
||||
// Filter out annotations that confuse hugo from the frontmatter
|
||||
for k, v := range commands[name].Annotations {
|
||||
if k != "groups" {
|
||||
@@ -169,6 +145,12 @@ rclone.org website.`,
|
||||
name := filepath.Base(path)
|
||||
cmd, ok := commands[name]
|
||||
if !ok {
|
||||
// Avoid man pages which are for aliases. This is a bit messy!
|
||||
for _, alias := range aliases {
|
||||
if strings.Contains(name, alias) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("didn't find command for %q", name)
|
||||
}
|
||||
b, err := os.ReadFile(path)
|
||||
@@ -177,36 +159,33 @@ rclone.org website.`,
|
||||
}
|
||||
doc := string(b)
|
||||
|
||||
startCut := strings.Index(doc, `### Options inherited from parent commands`)
|
||||
endCut := strings.Index(doc, `### SEE ALSO`)
|
||||
if startCut < 0 || endCut < 0 {
|
||||
if name != "rclone.md" {
|
||||
return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
|
||||
}
|
||||
if endCut >= 0 {
|
||||
doc = doc[:endCut] + "### See Also" + doc[endCut+12:]
|
||||
}
|
||||
} else {
|
||||
var out strings.Builder
|
||||
if groupsString := cmd.Annotations["groups"]; groupsString != "" {
|
||||
_, _ = out.WriteString("Options shared with other commands are described next.\n")
|
||||
_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
|
||||
groups := flags.All.Include(groupsString)
|
||||
for _, group := range groups.Groups {
|
||||
if group.Flags.HasFlags() {
|
||||
_, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
|
||||
_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
|
||||
_, _ = out.WriteString("```\n")
|
||||
_, _ = out.WriteString(group.Flags.FlagUsages())
|
||||
_, _ = out.WriteString("```\n\n")
|
||||
}
|
||||
var out strings.Builder
|
||||
if groupsString := cmd.Annotations["groups"]; groupsString != "" {
|
||||
groups := flags.All.Include(groupsString)
|
||||
for _, group := range groups.Groups {
|
||||
if group.Flags.HasFlags() {
|
||||
_, _ = fmt.Fprintf(&out, "\n### %s Options\n\n", group.Name)
|
||||
_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
|
||||
_, _ = fmt.Fprintln(&out, "```")
|
||||
_, _ = out.WriteString(group.Flags.FlagUsages())
|
||||
_, _ = fmt.Fprintln(&out, "```")
|
||||
}
|
||||
} else {
|
||||
_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
|
||||
}
|
||||
doc = doc[:startCut] + out.String() + "### See Also" + doc[endCut+12:]
|
||||
}
|
||||
_, _ = out.WriteString(`
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
`)
|
||||
|
||||
startCut := strings.Index(doc, `### Options inherited from parent commands`)
|
||||
endCut := strings.Index(doc, `## SEE ALSO`)
|
||||
if startCut < 0 || endCut < 0 {
|
||||
if name == "rclone.md" {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
|
||||
}
|
||||
doc = doc[:startCut] + out.String() + doc[endCut:]
|
||||
// outdent all the titles by one
|
||||
doc = outdentTitle.ReplaceAllString(doc, `$1`)
|
||||
err = os.WriteFile(path, []byte(doc), 0777)
|
||||
|
||||
@@ -157,7 +157,7 @@ type server struct {
|
||||
}
|
||||
|
||||
func (s *server) sendMsg(msg string) {
|
||||
msg += "\n"
|
||||
msg = msg + "\n"
|
||||
if _, err := io.WriteString(s.writer, msg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -84,7 +84,8 @@ func CreateFromStdinArg(ht hash.Type, args []string, startArg int) (bool, error)
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "hashsum [<hash> remote:path]",
|
||||
Short: `Produces a hashsum file for all the objects in the path.`,
|
||||
Long: `Produces a hash file for all the objects in the path using the hash
|
||||
Long: `
|
||||
Produces a hash file for all the objects in the path using the hash
|
||||
named. The output is in the same format as the standard
|
||||
md5sum/sha1sum tool.
|
||||
|
||||
|
||||
77
cmd/help.go
77
cmd/help.go
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configflags"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/filter/filterflags"
|
||||
"github.com/rclone/rclone/fs/log/logflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
@@ -27,7 +26,8 @@ import (
|
||||
var Root = &cobra.Command{
|
||||
Use: "rclone",
|
||||
Short: "Show help for rclone commands, flags and backends.",
|
||||
Long: `Rclone syncs files to and from cloud storage providers as well as
|
||||
Long: `
|
||||
Rclone syncs files to and from cloud storage providers as well as
|
||||
mounting them, listing them in lots of different ways.
|
||||
|
||||
See the home page (https://rclone.org/) for installation, usage,
|
||||
@@ -58,34 +58,23 @@ var helpCommand = &cobra.Command{
|
||||
}
|
||||
|
||||
// to filter the flags with
|
||||
var (
|
||||
filterFlagsGroup string
|
||||
filterFlagsRe *regexp.Regexp
|
||||
filterFlagsNamesOnly bool
|
||||
)
|
||||
var flagsRe *regexp.Regexp
|
||||
|
||||
// Show the flags
|
||||
var helpFlags = &cobra.Command{
|
||||
Use: "flags [<filter>]",
|
||||
Use: "flags [<regexp to match>]",
|
||||
Short: "Show the global flags for rclone",
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
command.Flags()
|
||||
if len(args) > 0 {
|
||||
re, err := regexp.Compile(`(?i)` + args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to compile flags regexp: %v", err)
|
||||
}
|
||||
flagsRe = re
|
||||
}
|
||||
if GeneratingDocs {
|
||||
Root.SetUsageTemplate(docFlagsTemplate)
|
||||
} else {
|
||||
if len(args) > 0 {
|
||||
re, err := filter.GlobStringToRegexp(args[0], false, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid flag filter: %v", err)
|
||||
}
|
||||
fs.Debugf(nil, "Flag filter: %s", re.String())
|
||||
filterFlagsRe = re
|
||||
}
|
||||
if filterFlagsGroup != "" {
|
||||
Root.SetUsageTemplate(filterFlagsSingleGroupTemplate)
|
||||
} else if len(args) > 0 {
|
||||
Root.SetUsageTemplate(filterFlagsMultiGroupTemplate)
|
||||
}
|
||||
Root.SetOutput(os.Stdout)
|
||||
}
|
||||
_ = command.Usage()
|
||||
@@ -157,7 +146,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
|
||||
})
|
||||
cobra.AddTemplateFunc("flagGroups", func(cmd *cobra.Command) []*flags.Group {
|
||||
// Add the backend flags and check all flags
|
||||
backendGroup := flags.All.NewGroup("Backend", "Backend-only flags (these can be set in the config file also)")
|
||||
backendGroup := flags.All.NewGroup("Backend", "Backend only flags. These can be set in the config file also.")
|
||||
allRegistered := flags.All.AllRegistered()
|
||||
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||
if _, ok := backendFlags[flag.Name]; ok {
|
||||
@@ -168,7 +157,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
|
||||
fs.Errorf(nil, "Flag --%s is unknown", flag.Name)
|
||||
}
|
||||
})
|
||||
groups := flags.All.Filter(filterFlagsGroup, filterFlagsRe, filterFlagsNamesOnly).Include(cmd.Annotations["groups"])
|
||||
groups := flags.All.Filter(flagsRe).Include(cmd.Annotations["groups"])
|
||||
return groups.Groups
|
||||
})
|
||||
rootCmd.SetUsageTemplate(usageTemplate)
|
||||
@@ -180,9 +169,6 @@ func setupRootCommand(rootCmd *cobra.Command) {
|
||||
|
||||
rootCmd.AddCommand(helpCommand)
|
||||
helpCommand.AddCommand(helpFlags)
|
||||
helpFlagsFlags := helpFlags.Flags()
|
||||
flags.StringVarP(helpFlagsFlags, &filterFlagsGroup, "group", "", "", "Only include flags from specific group", "")
|
||||
flags.BoolVarP(helpFlagsFlags, &filterFlagsNamesOnly, "name", "", false, "Apply filter only on flag names", "")
|
||||
helpCommand.AddCommand(helpBackends)
|
||||
helpCommand.AddCommand(helpBackend)
|
||||
|
||||
@@ -215,15 +201,20 @@ Aliases:
|
||||
Examples:
|
||||
{{.Example}}{{end}}{{if and (showCommands .) .HasAvailableSubCommands}}
|
||||
|
||||
Available commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
|
||||
{{rpad .Name .NamePadding}} {{.Short}}{{end}}{{end}}{{end}}{{if and (showLocalFlags .) .HasAvailableLocalFlags}}
|
||||
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if and (showLocalFlags .) .HasAvailableLocalFlags}}
|
||||
|
||||
Flags:
|
||||
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}{{range flagGroups .}}{{if .Flags.HasFlags}}
|
||||
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}
|
||||
|
||||
{{ range flagGroups . }}{{ if .Flags.HasFlags }}
|
||||
# {{ .Name }} Flags
|
||||
|
||||
{{ .Help }}
|
||||
|
||||
{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
|
||||
{{ end }}{{ end }}
|
||||
|
||||
{{.Help}} (flag group {{.Name}}):
|
||||
{{.Flags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{end}}{{if .HasHelpSubCommands}}
|
||||
|
||||
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
|
||||
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}
|
||||
|
||||
@@ -232,19 +223,9 @@ Use "rclone help flags" for to see the global flags.
|
||||
Use "rclone help backends" for a list of supported services.
|
||||
`
|
||||
|
||||
var filterFlagsSingleGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Flags.FlagUsages | trimTrailingWhitespaces}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
var filterFlagsMultiGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Help}} (flag group {{.Name}}):
|
||||
{{.Flags.FlagUsages | trimTrailingWhitespaces}}
|
||||
|
||||
{{end}}{{end}}`
|
||||
|
||||
var docFlagsTemplate = `---
|
||||
title: "Global Flags"
|
||||
description: "Rclone Global Flags"
|
||||
# autogenerated - DO NOT EDIT
|
||||
---
|
||||
|
||||
# Global Flags
|
||||
@@ -252,16 +233,16 @@ description: "Rclone Global Flags"
|
||||
This describes the global flags available to every rclone command
|
||||
split into groups.
|
||||
|
||||
{{range flagGroups .}}{{if .Flags.HasFlags}}
|
||||
## {{.Name}}
|
||||
{{ range flagGroups . }}{{ if .Flags.HasFlags }}
|
||||
## {{ .Name }}
|
||||
|
||||
{{.Help}}.
|
||||
{{ .Help }}
|
||||
|
||||
` + "```" + `
|
||||
{{.Flags.FlagUsages | trimTrailingWhitespaces}}
|
||||
{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
|
||||
` + "```" + `
|
||||
|
||||
{{end}}{{end}}
|
||||
{{ end }}{{ end }}
|
||||
`
|
||||
|
||||
// show all the backends
|
||||
|
||||
@@ -27,7 +27,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "link remote:path",
|
||||
Short: `Generate public link to file/folder.`,
|
||||
Long: `Create, retrieve or remove a public link to the given file or folder.
|
||||
Long: `rclone link will create, retrieve or remove a public link to the given
|
||||
file or folder.
|
||||
|
||||
rclone link remote:path/to/file
|
||||
rclone link remote:path/to/folder/
|
||||
|
||||
@@ -2,232 +2,60 @@
|
||||
package ls
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
listLong bool
|
||||
jsonOutput bool
|
||||
filterName string
|
||||
filterType string
|
||||
filterSource string
|
||||
filterDescription string
|
||||
orderBy string
|
||||
listLong bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &listLong, "long", "", false, "Show type and description in addition to name", "")
|
||||
flags.StringVarP(cmdFlags, &filterName, "name", "", "", "Filter remotes by name", "")
|
||||
flags.StringVarP(cmdFlags, &filterType, "type", "", "", "Filter remotes by type", "")
|
||||
flags.StringVarP(cmdFlags, &filterSource, "source", "", "", "Filter remotes by source, e.g. 'file' or 'environment'", "")
|
||||
flags.StringVarP(cmdFlags, &filterDescription, "description", "", "", "Filter remotes by description", "")
|
||||
flags.StringVarP(cmdFlags, &orderBy, "order-by", "", "", "Instructions on how to order the result, e.g. 'type,name=descending'", "")
|
||||
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
|
||||
}
|
||||
|
||||
// lessFn compares to remotes for order by
|
||||
type lessFn func(a, b config.Remote) bool
|
||||
|
||||
// newLess returns a function for comparing remotes based on an order by string
|
||||
func newLess(orderBy string) (less lessFn, err error) {
|
||||
if orderBy == "" {
|
||||
return nil, nil
|
||||
}
|
||||
parts := strings.Split(strings.ToLower(orderBy), ",")
|
||||
n := len(parts)
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
fieldAndDirection := strings.SplitN(parts[i], "=", 2)
|
||||
|
||||
descending := false
|
||||
if len(fieldAndDirection) > 1 {
|
||||
switch fieldAndDirection[1] {
|
||||
case "ascending", "asc":
|
||||
case "descending", "desc":
|
||||
descending = true
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown --order-by direction %q", fieldAndDirection[1])
|
||||
}
|
||||
}
|
||||
|
||||
var field func(o config.Remote) string
|
||||
switch fieldAndDirection[0] {
|
||||
case "name":
|
||||
field = func(o config.Remote) string {
|
||||
return o.Name
|
||||
}
|
||||
case "type":
|
||||
field = func(o config.Remote) string {
|
||||
return o.Type
|
||||
}
|
||||
case "source":
|
||||
field = func(o config.Remote) string {
|
||||
return o.Source
|
||||
}
|
||||
case "description":
|
||||
field = func(o config.Remote) string {
|
||||
return o.Description
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown --order-by field %q", fieldAndDirection[0])
|
||||
}
|
||||
|
||||
var thisLess lessFn
|
||||
if descending {
|
||||
thisLess = func(a, b config.Remote) bool {
|
||||
return field(a) > field(b)
|
||||
}
|
||||
} else {
|
||||
thisLess = func(a, b config.Remote) bool {
|
||||
return field(a) < field(b)
|
||||
}
|
||||
}
|
||||
|
||||
if i == n-1 {
|
||||
less = thisLess
|
||||
} else {
|
||||
nextLess := less
|
||||
less = func(a, b config.Remote) bool {
|
||||
if field(a) == field(b) {
|
||||
return nextLess(a, b)
|
||||
}
|
||||
return thisLess(a, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
return less, nil
|
||||
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type and the description as well as names", "")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "listremotes [<filter>]",
|
||||
Use: "listremotes",
|
||||
Short: `List all the remotes in the config file and defined in environment variables.`,
|
||||
Long: `
|
||||
Lists all the available remotes from the config file, or the remotes matching
|
||||
an optional filter.
|
||||
rclone listremotes lists all the available remotes from the config file.
|
||||
|
||||
Prints the result in human-readable format by default, and as a simple list of
|
||||
remote names, or if used with flag ` + "`--long`" + ` a tabular format including
|
||||
the remote names, types and descriptions. Using flag ` + "`--json`" + ` produces
|
||||
machine-readable output instead, which always includes all attributes - including
|
||||
the source (file or environment).
|
||||
|
||||
Result can be filtered by a filter argument which applies to all attributes,
|
||||
and/or filter flags specific for each attribute. The values must be specified
|
||||
according to regular rclone filtering pattern syntax.
|
||||
When used with the ` + "`--long`" + ` flag it lists the types and the descriptions too.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.34",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
var filterDefault string
|
||||
if len(args) > 0 {
|
||||
filterDefault = args[0]
|
||||
}
|
||||
filters := make(map[string]*regexp.Regexp)
|
||||
for k, v := range map[string]string{
|
||||
"all": filterDefault,
|
||||
"name": filterName,
|
||||
"type": filterType,
|
||||
"source": filterSource,
|
||||
"description": filterDescription,
|
||||
} {
|
||||
if v != "" {
|
||||
filterRe, err := filter.GlobStringToRegexp(v, false, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid %s filter argument: %w", k, err)
|
||||
}
|
||||
fs.Debugf(nil, "Filter for %s: %s", k, filterRe.String())
|
||||
filters[k] = filterRe
|
||||
}
|
||||
}
|
||||
remotes := config.GetRemotes()
|
||||
maxName := 0
|
||||
maxType := 0
|
||||
i := 0
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
remotes := config.FileSections()
|
||||
sort.Strings(remotes)
|
||||
maxlen := 1
|
||||
maxlentype := 1
|
||||
for _, remote := range remotes {
|
||||
include := true
|
||||
for k, v := range filters {
|
||||
if k == "all" && !(v.MatchString(remote.Name) || v.MatchString(remote.Type) || v.MatchString(remote.Source) || v.MatchString(remote.Description)) {
|
||||
include = false
|
||||
} else if k == "name" && !v.MatchString(remote.Name) {
|
||||
include = false
|
||||
} else if k == "type" && !v.MatchString(remote.Type) {
|
||||
include = false
|
||||
} else if k == "source" && !v.MatchString(remote.Source) {
|
||||
include = false
|
||||
} else if k == "description" && !v.MatchString(remote.Description) {
|
||||
include = false
|
||||
}
|
||||
if len(remote) > maxlen {
|
||||
maxlen = len(remote)
|
||||
}
|
||||
if include {
|
||||
if len(remote.Name) > maxName {
|
||||
maxName = len(remote.Name)
|
||||
}
|
||||
if len(remote.Type) > maxType {
|
||||
maxType = len(remote.Type)
|
||||
}
|
||||
remotes[i] = remote
|
||||
i++
|
||||
t := config.FileGet(remote, "type")
|
||||
if len(t) > maxlentype {
|
||||
maxlentype = len(t)
|
||||
}
|
||||
}
|
||||
remotes = remotes[:i]
|
||||
|
||||
less, err := newLess(orderBy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if less != nil {
|
||||
sliceLessFn := func(i, j int) bool {
|
||||
return less(remotes[i], remotes[j])
|
||||
}
|
||||
sort.SliceStable(remotes, sliceLessFn)
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
fmt.Println("[")
|
||||
first := true
|
||||
for _, remote := range remotes {
|
||||
out, err := json.Marshal(remote)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal remote object: %w", err)
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
fmt.Print(",\n")
|
||||
}
|
||||
_, err = os.Stdout.Write(out)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write to output: %w", err)
|
||||
}
|
||||
}
|
||||
if !first {
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Println("]")
|
||||
} else if listLong {
|
||||
for _, remote := range remotes {
|
||||
fmt.Printf("%-*s %-*s %s\n", maxName+1, remote.Name+":", maxType, remote.Type, remote.Description)
|
||||
}
|
||||
} else {
|
||||
for _, remote := range remotes {
|
||||
fmt.Printf("%s:\n", remote.Name)
|
||||
for _, remote := range remotes {
|
||||
if listLong {
|
||||
remoteType := config.FileGet(remote, "type")
|
||||
description := config.FileGet(remote, "description")
|
||||
fmt.Printf("%-*s %-*s %s\n", maxlen+1, remote+":", maxlentype+1, remoteType, description)
|
||||
} else {
|
||||
fmt.Printf("%s:\n", remote)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -18,7 +18,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "ls remote:path",
|
||||
Short: `List the objects in the path with size and path.`,
|
||||
Long: `Lists the objects in the source path to standard output in a human
|
||||
Long: `
|
||||
Lists the objects in the source path to standard output in a human
|
||||
readable format with size and path. Recurses by default.
|
||||
|
||||
Eg
|
||||
|
||||
@@ -26,7 +26,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "lsd remote:path",
|
||||
Short: `List all directories/containers/buckets in the path.`,
|
||||
Long: `Lists the directories in the source path to standard output. Does not
|
||||
Long: `
|
||||
Lists the directories in the source path to standard output. Does not
|
||||
recurse by default. Use the ` + "`-R`" + ` flag to recurse.
|
||||
|
||||
This command lists the total size of the directory (if known, -1 if
|
||||
|
||||
@@ -47,7 +47,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "lsf remote:path",
|
||||
Short: `List directories and objects in remote:path formatted for parsing.`,
|
||||
Long: `List the contents of the source path (directories and objects) to
|
||||
Long: `
|
||||
List the contents of the source path (directories and objects) to
|
||||
standard output in a form which is easy to parse by scripts. By
|
||||
default this will just be the names of the objects and directories,
|
||||
one per line. The directories will have a / suffix.
|
||||
|
||||
@@ -18,7 +18,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "lsl remote:path",
|
||||
Short: `List the objects in path with modification time, size and path.`,
|
||||
Long: `Lists the objects in the source path to standard output in a human
|
||||
Long: `
|
||||
Lists the objects in the source path to standard output in a human
|
||||
readable format with modification time, size and path. Recurses by default.
|
||||
|
||||
Eg
|
||||
|
||||
@@ -20,7 +20,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "md5sum remote:path",
|
||||
Short: `Produces an md5sum file for all the objects in the path.`,
|
||||
Long: `Produces an md5sum file for all the objects in the path. This
|
||||
Long: `
|
||||
Produces an md5sum file for all the objects in the path. This
|
||||
is in the same format as the standard md5sum tool produces.
|
||||
|
||||
By default, the hash is requested from the remote. If MD5 is
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user