Mirror of https://github.com/rclone/rclone.git, synced 2025-12-25 12:43:42 +00:00
Compare commits
106 Commits
fix-onedri ... v1.54.0
| SHA1 |
|---|
| 7f5ee5d81f |
| 8b41dfa50a |
| 0d8bcc08da |
| d3b7f14b66 |
| f66928a846 |
| 3b1122c888 |
| 463a18aa07 |
| 0a932dc1f2 |
| 8856e0e559 |
| 3b6df71838 |
| 31de631b22 |
| 189ef5f257 |
| 2f67681e3b |
| 41127965b0 |
| 8171671d82 |
| 75617c0c3b |
| 8b9d23916b |
| e43b79e33d |
| 459cc70a50 |
| 20578f3f89 |
| 15da53696e |
| 2bddba118e |
| c7e5976e11 |
| f0bf9cfda1 |
| 671dd047f7 |
| 6272ca74bc |
| f5af761466 |
| 06f1c0c61c |
| e6a9f005d6 |
| 8f6f4b053c |
| fe15a2eeeb |
| 019667170f |
| 7a496752f3 |
| b569dc11a0 |
| df4e6079f1 |
| 6156f90601 |
| cdaea62932 |
| 78afe01d15 |
| 4eac88babf |
| b4217fabd3 |
| 92b9dabf3c |
| 4323ff8a63 |
| 3e188495f5 |
| acb9e17eb3 |
| c8ab4f1d02 |
| e776a1b122 |
| c57af26de9 |
| 7d89912666 |
| cd075f1703 |
| 35b2ca642c |
| 127f48e8ad |
| 3e986cdf54 |
| b80d498304 |
| 757e696a6b |
| e3979131f2 |
| a774f6bfdb |
| d7cd35e2ca |
| 38e70f1797 |
| 3b49440c25 |
| 7c0287b824 |
| f97c2c85bd |
| 14c0d8a93e |
| 768ad4de2a |
| 817987dfc4 |
| eb090d3544 |
| 4daf8b7083 |
| 0be69018b8 |
| 9b9ab5f3e8 |
| 072464cbdb |
| b0491dec88 |
| ccfefedb47 |
| 2fffcf9e7f |
| a39a5d261c |
| 45b57822d5 |
| d8984cd37f |
| 80e63af470 |
| db2c38b21b |
| cef51d58ac |
| e0b5a13a13 |
| de21356154 |
| 35a4de2030 |
| 847625822f |
| 3877df4e62 |
| ec73d2fb9a |
| a7689d7023 |
| 847a44e7ad |
| b3710c962e |
| 35ccfe1721 |
| ef2bfb9718 |
| a97effa27c |
| 01adee7554 |
| 78a76b0d29 |
| e775328523 |
| 50344e7792 |
| d58fdb10db |
| feaacfd226 |
| e3c238ac95 |
| 752997c5e8 |
| 71edc75ca6 |
| 768e4c4735 |
| c553ad5158 |
| c66b901320 |
| dd67a3d5f5 |
| e972f2c98a |
| acbcb1ea9d |
| d4444375ac |
16 .github/workflows/build.yml vendored
@@ -19,7 +19,7 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.12', 'go1.13', 'go1.14', 'go1.16']

include:
- job_name: linux
@@ -46,6 +46,7 @@ jobs:
go: '1.15.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
@@ -57,6 +58,7 @@ jobs:
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true

@@ -67,11 +69,6 @@ jobs:
compile_all: true
deploy: true

- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
quicktest: true

- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
@@ -88,6 +85,12 @@ jobs:
quicktest: true
racequicktest: true

- job_name: go1.16
os: ubuntu-latest
go: '1.16.0-rc1'
quicktest: true
racequicktest: true

name: ${{ matrix.job_name }}

runs-on: ${{ matrix.os }}
@@ -109,6 +112,7 @@ jobs:
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
20496 MANUAL.html generated
File diff suppressed because one or more lines are too long
21032 MANUAL.txt generated
File diff suppressed because it is too large
12 Makefile
@@ -46,13 +46,13 @@ endif
.PHONY: rclone test_all vars version

rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

vars:
@echo SHELL="'$(SHELL)'"
@@ -188,10 +188,10 @@ upload_github:
./bin/upload-github $(TAG)

cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/

@@ -199,7 +199,7 @@ log_since_last_release:
git log $(LAST_TAG)..

compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

ci_upload:
sudo chown -R $$USER build
@@ -213,7 +213,7 @@ endif

ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
@@ -36,6 +36,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -18,6 +18,7 @@ import (
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
@@ -1,13 +1,11 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !plan9,!solaris,!js,go1.13
|
||||
// +build !plan9,!solaris,!js,go1.14
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@@ -26,7 +24,6 @@ import (
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
@@ -39,8 +36,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -51,15 +46,12 @@ const (
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
defaultChunkSize = 4 * fs.MebiByte
|
||||
maxChunkSize = 100 * fs.MebiByte
|
||||
defaultUploadCutoff = 256 * fs.MebiByte
|
||||
maxUploadCutoff = 256 * fs.MebiByte
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
defaultChunkSize = 4 * fs.MebiByte
|
||||
maxChunkSize = 100 * fs.MebiByte
|
||||
uploadConcurrency = 4
|
||||
defaultAccessTier = azblob.AccessTierNone
|
||||
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
|
||||
// Default storage account, key and blob endpoint for emulator support,
|
||||
// though it is a base64 key checked in here, it is publicly available secret.
|
||||
emulatorAccount = "devstoreaccount1"
|
||||
@@ -137,8 +129,7 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||
Default: defaultUploadCutoff,
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
@@ -241,7 +232,6 @@ type Options struct {
|
||||
MSIResourceID string `config:"msi_mi_res_id"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ListChunkSize uint `config:"list_chunk"`
|
||||
AccessTier string `config:"access_tier"`
|
||||
@@ -397,21 +387,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return errors.Errorf("%v must be less than or equal to %v", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// httpClientFactory creates a Factory object that sends HTTP requests
|
||||
// to an rclone's http.Client.
|
||||
//
|
||||
@@ -506,10 +481,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: upload cutoff")
|
||||
}
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "azure: chunk size")
|
||||
@@ -1510,12 +1481,6 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// readSeeker joins an io.Reader and an io.Seeker
|
||||
type readSeeker struct {
|
||||
io.Reader
|
||||
io.Seeker
|
||||
}
|
||||
|
||||
// increment the slice passed in as LSB binary
|
||||
func increment(xs []byte) {
|
||||
for i, digit := range xs {
|
||||
@@ -1528,141 +1493,46 @@ func increment(xs []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
// poolWrapper wraps a pool.Pool as an azblob.TransferManager
|
||||
type poolWrapper struct {
|
||||
pool *pool.Pool
|
||||
bufToken chan struct{}
|
||||
runToken chan struct{}
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
||||
// Calculate correct chunkSize
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
totalParts := -1
|
||||
|
||||
// Note that the max size of file is 4.75 TB (100 MB X 50,000
|
||||
// blocks) and this is bigger than the max uncommitted block
|
||||
// size (9.52 TB) so we do not need to part commit block lists
|
||||
// or garbage collect uncommitted blocks.
|
||||
//
|
||||
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
|
||||
// 195GB which seems like a not too unreasonable limit.
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
|
||||
})
|
||||
} else {
|
||||
// Adjust partSize until the number of parts is small enough.
|
||||
if size/chunkSize >= maxTotalParts {
|
||||
// Calculate partition size rounded up to the nearest MB
|
||||
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
|
||||
}
|
||||
if chunkSize > int64(maxChunkSize) {
|
||||
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
|
||||
}
|
||||
totalParts = int(size / chunkSize)
|
||||
if size%chunkSize != 0 {
|
||||
totalParts++
|
||||
}
|
||||
// newPoolWrapper creates an azblob.TransferManager that will use a
|
||||
// pool.Pool with maximum concurrency as specified.
|
||||
func (f *Fs) newPoolWrapper(concurrency int) azblob.TransferManager {
|
||||
return &poolWrapper{
|
||||
pool: f.pool,
|
||||
bufToken: make(chan struct{}, concurrency),
|
||||
runToken: make(chan struct{}, concurrency),
|
||||
}
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
|
||||
// Get implements TransferManager.Get().
|
||||
func (pw *poolWrapper) Get() []byte {
|
||||
pw.bufToken <- struct{}{}
|
||||
return pw.pool.Get()
|
||||
}
|
||||
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
// Put implements TransferManager.Put().
|
||||
func (pw *poolWrapper) Put(b []byte) {
|
||||
pw.pool.Put(b)
|
||||
<-pw.bufToken
|
||||
}
|
||||
|
||||
// Upload the chunks
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = size // remaining size in file for logging only, -1 if size < 0
|
||||
position = int64(0) // position in file
|
||||
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
|
||||
finished = false // set when we have read EOF
|
||||
blocks []string // list of blocks for finalize
|
||||
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
|
||||
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
|
||||
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
|
||||
)
|
||||
for part := 0; !finished; part++ {
|
||||
// Get a block of memory from the pool and a token which limits concurrency
|
||||
o.fs.uploadToken.Get()
|
||||
buf := memPool.Get()
|
||||
// Run implements TransferManager.Run().
|
||||
func (pw *poolWrapper) Run(f func()) {
|
||||
pw.runToken <- struct{}{}
|
||||
go func() {
|
||||
f()
|
||||
<-pw.runToken
|
||||
}()
|
||||
}
|
||||
|
||||
free := func() {
|
||||
memPool.Put(buf) // return the buf
|
||||
o.fs.uploadToken.Put() // return the token
|
||||
}
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
free()
|
||||
break
|
||||
}
|
||||
|
||||
// Read the chunk
|
||||
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
|
||||
if err == io.EOF {
|
||||
if n == 0 { // end if no data
|
||||
free()
|
||||
break
|
||||
}
|
||||
finished = true
|
||||
} else if err != nil {
|
||||
free()
|
||||
return errors.Wrap(err, "multipart upload failed to read source")
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
increment(binaryBlockID)
|
||||
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
|
||||
blocks = append(blocks, blockID)
|
||||
|
||||
// Transfer the chunk
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
g.Go(func() (err error) {
|
||||
defer free()
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
transactionalMD5 := md5sum[:]
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
bufferReader := bytes.NewReader(buf)
|
||||
wrappedReader := wrap(bufferReader)
|
||||
rs := readSeeker{wrappedReader, bufferReader}
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5, azblob.ClientProvidedKeyOptions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload failed to upload part")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// ready for next block
|
||||
if size >= 0 {
|
||||
remaining -= chunkSize
|
||||
}
|
||||
position += chunkSize
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{}, azblob.AccessTierType(o.fs.opt.AccessTier), nil, azblob.ClientProvidedKeyOptions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload failed to finalize")
|
||||
}
|
||||
return nil
|
||||
// Close implements TransferManager.Close().
|
||||
func (pw *poolWrapper) Close() {
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
@@ -1685,7 +1555,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
// Update Mod time
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
@@ -1695,10 +1565,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
blob := o.getBlobReference()
|
||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||
httpHeaders.ContentType = fs.MimeType(ctx, src)
|
||||
// Compute the Content-MD5 of the file, for multiparts uploads it
|
||||
|
||||
// Compute the Content-MD5 of the file. As we stream all uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
|
||||
// in order to validate its integrity during transport
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
@@ -1712,30 +1581,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
|
||||
BufferSize: int(o.fs.opt.ChunkSize),
|
||||
MaxBuffers: 4,
|
||||
MaxBuffers: uploadConcurrency,
|
||||
Metadata: o.meta,
|
||||
BlobHTTPHeaders: httpHeaders,
|
||||
}
|
||||
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
|
||||
// is merged the SDK can't upload a single blob of exactly the chunk
|
||||
// size, so upload with a multipart upload to work around.
|
||||
// See: https://github.com/rclone/rclone/issues/2653
|
||||
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if size == int64(o.fs.opt.ChunkSize) {
|
||||
multipartUpload = true
|
||||
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
|
||||
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
|
||||
}
|
||||
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
if multipartUpload {
|
||||
// If a large file upload in chunks
|
||||
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
|
||||
} else {
|
||||
// Write a small blob in one transaction
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
|
||||
}
|
||||
// Stream contents of the reader object to the given blob URL
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
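The azureblob changes above drop the hand-rolled multipart uploader in favour of the SDK's UploadStreamToBlockBlob driven by a pool-backed TransferManager (poolWrapper). The standalone Go sketch below is illustrative only, not rclone code; the names and the 4 MiB buffer are placeholders. It shows the underlying pattern poolWrapper relies on: two buffered channels used as semaphores so that no more than a fixed number of buffers and goroutines are ever in flight.

```go
package main

import (
	"fmt"
	"sync"
)

type transferManager struct {
	bufToken chan struct{} // limits buffers handed out
	runToken chan struct{} // limits goroutines running
	wg       sync.WaitGroup
}

func newTransferManager(concurrency int) *transferManager {
	return &transferManager{
		bufToken: make(chan struct{}, concurrency),
		runToken: make(chan struct{}, concurrency),
	}
}

// Get blocks until a buffer slot is free, then hands out a buffer.
func (tm *transferManager) Get() []byte {
	tm.bufToken <- struct{}{}
	return make([]byte, 4<<20) // the real code reuses buffers from a pool.Pool
}

// Put gives the buffer back and frees its slot.
func (tm *transferManager) Put(b []byte) {
	_ = b // the real code returns b to the pool
	<-tm.bufToken
}

// Run starts f on a goroutine without ever exceeding the concurrency limit.
func (tm *transferManager) Run(f func()) {
	tm.runToken <- struct{}{}
	tm.wg.Add(1)
	go func() {
		defer tm.wg.Done()
		f()
		<-tm.runToken
	}()
}

func main() {
	tm := newTransferManager(4)
	for part := 0; part < 10; part++ {
		part := part
		buf := tm.Get()
		tm.Run(func() {
			defer tm.Put(buf)
			fmt.Printf("staging block %d with a %d byte buffer\n", part, len(buf))
		})
	}
	tm.wg.Wait()
}
```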
@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -29,13 +29,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris js !go1.13
// +build plan9 solaris js !go1.14

package azureblob

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,!js,go1.14

package azureblob
@@ -708,7 +708,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if isDirectory {
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
if addBucket {
@@ -514,7 +514,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}
@@ -97,7 +97,8 @@ var (
|
||||
//
|
||||
// And still chunker's primary function is to chunk large files
|
||||
// rather than serve as a generic metadata container.
|
||||
const maxMetadataSize = 255
|
||||
const maxMetadataSize = 1023
|
||||
const maxMetadataSizeWritten = 255
|
||||
|
||||
// Current/highest supported metadata format.
|
||||
const metadataVersion = 1
|
||||
@@ -121,6 +122,8 @@ const maxTransactionProbes = 100
|
||||
// standard chunker errors
|
||||
var (
|
||||
ErrChunkOverflow = errors.New("chunk number overflow")
|
||||
ErrMetaTooBig = errors.New("metadata is too big")
|
||||
ErrMetaUnknown = errors.New("unknown metadata, please upgrade rclone")
|
||||
)
|
||||
|
||||
// variants of baseMove's parameter delMode
|
||||
@@ -150,6 +153,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||
}, {
|
||||
Name: "name_format",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: `*.rclone_chunk.###`,
|
||||
Help: `String format of chunk file names.
|
||||
The two placeholders are: base file name (*) and chunk number (#...).
|
||||
@@ -160,12 +164,14 @@ Possible chunk files are ignored if their name does not match given format.`,
|
||||
}, {
|
||||
Name: "start_from",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: 1,
|
||||
Help: `Minimum valid chunk number. Usually 0 or 1.
|
||||
By default chunk numbers start from 1.`,
|
||||
}, {
|
||||
Name: "meta_format",
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideCommandLine,
|
||||
Default: "simplejson",
|
||||
Help: `Format of the metadata object or "none". By default "simplejson".
|
||||
Metadata is a small JSON file named after the composite file.`,
|
||||
@@ -693,43 +699,50 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
switch entry := dirOrObject.(type) {
|
||||
case fs.Object:
|
||||
remote := entry.Remote()
|
||||
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
|
||||
if xactID != "" {
|
||||
if revealHidden {
|
||||
fs.Infof(f, "ignore temporary chunk %q", remote)
|
||||
}
|
||||
break
|
||||
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
|
||||
if mainRemote == "" {
|
||||
// this is meta object or standalone file
|
||||
object := f.newObject("", entry, nil)
|
||||
byRemote[remote] = object
|
||||
tempEntries = append(tempEntries, object)
|
||||
break
|
||||
}
|
||||
// this is some kind of chunk
|
||||
// metobject should have been created above if present
|
||||
isSpecial := xactID != "" || ctrlType != ""
|
||||
mainObject := byRemote[mainRemote]
|
||||
if mainObject == nil && f.useMeta && !isSpecial {
|
||||
fs.Debugf(f, "skip orphan data chunk %q", remote)
|
||||
break
|
||||
}
|
||||
if mainObject == nil && !f.useMeta {
|
||||
// this is the "nometa" case
|
||||
// create dummy chunked object without metadata
|
||||
mainObject = f.newObject(mainRemote, nil, nil)
|
||||
byRemote[mainRemote] = mainObject
|
||||
if !badEntry[mainRemote] {
|
||||
tempEntries = append(tempEntries, mainObject)
|
||||
}
|
||||
if ctrlType != "" {
|
||||
if revealHidden {
|
||||
fs.Infof(f, "ignore control chunk %q", remote)
|
||||
}
|
||||
break
|
||||
}
|
||||
if isSpecial {
|
||||
if revealHidden {
|
||||
fs.Infof(f, "ignore non-data chunk %q", remote)
|
||||
}
|
||||
mainObject := byRemote[mainRemote]
|
||||
if mainObject == nil && f.useMeta {
|
||||
fs.Debugf(f, "skip chunk %q without meta object", remote)
|
||||
break
|
||||
}
|
||||
if mainObject == nil {
|
||||
// useMeta is false - create chunked object without metadata
|
||||
mainObject = f.newObject(mainRemote, nil, nil)
|
||||
byRemote[mainRemote] = mainObject
|
||||
if !badEntry[mainRemote] {
|
||||
tempEntries = append(tempEntries, mainObject)
|
||||
}
|
||||
}
|
||||
if err := mainObject.addChunk(entry, chunkNo); err != nil {
|
||||
if f.opt.FailHard {
|
||||
return nil, err
|
||||
}
|
||||
badEntry[mainRemote] = true
|
||||
// need to read metadata to ensure actual object type
|
||||
// no need to read if metaobject is too big or absent,
|
||||
// use the fact that before calling validate()
|
||||
// the `size` field caches metaobject size, if any
|
||||
if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
|
||||
mainObject.unsure = true
|
||||
}
|
||||
break
|
||||
}
|
||||
object := f.newObject("", entry, nil)
|
||||
byRemote[remote] = object
|
||||
tempEntries = append(tempEntries, object)
|
||||
if err := mainObject.addChunk(entry, chunkNo); err != nil {
|
||||
if f.opt.FailHard {
|
||||
return nil, err
|
||||
}
|
||||
badEntry[mainRemote] = true
|
||||
}
|
||||
case fs.Directory:
|
||||
isSubdir[entry.Remote()] = true
|
||||
wrapDir := fs.NewDirCopy(ctx, entry)
|
||||
@@ -784,14 +797,22 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
// but opening even a small file can be slow on some backends.
|
||||
//
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.scanObject(ctx, remote, false)
|
||||
}
|
||||
|
||||
// scanObject is like NewObject with optional quick scan mode.
|
||||
// The quick mode avoids directory requests other than `List`,
|
||||
// ignores non-chunked objects and skips chunk size checks.
|
||||
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
|
||||
if err := f.forbidChunk(false, remote); err != nil {
|
||||
return nil, errors.Wrap(err, "can't access")
|
||||
}
|
||||
|
||||
var (
|
||||
o *Object
|
||||
baseObj fs.Object
|
||||
err error
|
||||
o *Object
|
||||
baseObj fs.Object
|
||||
err error
|
||||
sameMain bool
|
||||
)
|
||||
|
||||
if f.useMeta {
|
||||
@@ -805,6 +826,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// as a hard limit. Anything larger than that is treated as a
|
||||
// non-chunked file without even checking its contents, so it's
|
||||
// paramount to prevent metadata from exceeding the maximum size.
|
||||
// Anything smaller is additionally checked for format.
|
||||
o = f.newObject("", baseObj, nil)
|
||||
if o.size > maxMetadataSize {
|
||||
return o, nil
|
||||
@@ -834,18 +856,34 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return nil, errors.Wrap(err, "can't detect composite file")
|
||||
}
|
||||
|
||||
caseInsensitive := f.features.CaseInsensitive
|
||||
for _, dirOrObject := range entries {
|
||||
entry, ok := dirOrObject.(fs.Object)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
entryRemote := entry.Remote()
|
||||
if !strings.Contains(entryRemote, remote) {
|
||||
if !caseInsensitive && !strings.Contains(entryRemote, remote) {
|
||||
continue // bypass regexp to save cpu
|
||||
}
|
||||
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
|
||||
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
|
||||
continue // skip non-conforming, temporary and control chunks
|
||||
if mainRemote == "" {
|
||||
continue // skip non-chunks
|
||||
}
|
||||
if caseInsensitive {
|
||||
sameMain = strings.EqualFold(mainRemote, remote)
|
||||
} else {
|
||||
sameMain = mainRemote == remote
|
||||
}
|
||||
if !sameMain {
|
||||
continue // skip alien chunks
|
||||
}
|
||||
if ctrlType != "" || xactID != "" {
|
||||
if f.useMeta {
|
||||
// temporary/control chunk calls for lazy metadata read
|
||||
o.unsure = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
|
||||
if err := o.addChunk(entry, chunkNo); err != nil {
|
||||
@@ -855,7 +893,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
|
||||
// Scanning hasn't found data chunks with conforming names.
|
||||
if f.useMeta {
|
||||
if f.useMeta || quickScan {
|
||||
// Metadata is required but absent and there are no chunks.
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -878,23 +916,48 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// file without metadata. Validate it and update the total data size.
|
||||
// As an optimization, skip metadata reading here - we will call
|
||||
// readMetadata lazily when needed (reading can be expensive).
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
if !quickScan {
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// readMetadata reads composite object metadata and caches results,
|
||||
// in case of critical errors metadata is not cached.
|
||||
// Returns ErrMetaUnknown if an unsupported metadata format is detected.
|
||||
// If object is not chunked but marked by List or NewObject for recheck,
|
||||
// readMetadata will attempt to parse object as composite with fallback
|
||||
// to non-chunked representation if the attempt fails.
|
||||
func (o *Object) readMetadata(ctx context.Context) error {
|
||||
// return quickly if metadata is absent or has been already cached
|
||||
if !o.f.useMeta {
|
||||
o.isFull = true
|
||||
}
|
||||
if o.isFull {
|
||||
return nil
|
||||
}
|
||||
if !o.isComposite() || !o.f.useMeta {
|
||||
if !o.isComposite() && !o.unsure {
|
||||
// this for sure is a non-chunked standalone file
|
||||
o.isFull = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// validate metadata
|
||||
metaObject := o.main
|
||||
if metaObject.Size() > maxMetadataSize {
|
||||
if o.unsure {
|
||||
// this is not metadata but a foreign object
|
||||
o.unsure = false
|
||||
o.chunks = nil // make isComposite return false
|
||||
o.isFull = true // cache results
|
||||
return nil
|
||||
}
|
||||
return ErrMetaTooBig
|
||||
}
|
||||
|
||||
// size is within limits, perform consistency checks
|
||||
reader, err := metaObject.Open(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -907,8 +970,22 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
||||
|
||||
switch o.f.opt.MetaFormat {
|
||||
case "simplejson":
|
||||
metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
|
||||
if err != nil {
|
||||
metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
|
||||
if o.unsure {
|
||||
o.unsure = false
|
||||
if !madeByChunker {
|
||||
// this is not metadata but a foreign object
|
||||
o.chunks = nil // make isComposite return false
|
||||
o.isFull = true // cache results
|
||||
return nil
|
||||
}
|
||||
}
|
||||
switch err {
|
||||
case nil:
|
||||
// fall thru
|
||||
case ErrMetaTooBig, ErrMetaUnknown:
|
||||
return err // return these errors unwrapped for unit tests
|
||||
default:
|
||||
return errors.Wrap(err, "invalid metadata")
|
||||
}
|
||||
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
|
||||
@@ -918,12 +995,36 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
||||
o.sha1 = metaInfo.sha1
|
||||
}
|
||||
|
||||
o.isFull = true
|
||||
o.isFull = true // cache results
|
||||
return nil
|
||||
}
|
||||
|
||||
// put implements Put, PutStream, PutUnchecked, Update
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
|
||||
func (f *Fs) put(
|
||||
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
|
||||
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
|
||||
|
||||
// Perform consistency checks
|
||||
if err := f.forbidChunk(src, remote); err != nil {
|
||||
return nil, errors.Wrap(err, action+" refused")
|
||||
}
|
||||
if target == nil {
|
||||
// Get target object with a quick directory scan
|
||||
// skip metadata check if target object does not exist.
|
||||
// ignore not-chunked objects, skip chunk size checks.
|
||||
if obj, err := f.scanObject(ctx, remote, true); err == nil {
|
||||
target = obj
|
||||
}
|
||||
}
|
||||
if target != nil {
|
||||
obj := target.(*Object)
|
||||
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
|
||||
// refuse to update a file of unsupported format
|
||||
return nil, errors.Wrap(err, "refusing to "+action)
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare to upload
|
||||
c := f.newChunkingReader(src)
|
||||
wrapIn := c.wrapStream(ctx, in, src)
|
||||
|
||||
@@ -1013,8 +1114,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
|
||||
// Check for input that looks like valid metadata
|
||||
needMeta := len(c.chunks) > 1
|
||||
if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
|
||||
_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
|
||||
needMeta = err == nil
|
||||
_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
|
||||
needMeta = madeByChunker
|
||||
}
|
||||
|
||||
// Finalize small object as non-chunked.
|
||||
@@ -1273,29 +1374,16 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if err := f.forbidChunk(src, src.Remote()); err != nil {
|
||||
return nil, errors.Wrap(err, "refusing to put")
|
||||
}
|
||||
return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
|
||||
return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if err := f.forbidChunk(src, src.Remote()); err != nil {
|
||||
return nil, errors.Wrap(err, "refusing to upload")
|
||||
}
|
||||
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
|
||||
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
||||
return errors.Wrap(err, "update refused")
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// refuse to update a file of unsupported format
|
||||
return errors.Wrap(err, "refusing to update")
|
||||
}
|
||||
basePut := o.f.base.Put
|
||||
if src.Size() < 0 {
|
||||
basePut = o.f.base.Features().PutStream
|
||||
@@ -1303,7 +1391,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return errors.New("wrapped file system does not support streaming uploads")
|
||||
}
|
||||
}
|
||||
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
|
||||
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
|
||||
if err == nil {
|
||||
*o = *oNew.(*Object)
|
||||
}
|
||||
@@ -1417,7 +1505,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
// to corrupt file in hard mode. Hence, refuse to Remove, too.
|
||||
return errors.Wrap(err, "refuse to corrupt")
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
|
||||
// Proceed but warn user that unexpected things can happen.
|
||||
fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
|
||||
}
|
||||
@@ -1445,6 +1533,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
|
||||
if err := f.forbidChunk(o, remote); err != nil {
|
||||
return nil, errors.Wrapf(err, "can't %s", opName)
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// Refuse to copy/move composite files with invalid or future
|
||||
// metadata format which might involve unsupported chunk types.
|
||||
return nil, errors.Wrapf(err, "can't %s this file", opName)
|
||||
}
|
||||
if !o.isComposite() {
|
||||
fs.Debugf(o, "%s non-chunked object...", opName)
|
||||
oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
|
||||
@@ -1453,11 +1546,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
|
||||
}
|
||||
return f.newObject("", oResult, nil), nil
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// Refuse to copy/move composite files with invalid or future
|
||||
// metadata format which might involve unsupported chunk types.
|
||||
return nil, errors.Wrapf(err, "can't %s this file", opName)
|
||||
}
|
||||
|
||||
fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
|
||||
mainRemote := o.remote
|
||||
@@ -1539,6 +1627,8 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
|
||||
diff = "chunk sizes"
|
||||
case f.opt.NameFormat != obj.f.opt.NameFormat:
|
||||
diff = "chunk name formats"
|
||||
case f.opt.StartFrom != obj.f.opt.StartFrom:
|
||||
diff = "chunk numbering"
|
||||
case f.opt.MetaFormat != obj.f.opt.MetaFormat:
|
||||
diff = "meta formats"
|
||||
}
|
||||
@@ -1548,6 +1638,10 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
|
||||
return
|
||||
}
|
||||
|
||||
if obj.unsure {
|
||||
// ensure object is composite if need to re-read metadata
|
||||
_ = obj.readMetadata(ctx)
|
||||
}
|
||||
requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
|
||||
if !requireMetaHash && !f.hashAll {
|
||||
ok = true // hash is not required for metadata
|
||||
@@ -1741,6 +1835,7 @@ type Object struct {
|
||||
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
|
||||
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
|
||||
isFull bool // true if metadata has been read
|
||||
unsure bool // true if need to read metadata to detect object type
|
||||
md5 string
|
||||
sha1 string
|
||||
f *Fs
|
||||
@@ -1762,6 +1857,9 @@ func (o *Object) addChunk(chunk fs.Object, chunkNo int) error {
|
||||
copy(newChunks, o.chunks)
|
||||
o.chunks = newChunks
|
||||
}
|
||||
if o.chunks[chunkNo] != nil {
|
||||
return fmt.Errorf("duplicate chunk number %d", chunkNo+o.f.opt.StartFrom)
|
||||
}
|
||||
o.chunks[chunkNo] = chunk
|
||||
return nil
|
||||
}
|
||||
@@ -1891,15 +1989,16 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
|
||||
// on the level of wrapped remote but chunker is unaware of that.
|
||||
//
|
||||
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
return "", err // valid metadata is required to get hash, abort
|
||||
}
|
||||
if !o.isComposite() {
|
||||
// First, chain to the wrapped non-chunked file if possible.
|
||||
if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
|
||||
return value, nil
|
||||
}
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
return "", err // valid metadata is required to get hash, abort
|
||||
}
|
||||
|
||||
// Try hash from metadata if the file is composite or if wrapped remote fails.
|
||||
switch hashType {
|
||||
case hash.MD5:
|
||||
@@ -1924,13 +2023,13 @@ func (o *Object) UnWrap() fs.Object {
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
if !o.isComposite() {
|
||||
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
|
||||
}
|
||||
if err := o.readMetadata(ctx); err != nil {
|
||||
// refuse to open unsupported format
|
||||
return nil, errors.Wrap(err, "can't open")
|
||||
}
|
||||
if !o.isComposite() {
|
||||
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
|
||||
}
|
||||
|
||||
var openOptions []fs.OpenOption
|
||||
var offset, limit int64 = 0, -1
|
||||
@@ -2188,72 +2287,74 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
|
||||
SHA1: sha1,
|
||||
}
|
||||
data, err := json.Marshal(&metadata)
|
||||
if err == nil && data != nil && len(data) >= maxMetadataSize {
|
||||
if err == nil && data != nil && len(data) >= maxMetadataSizeWritten {
|
||||
// be a nitpicker, never produce something you can't consume
|
||||
return nil, errors.New("metadata can't be this big, please report to rclone developers")
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
// unmarshalSimpleJSON
|
||||
// unmarshalSimpleJSON parses metadata.
|
||||
//
|
||||
// In case of errors returns a flag telling whether input has been
|
||||
// produced by incompatible version of rclone vs wasn't metadata at all.
|
||||
// Only metadata format version 1 is supported atm.
|
||||
// Future releases will transparently migrate older metadata objects.
|
||||
// New format will have a higher version number and cannot be correctly
|
||||
// handled by current implementation.
|
||||
// The version check below will then explicitly ask user to upgrade rclone.
|
||||
//
|
||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
|
||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
||||
// Be strict about JSON format
|
||||
// to reduce possibility that a random small file resembles metadata.
|
||||
if data != nil && len(data) > maxMetadataSize {
|
||||
return nil, errors.New("too big")
|
||||
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||
return nil, false, ErrMetaTooBig
|
||||
}
|
||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||
return nil, errors.New("invalid json")
|
||||
return nil, false, errors.New("invalid json")
|
||||
}
|
||||
var metadata metaSimpleJSON
|
||||
err = json.Unmarshal(data, &metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, false, err
|
||||
}
|
||||
// Basic fields are strictly required
|
||||
// to reduce possibility that a random small file resembles metadata.
|
||||
if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
|
||||
return nil, errors.New("missing required field")
|
||||
return nil, false, errors.New("missing required field")
|
||||
}
|
||||
// Perform strict checks, avoid corruption of future metadata formats.
|
||||
if *metadata.Version < 1 {
|
||||
return nil, errors.New("wrong version")
|
||||
return nil, false, errors.New("wrong version")
|
||||
}
|
||||
if *metadata.Size < 0 {
|
||||
return nil, errors.New("negative file size")
|
||||
return nil, false, errors.New("negative file size")
|
||||
}
|
||||
if *metadata.ChunkNum < 0 {
|
||||
return nil, errors.New("negative number of chunks")
|
||||
return nil, false, errors.New("negative number of chunks")
|
||||
}
|
||||
if *metadata.ChunkNum > maxSafeChunkNumber {
|
||||
return nil, ErrChunkOverflow
|
||||
return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
|
||||
}
|
||||
if metadata.MD5 != "" {
|
||||
_, err = hex.DecodeString(metadata.MD5)
|
||||
if len(metadata.MD5) != 32 || err != nil {
|
||||
return nil, errors.New("wrong md5 hash")
|
||||
return nil, false, errors.New("wrong md5 hash")
|
||||
}
|
||||
}
|
||||
if metadata.SHA1 != "" {
|
||||
_, err = hex.DecodeString(metadata.SHA1)
|
||||
if len(metadata.SHA1) != 40 || err != nil {
|
||||
return nil, errors.New("wrong sha1 hash")
|
||||
return nil, false, errors.New("wrong sha1 hash")
|
||||
}
|
||||
}
|
||||
// ChunkNum is allowed to be 0 in future versions
|
||||
if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
|
||||
return nil, errors.New("wrong number of chunks")
|
||||
return nil, false, errors.New("wrong number of chunks")
|
||||
}
|
||||
// Non-strict mode also accepts future metadata versions
|
||||
if *metadata.Version > metadataVersion && strictChecks {
|
||||
return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
|
||||
if *metadata.Version > metadataVersion {
|
||||
return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
|
||||
}
|
||||
|
||||
var nilFs *Fs // nil object triggers appropriate type method
|
||||
@@ -2261,7 +2362,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte,
|
||||
info.nChunks = *metadata.ChunkNum
|
||||
info.md5 = metadata.MD5
|
||||
info.sha1 = metadata.SHA1
|
||||
return info, nil
|
||||
return info, true, nil
|
||||
}
|
||||
|
||||
func silentlyRemove(ctx context.Context, o fs.Object) {
|
||||
|
||||
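The chunker changes above rework unmarshalSimpleJSON so that it reports whether a small object was written by chunker at all, possibly by a newer rclone, instead of taking a strict/loose flag. The Go sketch below is a minimal standalone illustration of that metadata contract, not the actual chunker types: the field names are assumed from the test fixture further down (ver, size, nchunks, md5, sha1) and the limits come from the constants named in this change (format version 1, 255-byte write cap).

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
)

const (
	metadataVersion        = 1   // highest format this sketch understands
	maxMetadataSizeWritten = 255 // hard cap when producing metadata
)

type metaSimpleJSON struct {
	Version  *int   `json:"ver"`
	Size     *int64 `json:"size"`
	ChunkNum *int   `json:"nchunks"`
	MD5      string `json:"md5"`
	SHA1     string `json:"sha1"`
}

// parse returns (meta, madeByChunker, err). madeByChunker stays true even on
// error when the object looks like metadata written by a newer rclone.
func parse(data []byte) (*metaSimpleJSON, bool, error) {
	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, false, errors.New("invalid json")
	}
	var m metaSimpleJSON
	if err := json.Unmarshal(data, &m); err != nil {
		return nil, false, err
	}
	if m.Version == nil || m.Size == nil || m.ChunkNum == nil {
		return nil, false, errors.New("missing required field")
	}
	if *m.Version < 1 || *m.Size < 0 || *m.ChunkNum < 1 {
		return nil, false, errors.New("bad metadata values")
	}
	if m.MD5 != "" {
		if _, err := hex.DecodeString(m.MD5); err != nil || len(m.MD5) != 32 {
			return nil, false, errors.New("wrong md5 hash")
		}
	}
	if *m.Version > metadataVersion {
		return nil, true, errors.New("unknown metadata, please upgrade rclone")
	}
	return &m, true, nil
}

func main() {
	meta := []byte(`{"ver":1,"size":9,"nchunks":3}`)
	m, madeByChunker, err := parse(meta)
	fmt.Println(len(meta) < maxMetadataSizeWritten, *m.Size, madeByChunker, err)
}
```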
@@ -13,6 +13,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
@@ -663,6 +664,80 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}

// test that chunker refuses to change on objects with future/unknowm metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}

saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}

// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")

// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())

// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())

// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)

// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}

// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)

// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -686,6 +761,9 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -12,6 +12,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -27,6 +29,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
@@ -89,15 +92,26 @@ func init() {
|
||||
Level 0 turns off compression.`,
|
||||
Default: sgzip.DefaultCompression,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "ram_cache_limit",
|
||||
Help: `Some remotes don't allow the upload of files with unknown size.
|
||||
In this case the compressed file will need to be cached to determine
|
||||
it's size.
|
||||
|
||||
Files smaller than this limit will be cached in RAM, file larger than
|
||||
this limit will be cached on disk`,
|
||||
Default: fs.SizeSuffix(20 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
CompressionMode string `config:"mode"`
|
||||
CompressionLevel int `config:"level"`
|
||||
Remote string `config:"remote"`
|
||||
CompressionMode string `config:"mode"`
|
||||
CompressionLevel int `config:"level"`
|
||||
RAMCacheLimit fs.SizeSuffix `config:"ram_cache_limit"`
|
||||
}
|
||||
|
||||
/*** FILESYSTEM FUNCTIONS ***/
|
||||
@@ -416,8 +430,55 @@ type compressionResult struct {
|
||||
meta sgzip.GzipMetadata
|
||||
}
|
||||
|
||||
// replicating some of operations.Rcat functionality because we want to support remotes without streaming
|
||||
// support and of course cannot know the size of a compressed file before compressing it.
|
||||
func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, modTime time.Time, options []fs.OpenOption) (o fs.Object, err error) {
|
||||
|
||||
// cache small files in memory and do normal upload
|
||||
buf := make([]byte, f.opt.RAMCacheLimit)
|
||||
if n, err := io.ReadFull(in, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
src := object.NewStaticObjectInfo(dstFileName, modTime, int64(len(buf[:n])), false, nil, f.Fs)
|
||||
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
|
||||
}
|
||||
|
||||
// Need to include what we allready read
|
||||
in = &ReadCloserWrapper{
|
||||
Reader: io.MultiReader(bytes.NewReader(buf), in),
|
||||
Closer: in,
|
||||
}
|
||||
|
||||
canStream := f.Fs.Features().PutStream != nil
|
||||
if canStream {
|
||||
src := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, f.Fs)
|
||||
return f.Fs.Features().PutStream(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
|
||||
tempFile, err := ioutil.TempFile("", "rclone-press-")
|
||||
defer func() {
|
||||
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
|
||||
// to ignore them
|
||||
_ = tempFile.Close()
|
||||
_ = os.Remove(tempFile.Name())
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
|
||||
}
|
||||
if _, err = io.Copy(tempFile, in); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to write temporary local file")
|
||||
}
|
||||
if _, err = tempFile.Seek(0, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
finfo, err := tempFile.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
|
||||
}
|
||||
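The rcat helper above uploads compressed data whose final size is unknown: it buffers up to ram_cache_limit bytes in memory, and only streams (where supported) or spools to a temporary file when the input turns out to be larger. The Go sketch below is a condensed, illustrative version of that buffer-or-spool decision with the actual upload stubbed out; the 20 MiB limit mirrors the option's default and everything else is a placeholder.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"strings"
)

const ramCacheLimit = 20 * 1024 * 1024 // default ram_cache_limit

// upload reads from in; if the whole stream fits in RAM it "uploads" with a
// known size, otherwise it re-joins what was already read with the rest of
// the stream and spools it to a temporary file first.
func upload(in io.Reader) (size int64, err error) {
	buf := make([]byte, ramCacheLimit)
	n, err := io.ReadFull(in, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		// Whole input fit in RAM: size is known, upload buf[:n] directly.
		return int64(n), nil
	} else if err != nil {
		return 0, err
	}
	// Larger than the limit: include what we already read before the rest.
	joined := io.MultiReader(bytes.NewReader(buf[:n]), in)
	tmp, err := os.CreateTemp("", "spool-")
	if err != nil {
		return 0, err
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	if size, err = io.Copy(tmp, joined); err != nil {
		return 0, err
	}
	// A real implementation would now seek tmp back to 0 and upload it
	// with the size reported by tmp.Stat().
	return size, nil
}

func main() {
	n, err := upload(strings.NewReader("hello world"))
	fmt.Println(n, err)
}
```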
|
||||
// Put a compressed version of a file. Returns a wrappable object and metadata.
|
||||
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||
// Unwrap reader accounting
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
@@ -471,7 +532,7 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
o, err := put(ctx, wrappedIn, f.wrapInfo(src, makeDataName(src.Remote(), src.Size(), f.mode), src.Size()), options...)
|
||||
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
|
||||
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
|
||||
if err != nil {
|
||||
if o != nil {
|
||||
@@ -510,7 +571,7 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
|
||||
}
|
||||
|
||||
// Put an uncompressed version of a file. Returns a wrappable object and metadata.
|
||||
func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||
func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, put putFn, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
|
||||
// Unwrap the accounting, add our metadata hasher, then wrap it back on
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
@@ -577,6 +638,8 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
|
||||
|
||||
// This function will put both the data and metadata for an Object.
|
||||
// putData is the function used for data, while putMeta is the function used for metadata.
|
||||
// The putData function will only be used when the object is not compressible; if the
|
||||
// data is compressible this parameter will be ignored.
|
||||
func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption,
|
||||
putData putFn, putMeta putFn, compressible bool, mimeType string) (*Object, error) {
|
||||
// Put file then metadata
|
||||
@@ -584,9 +647,9 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
|
||||
var meta *ObjectMetadata
|
||||
var err error
|
||||
if compressible {
|
||||
dataObject, meta, err = f.putCompress(ctx, in, src, options, putData, mimeType)
|
||||
dataObject, meta, err = f.putCompress(ctx, in, src, options, mimeType)
|
||||
} else {
|
||||
dataObject, meta, err = f.putUncompress(ctx, in, src, options, putData, mimeType)
|
||||
dataObject, meta, err = f.putUncompress(ctx, in, src, putData, options, mimeType)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -837,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.Fs.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("can't CleanUp")
|
||||
return errors.New("can't CleanUp: not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -846,7 +909,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
return nil, errors.New("can't About: not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
@@ -922,7 +985,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration, unlink bool) (string, error) {
|
||||
do := f.Fs.Features().PublicLink
|
||||
if do == nil {
|
||||
return "", errors.New("PublicLink not supported")
|
||||
return "", errors.New("can't PublicLink: not supported by underlying remote")
|
||||
}
|
||||
o, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
@@ -1033,7 +1096,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Function that updates object
|
||||
// We can only support update when BOTH the old and the new object are uncompressed because only then
|
||||
// the filesize will be known beforehand and the name will stay the same
|
||||
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return o.Object, o.Object.Update(ctx, in, src, options...)
|
||||
}
|
||||
@@ -1121,7 +1185,7 @@ func (o *Object) String() string {
|
||||
func (o *Object) Remote() string {
|
||||
origFileName, _, _, err := processFileName(o.Object.Remote())
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Could not get remote path for: %s", o.Object.Remote())
|
||||
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
|
||||
return o.Object.Remote()
|
||||
}
|
||||
return origFileName
|
||||
@@ -1161,15 +1225,19 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
// multiple storage classes supported
|
||||
func (o *Object) SetTier(tier string) error {
|
||||
do, ok := o.Object.(fs.SetTierer)
|
||||
if !ok {
|
||||
mdo, mok := o.mo.(fs.SetTierer)
|
||||
if !(ok && mok) {
|
||||
return errors.New("press: underlying remote does not support SetTier")
|
||||
}
|
||||
if err := mdo.SetTier(tier); err != nil {
|
||||
return err
|
||||
}
|
||||
return do.SetTier(tier)
|
||||
}
|
||||
|
||||
// GetTier returns storage tier or class of the Object
|
||||
func (o *Object) GetTier() string {
|
||||
do, ok := o.Object.(fs.GetTierer)
|
||||
do, ok := o.mo.(fs.GetTierer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
@@ -1272,10 +1340,7 @@ func (o *ObjectInfo) Remote() string {
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *ObjectInfo) Size() int64 {
|
||||
if o.size != -1 {
|
||||
return o.size
|
||||
}
|
||||
return o.src.Size()
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time
|
||||
@@ -1286,14 +1351,7 @@ func (o *ObjectInfo) ModTime(ctx context.Context) time.Time {
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
if ht != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
value, err := o.src.Hash(ctx, ht)
|
||||
if err == hash.ErrUnsupported {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return value, err
|
||||
return "", nil // cannot know the checksum
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
|
||||
@@ -6,18 +6,17 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/dropbox"
|
||||
_ "github.com/rclone/rclone/backend/drive"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
_ "github.com/rclone/rclone/backend/s3"
|
||||
_ "github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
opt := fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
@@ -29,11 +28,9 @@ func TestIntegration(t *testing.T) {
|
||||
"UserInfo",
|
||||
"Disconnect",
|
||||
},
|
||||
UnimplementableObjectMethods: []string{
|
||||
"GetTier",
|
||||
"SetTier",
|
||||
},
|
||||
})
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
UnimplementableObjectMethods: []string{}}
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
|
||||
// TestRemoteGzip tests GZIP compression
|
||||
|
||||
@@ -73,6 +73,7 @@ const (
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||
defaultXDGIcon = "text-html"
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -128,6 +129,12 @@ var (
|
||||
_mimeTypeCustomTransform = map[string]string{
|
||||
"application/vnd.google-apps.script+json": "application/json",
|
||||
}
|
||||
_mimeTypeToXDGLinkIcons = map[string]string{
|
||||
"application/vnd.google-apps.document": "x-office-document",
|
||||
"application/vnd.google-apps.drawing": "x-office-drawing",
|
||||
"application/vnd.google-apps.presentation": "x-office-presentation",
|
||||
"application/vnd.google-apps.spreadsheet": "x-office-spreadsheet",
|
||||
}
|
||||
fetchFormatsOnce sync.Once // make sure we fetch the export/import formats only once
|
||||
_exportFormats map[string][]string // allowed export MIME type conversions
|
||||
_importFormats map[string][]string // allowed import MIME type conversions
|
||||
@@ -1294,11 +1301,15 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
|
||||
if t == nil {
|
||||
return nil, errors.Errorf("unsupported link type %s", exportMimeType)
|
||||
}
|
||||
xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
|
||||
if xdgIcon == "" {
|
||||
xdgIcon = defaultXDGIcon
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := t.Execute(&buf, struct {
|
||||
URL, Title string
|
||||
URL, Title, XDGIcon string
|
||||
}{
|
||||
info.WebViewLink, info.Name,
|
||||
info.WebViewLink, info.Name, xdgIcon,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "executing template failed")
|
||||
@@ -2449,6 +2460,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Logf(f, "Note that emptying the trash happens in the background and can take some time.")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3052,6 +3064,9 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
if destLeaf == "" {
|
||||
destLeaf = info.Name
|
||||
}
|
||||
if destDir == "" {
|
||||
destDir = "."
|
||||
}
|
||||
dstFs, err := cache.Get(ctx, destDir)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -3749,7 +3764,7 @@ URL={{ .URL }}{{"\r"}}
|
||||
Encoding=UTF-8
|
||||
Name={{ .Title }}
|
||||
URL={{ .URL }}
|
||||
Icon=text-html
|
||||
Icon={{ .XDGIcon }}
|
||||
Type=Link
|
||||
`
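With the XDG icon lookup in place, a link file exported for e.g. a Google Docs document would render roughly like this (illustrative title and URL; the Icon value follows from the _mimeTypeToXDGLinkIcons map above):

Encoding=UTF-8
Name=Example document
URL=https://docs.google.com/document/d/example
Icon=x-office-document
Type=Link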
|
||||
htmlTemplate = `<html>
|
||||
|
||||
@@ -536,7 +536,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// Find the leaf in pathID
|
||||
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
||||
if item.Name == leaf {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
pathIDOut = item.ID
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
@@ -258,6 +259,7 @@ func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error
|
||||
if f.opt.Concurrency > 0 {
|
||||
f.tokens.Get()
|
||||
}
|
||||
accounting.LimitTPS(ctx)
|
||||
f.poolMu.Lock()
|
||||
if len(f.pool) > 0 {
|
||||
c = f.pool[0]
|
||||
|
||||
@@ -411,7 +411,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if opt.Anonymous {
|
||||
oAuthClient = &http.Client{}
|
||||
oAuthClient = fshttp.NewClient(ctx)
|
||||
} else if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
@@ -564,7 +564,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory && object.Size == 0 {
|
||||
if isDirectory {
|
||||
continue // skip directory marker
|
||||
}
|
||||
err = fn(remote, object, false)
|
||||
|
||||
@@ -132,15 +132,33 @@ you want to read the media.`,
|
||||
Default: 2000,
|
||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "include_archived",
|
||||
Default: false,
|
||||
Help: `Also view and download archived media.
|
||||
|
||||
By default rclone does not request archived media. Thus, when syncing,
|
||||
archived media is not visible in directory listings or transferred.
|
||||
|
||||
Note that media in albums is always visible and synced, no matter
|
||||
their archive status.
|
||||
|
||||
With this flag, archived media are always visible in directory
|
||||
listings and transferred.
|
||||
|
||||
Without this flag, archived media will not be visible in directory
|
||||
listings and won't be transferred.`,
|
||||
Advanced: true,
|
||||
}}...),
|
||||
})
|
||||
}
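For context on how this option reaches the API: when no album ID is set, the flag is copied into the search filters (see the list change further down in this diff), so the mediaItems search request carries a filters object with includeArchivedMedia set accordingly; when an album ID is given the filter is skipped, since the API rejects album ID and filters together (error 400 INVALID_ARGUMENT, as noted in that hunk).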
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
ReadOnly bool `config:"read_only"`
|
||||
ReadSize bool `config:"read_size"`
|
||||
StartYear int `config:"start_year"`
|
||||
IncludeArchived bool `config:"include_archived"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
@@ -206,6 +224,10 @@ func (f *Fs) startYear() int {
|
||||
return f.opt.StartYear
|
||||
}
|
||||
|
||||
func (f *Fs) includeArchived() bool {
|
||||
return f.opt.IncludeArchived
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
@@ -497,6 +519,12 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
||||
}
|
||||
filter.PageSize = listChunks
|
||||
filter.PageToken = ""
|
||||
if filter.AlbumID == "" { // album ID and filters cannot be set together, else error 400 INVALID_ARGUMENT
|
||||
if filter.Filters == nil {
|
||||
filter.Filters = &api.Filters{}
|
||||
}
|
||||
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
|
||||
}
|
||||
lastID := ""
|
||||
for {
|
||||
var result api.MediaItems
|
||||
|
||||
@@ -24,6 +24,7 @@ type lister interface {
|
||||
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
|
||||
dirTime() time.Time
|
||||
startYear() int
|
||||
includeArchived() bool
|
||||
}
|
||||
|
||||
// dirPattern describes a single directory pattern
|
||||
|
||||
@@ -64,6 +64,11 @@ func (f *testLister) startYear() int {
|
||||
return 2000
|
||||
}
|
||||
|
||||
// mock includeArchived for testing
|
||||
func (f *testLister) includeArchived() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func TestPatternMatch(t *testing.T) {
|
||||
for testNumber, test := range []struct {
|
||||
// input
|
||||
|
||||
320
backend/hdfs/fs.go
Normal file
@@ -0,0 +1,320 @@
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/colinmarc/hdfs/v2"
|
||||
krb "github.com/jcmturner/gokrb5/v8/client"
|
||||
"github.com/jcmturner/gokrb5/v8/config"
|
||||
"github.com/jcmturner/gokrb5/v8/credentials"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// Fs represents a HDFS server
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this backend
|
||||
ci *fs.ConfigInfo // global config
|
||||
client *hdfs.Client
|
||||
}
|
||||
|
||||
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
|
||||
func getKerberosClient() (*krb.Client, error) {
|
||||
configPath := os.Getenv("KRB5_CONFIG")
|
||||
if configPath == "" {
|
||||
configPath = "/etc/krb5.conf"
|
||||
}
|
||||
|
||||
cfg, err := config.Load(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the ccache location from the environment, falling back to the
|
||||
// default location.
|
||||
ccachePath := os.Getenv("KRB5CCNAME")
|
||||
if strings.Contains(ccachePath, ":") {
|
||||
if strings.HasPrefix(ccachePath, "FILE:") {
|
||||
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
|
||||
} else {
|
||||
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
|
||||
}
|
||||
} else if ccachePath == "" {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
|
||||
}
|
||||
|
||||
ccache, err := credentials.LoadCCache(ccachePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err := krb.NewFromCCache(ccache, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
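For reference, the ccache resolution above behaves like this for typical KRB5CCNAME values (derived directly from the code, shown here for illustration):

// KRB5CCNAME unset                     -> /tmp/krb5cc_<uid>
// KRB5CCNAME=FILE:/tmp/krb5cc_1000     -> /tmp/krb5cc_1000
// KRB5CCNAME=DIR:/run/user/1000/krb5cc -> error: unusable ccache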
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
options := hdfs.ClientOptions{
|
||||
Addresses: []string{opt.Namenode},
|
||||
UseDatanodeHostname: false,
|
||||
}
|
||||
|
||||
if opt.ServicePrincipalName != "" {
|
||||
options.KerberosClient, err = getKerberosClient()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
|
||||
}
|
||||
options.KerberosServicePrincipleName = opt.ServicePrincipalName
|
||||
|
||||
if opt.DataTransferProtection != "" {
|
||||
options.DataTransferProtection = opt.DataTransferProtection
|
||||
}
|
||||
} else {
|
||||
options.User = opt.Username
|
||||
}
|
||||
|
||||
client, err := hdfs.NewClient(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
ci: fs.GetConfig(ctx),
|
||||
client: client,
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
info, err := f.client.Stat(f.realpath(""))
|
||||
if err == nil && !info.IsDir() {
|
||||
f.root = path.Dir(f.root)
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of this fs
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Hashes are not supported
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// NewObject finds file at remote or return fs.ErrorObjectNotFound
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
realpath := f.realpath(remote)
|
||||
fs.Debugf(f, "new [%s]", realpath)
|
||||
|
||||
info, err := f.ensureFile(realpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: info.Size(),
|
||||
modTime: info.ModTime(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
realpath := f.realpath(dir)
|
||||
fs.Debugf(f, "list [%s]", realpath)
|
||||
|
||||
err = f.ensureDirectory(realpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
list, err := f.client.ReadDir(realpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, x := range list {
|
||||
stdName := f.opt.Enc.ToStandardName(x.Name())
|
||||
remote := path.Join(dir, stdName)
|
||||
if x.IsDir() {
|
||||
entries = append(entries, fs.NewDir(remote, x.ModTime()))
|
||||
} else {
|
||||
entries = append(entries, &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: x.Size(),
|
||||
modTime: x.ModTime()})
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Put the object
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
return o, err
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir makes a directory
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
|
||||
return f.client.MkdirAll(f.realpath(dir), 0755)
|
||||
}
|
||||
|
||||
// Rmdir deletes the directory
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
realpath := f.realpath(dir)
|
||||
fs.Debugf(f, "rmdir [%s]", realpath)
|
||||
|
||||
err := f.ensureDirectory(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// do not remove a non-empty directory
|
||||
list, err := f.client.ReadDir(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(list) > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
return f.client.Remove(realpath)
|
||||
}
|
||||
|
||||
// Purge deletes all the files in the directory
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
realpath := f.realpath(dir)
|
||||
fs.Debugf(f, "purge [%s]", realpath)
|
||||
|
||||
err := f.ensureDirectory(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return f.client.RemoveAll(realpath)
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
info, err := f.client.StatFs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(int64(info.Capacity)),
|
||||
Used: fs.NewUsageValue(int64(info.Used)),
|
||||
Free: fs.NewUsageValue(int64(info.Remaining)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *Fs) ensureDirectory(realpath string) error {
|
||||
info, err := f.client.Stat(realpath)
|
||||
|
||||
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
|
||||
info, err := f.client.Stat(realpath)
|
||||
|
||||
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (f *Fs) realpath(dir string) string {
|
||||
return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
)
|
||||
86
backend/hdfs/hdfs.go
Normal file
@@ -0,0 +1,86 @@
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "hdfs",
|
||||
Description: "Hadoop distributed file system",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "namenode",
|
||||
Help: "hadoop name node and port",
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "namenode:8020",
|
||||
Help: "Connect to host namenode at port 8020",
|
||||
}},
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "hadoop user name",
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root",
|
||||
}},
|
||||
}, {
|
||||
Name: "service_principal_name",
|
||||
Help: `Kerberos service principal name for the namenode
|
||||
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(<SERVICE>/<FQDN>) for the namenode.`,
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "hdfs/namenode.hadoop.docker",
|
||||
Help: "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy
|
||||
|
||||
Specifies whether or not authentication, data signature integrity
|
||||
checks, and wire encryption are required when communicating with the
|
||||
datanodes. Possible values are 'authentication', 'integrity' and
|
||||
'privacy'. Used only with KERBEROS enabled.`,
|
||||
Required: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "privacy",
|
||||
Help: "Ensure authentication, integrity and encryption enabled.",
|
||||
}},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
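Once the backend is configured, for example as a remote named myhdfs via rclone config, the usual commands apply; a couple of illustrative invocations (the remote name is an assumption):

rclone lsd myhdfs:
rclone copy /local/backup myhdfs:backup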
|
||||
|
||||
// Options for this backend
|
||||
type Options struct {
|
||||
Namenode string `config:"namenode"`
|
||||
Username string `config:"username"`
|
||||
ServicePrincipalName string `config:"service_principal_name"`
|
||||
DataTransferProtection string `config:"data_transfer_protection"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// xPath make correct file path with leading '/'
|
||||
func xPath(root string, tail string) string {
|
||||
if !strings.HasPrefix(root, "/") {
|
||||
root = "/" + root
|
||||
}
|
||||
return path.Join(root, tail)
|
||||
}
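A few illustrative results of xPath, following directly from the definition above:

// xPath("data", "file.txt")  -> "/data/file.txt"
// xPath("/data", "sub/file") -> "/data/sub/file"
// xPath("", "file")          -> "/file"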
|
||||
20
backend/hdfs/hdfs_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Test HDFS filesystem interface
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package hdfs_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/hdfs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestHdfs:",
|
||||
NilObject: (*hdfs.Object)(nil),
|
||||
})
|
||||
}
|
||||
6
backend/hdfs/hdfs_unsupported.go
Normal file
@@ -0,0 +1,6 @@
|
||||
// Build for hdfs for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
// +build plan9
|
||||
|
||||
package hdfs
|
||||
177
backend/hdfs/object.go
Normal file
@@ -0,0 +1,177 @@
|
||||
// +build !plan9
|
||||
|
||||
package hdfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
// Object describes an HDFS file
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
realpath := o.fs.realpath(o.Remote())
|
||||
err := o.fs.client.Chtimes(realpath, modTime, modTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.modTime = modTime
|
||||
return nil
|
||||
}
|
||||
|
||||
// Storable returns whether this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Hash is not supported
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
realpath := o.realpath()
|
||||
fs.Debugf(o.fs, "open [%s]", realpath)
|
||||
f, err := o.fs.client.Open(realpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
}
|
||||
}
|
||||
|
||||
_, err = f.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if limit != -1 {
|
||||
in = readers.NewLimitedReadCloser(f, limit)
|
||||
} else {
|
||||
in = f
|
||||
}
|
||||
|
||||
return in, err
|
||||
}
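To make the option handling above concrete: a range request for bytes 100-199 of a 1000 byte object decodes to offset 100 and limit 100, so the file handle is seeked to offset 100 and wrapped in a LimitedReadCloser of 100 bytes, while a plain SeekOption only moves the offset and leaves the limit at -1, so the handle is returned unwrapped.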
|
||||
|
||||
// Update object
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
realpath := o.fs.realpath(src.Remote())
|
||||
dirname := path.Dir(realpath)
|
||||
fs.Debugf(o.fs, "update [%s]", realpath)
|
||||
|
||||
err := o.fs.client.MkdirAll(dirname, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := o.fs.client.Stat(realpath)
|
||||
if err == nil {
|
||||
err = o.fs.client.Remove(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
out, err := o.fs.client.Create(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
rerr := o.fs.client.Remove(realpath)
|
||||
if rerr != nil {
|
||||
fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return err
|
||||
}
|
||||
|
||||
err = out.Close()
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return err
|
||||
}
|
||||
|
||||
info, err = o.fs.client.Stat(realpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = o.SetModTime(ctx, src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.size = info.Size()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
realpath := o.fs.realpath(o.remote)
|
||||
fs.Debugf(o.fs, "remove [%s]", realpath)
|
||||
return o.fs.client.Remove(realpath)
|
||||
}
|
||||
|
||||
func (o *Object) realpath() string {
|
||||
return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
@@ -63,6 +63,10 @@ const (
|
||||
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
v1configVersion = 0
|
||||
|
||||
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -105,11 +109,18 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
|
||||
if config.Confirm(false) {
|
||||
v1config(ctx, name, m)
|
||||
} else {
|
||||
fmt.Printf("Choose authentication type:\n" +
|
||||
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
|
||||
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
|
||||
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
|
||||
|
||||
switch config.ChooseNumber("Your choice", 1, 3) {
|
||||
case 1:
|
||||
v2config(ctx, name, m)
|
||||
case 2:
|
||||
v1config(ctx, name, m)
|
||||
case 3:
|
||||
teliaCloudConfig(ctx, name, m)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
@@ -228,6 +239,46 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
|
||||
teliaCloudOauthConfig := &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaCloudAuthURL,
|
||||
TokenURL: teliaCloudTokenURL,
|
||||
},
|
||||
ClientID: teliaCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
|
||||
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
}
|
||||
|
||||
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
}
|
||||
|
||||
m.Set("configVersion", strconv.Itoa(configVersion))
|
||||
m.Set(configClientID, teliaCloudClientID)
|
||||
m.Set(configTokenURL, teliaCloudTokenURL)
|
||||
}
|
||||
|
||||
// v1config configure a jottacloud backend using legacy authentication
|
||||
func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
@@ -727,6 +778,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath(ctx, "")
|
||||
if err == fs.ErrorNotAFile {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
@@ -1463,6 +1517,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
o.fs.tokenRenewer.Start()
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
size := src.Size()
|
||||
md5String, err := src.Hash(ctx, hash.MD5)
|
||||
if err != nil || md5String == "" {
|
||||
|
||||
@@ -70,6 +70,20 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead)
|
||||
|
||||
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
|
||||
However, on unix it reads as the length of the text in the link. This may cause errors like this when
|
||||
syncing:
|
||||
|
||||
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
|
||||
|
||||
Setting this flag causes rclone to read the link and use that as the size of the link
|
||||
instead of 0 which in most cases fixes the problem.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_unicode_normalization",
|
||||
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
@@ -170,6 +184,7 @@ type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
TranslateSymlinks bool `config:"links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
ZeroSizeLinks bool `config:"zero_size_links"`
|
||||
NoUTFNorm bool `config:"no_unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
@@ -1232,7 +1247,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
|
||||
o.mode = info.Mode()
|
||||
o.fs.objectMetaMu.Unlock()
|
||||
// On Windows links read as 0 size so set the correct size here
|
||||
if runtime.GOOS == "windows" && o.translatedLink {
|
||||
// Optionally, users can turn this feature on with the zero_size_links flag
|
||||
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
|
||||
linkdst, err := os.Readlink(o.path)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to read link size: %v", err)
|
||||
|
||||
@@ -253,8 +253,10 @@ type MoveItemRequest struct {
|
||||
//CreateShareLinkRequest is the request to create a sharing link
|
||||
//Always Type:view and Scope:anonymous for public sharing
|
||||
type CreateShareLinkRequest struct {
|
||||
Type string `json:"type"` //Link type in View, Edit or Embed
|
||||
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
|
||||
Type string `json:"type"` // Link type in View, Edit or Embed
|
||||
Scope string `json:"scope,omitempty"` // Scope in anonymous, organization
|
||||
Password string `json:"password,omitempty"` // The password of the sharing link that is set by the creator. Optional and OneDrive Personal only.
|
||||
Expiry *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
|
||||
}
|
||||
|
||||
//CreateShareLinkResponse is the response from CreateShareLinkRequest
|
||||
|
||||
@@ -11,7 +11,9 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -45,7 +47,6 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
graphURL = "https://graph.microsoft.com/v1.0"
|
||||
configDriveID = "drive_id"
|
||||
configDriveType = "drive_type"
|
||||
driveTypePersonal = "personal"
|
||||
@@ -53,22 +54,40 @@ const (
|
||||
driveTypeSharepoint = "documentLibrary"
|
||||
defaultChunkSize = 10 * fs.MebiByte
|
||||
chunkSizeMultiple = 320 * fs.KibiByte
|
||||
|
||||
regionGlobal = "global"
|
||||
regionUS = "us"
|
||||
regionDE = "de"
|
||||
regionCN = "cn"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
authPath = "/common/oauth2/v2.0/authorize"
|
||||
tokenPath = "/common/oauth2/v2.0/token"
|
||||
|
||||
// Description of how to auth for this app for a business account
|
||||
oauthConfig = &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
|
||||
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
|
||||
},
|
||||
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
|
||||
graphAPIEndpoint = map[string]string{
|
||||
"global": "https://graph.microsoft.com",
|
||||
"us": "https://graph.microsoft.us",
|
||||
"de": "https://graph.microsoft.de",
|
||||
"cn": "https://microsoftgraph.chinacloudapi.cn",
|
||||
}
|
||||
|
||||
authEndpoint = map[string]string{
|
||||
"global": "https://login.microsoftonline.com",
|
||||
"us": "https://login.microsoftonline.us",
|
||||
"de": "https://login.microsoftonline.de",
|
||||
"cn": "https://login.chinacloudapi.cn",
|
||||
}
|
||||
|
||||
// QuickXorHashType is the hash.Type for OneDrive
|
||||
QuickXorHashType hash.Type
|
||||
)
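To illustrate how the region tables combine with authPath and tokenPath (the composition itself happens in the Config callback and in NewFs further down):

// region "de" (values derived from the maps above, shown for illustration):
//   graphURL -> "https://graph.microsoft.de/v1.0"
//   AuthURL  -> "https://login.microsoftonline.de/common/oauth2/v2.0/authorize"
//   TokenURL -> "https://login.microsoftonline.de/common/oauth2/v2.0/token"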
|
||||
@@ -81,6 +100,12 @@ func init() {
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||
region, _ := m.Get("region")
|
||||
graphURL := graphAPIEndpoint[region] + "/v1.0"
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[region] + authPath,
|
||||
TokenURL: authEndpoint[region] + tokenPath,
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
|
||||
if err != nil {
|
||||
@@ -120,9 +145,18 @@ func init() {
|
||||
var opts rest.Opts
|
||||
var finalDriveID string
|
||||
var siteID string
|
||||
var relativePath string
|
||||
switch config.Choose("Your choice",
|
||||
[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
|
||||
[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
|
||||
[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
|
||||
[]string{
|
||||
"OneDrive Personal or Business",
|
||||
"Root Sharepoint site",
|
||||
"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
|
||||
"Search for a Sharepoint site",
|
||||
"Type in driveID (advanced)",
|
||||
"Type in SiteID (advanced)",
|
||||
"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
|
||||
},
|
||||
false) {
|
||||
|
||||
case "onedrive":
|
||||
@@ -143,6 +177,20 @@ func init() {
|
||||
case "siteid":
|
||||
fmt.Printf("Paste your Site ID here> ")
|
||||
siteID = config.ReadLine()
|
||||
case "url":
|
||||
fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
|
||||
fmt.Printf("Paste your Site URL here> ")
|
||||
siteURL := config.ReadLine()
|
||||
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
|
||||
match := re.FindStringSubmatch(siteURL)
|
||||
if len(match) == 2 {
|
||||
relativePath = "/sites/" + match[1]
|
||||
} else {
|
||||
relativePath = "/sites/" + siteURL
|
||||
}
|
||||
case "path":
|
||||
fmt.Printf("Enter server-relative URL here> ")
|
||||
relativePath = config.ReadLine()
|
||||
case "search":
|
||||
fmt.Printf("What to search for> ")
|
||||
searchTerm := config.ReadLine()
|
||||
@@ -169,6 +217,21 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// if we use server-relative URL for finding the drive
|
||||
if relativePath != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/sites/root:" + relativePath,
|
||||
}
|
||||
site := siteResource{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &site)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available site by relative path: %v", err)
|
||||
}
|
||||
siteID = site.SiteID
|
||||
}
|
||||
|
||||
// if we have a siteID we need to ask for the drives
|
||||
if siteID != "" {
|
||||
opts = rest.Opts{
|
||||
@@ -242,6 +305,25 @@ func init() {
|
||||
config.SaveConfig()
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "region",
|
||||
Help: "Choose national cloud region for OneDrive.",
|
||||
Default: "global",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: regionGlobal,
|
||||
Help: "Microsoft Cloud Global",
|
||||
}, {
|
||||
Value: regionUS,
|
||||
Help: "Microsoft Cloud for US Government",
|
||||
}, {
|
||||
Value: regionDE,
|
||||
Help: "Microsoft Cloud Germany",
|
||||
}, {
|
||||
Value: regionCN,
|
||||
Help: "Azure and Office 365 operated by 21Vianet in China",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
|
||||
|
||||
@@ -295,6 +377,41 @@ modification time and removes all but the last version.
|
||||
|
||||
**NB** Onedrive personal can't currently delete versions so don't use
|
||||
this flag there.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "link_scope",
|
||||
Default: "anonymous",
|
||||
Help: `Set the scope of the links created by the link command.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "anonymous",
|
||||
Help: "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
|
||||
}, {
|
||||
Value: "organization",
|
||||
Help: "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
|
||||
}},
|
||||
}, {
|
||||
Name: "link_type",
|
||||
Default: "view",
|
||||
Help: `Set the type of the links created by the link command.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "view",
|
||||
Help: "Creates a read-only link to the item.",
|
||||
}, {
|
||||
Value: "edit",
|
||||
Help: "Creates a read-write link to the item.",
|
||||
}, {
|
||||
Value: "embed",
|
||||
Help: "Creates an embeddable link to the item.",
|
||||
}},
|
||||
}, {
|
||||
Name: "link_password",
|
||||
Default: "",
|
||||
Help: `Set the password for links created by the link command.
|
||||
|
||||
At the time of writing this only works with OneDrive personal paid accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -310,8 +427,6 @@ this flag there.
|
||||
// | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
|
||||
// ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
|
||||
// * (asterisk) -> '*' // FULLWIDTH ASTERISK
|
||||
// # (number sign) -> '#' // FULLWIDTH NUMBER SIGN
|
||||
// % (percent sign) -> '%' // FULLWIDTH PERCENT SIGN
|
||||
//
|
||||
// Folder names cannot begin with a tilde ('~')
|
||||
// List of replaced characters:
|
||||
@@ -336,7 +451,6 @@ this flag there.
|
||||
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeHashPercent |
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftTilde |
|
||||
encoder.EncodeRightPeriod |
|
||||
@@ -349,12 +463,16 @@ this flag there.
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Region string `config:"region"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
NoVersions bool `config:"no_versions"`
|
||||
LinkScope string `config:"link_scope"`
|
||||
LinkType string `config:"link_type"`
|
||||
LinkPassword string `config:"link_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -475,10 +593,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
//
|
||||
// If `relPath` == '', do not append the slash (See #3664)
|
||||
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
|
||||
if relPath != "" {
|
||||
relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
|
||||
}
|
||||
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
|
||||
opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -493,17 +609,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
||||
|
||||
if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
|
||||
var opts rest.Opts
|
||||
if len(path) == 0 {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
|
||||
}
|
||||
}
|
||||
opts = f.newOptsCallWithPath(ctx, path, "GET", "")
|
||||
opts.Path = strings.TrimSuffix(opts.Path, ":")
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -614,6 +721,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
|
||||
}
|
||||
|
||||
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
AuthURL: authEndpoint[opt.Region] + authPath,
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
@@ -628,7 +741,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
ci: ci,
|
||||
driveID: opt.DriveID,
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
@@ -749,7 +862,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
|
||||
var resp *http.Response
|
||||
var info *api.Item
|
||||
opts := newOptsCall(dirID, "POST", "/children")
|
||||
opts := f.newOptsCall(dirID, "POST", "/children")
|
||||
mkdir := api.CreateItemRequest{
|
||||
Name: f.opt.Enc.FromStandardName(leaf),
|
||||
ConflictBehavior: "fail",
|
||||
@@ -781,7 +894,7 @@ type listAllFn func(*api.Item) bool
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
// Top parameter asks for bigger pages of data
|
||||
// https://dev.onedrive.com/odata/optional-query-parameters.htm
|
||||
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
|
||||
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
|
||||
OUTER:
|
||||
for {
|
||||
var result api.ListChildrenResponse
|
||||
@@ -920,7 +1033,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
|
||||
// deleteObject removes an object by ID
|
||||
func (f *Fs) deleteObject(ctx context.Context, id string) error {
|
||||
opts := newOptsCall(id, "DELETE", "")
|
||||
opts := f.newOptsCall(id, "DELETE", "")
|
||||
opts.NoResponse = true
|
||||
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
@@ -1063,11 +1176,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := newOptsCall(srcObj.id, "POST", "/copy")
|
||||
// The query param is a workaround for OneDrive Business for #4590
|
||||
opts := f.newOptsCall(srcObj.id, "POST", "/copy?@microsoft.graph.conflictBehavior=replace")
|
||||
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
|
||||
opts.NoResponse = true
|
||||
|
||||
id, dstDriveID, _ := parseNormalizedID(directoryID)
|
||||
id, dstDriveID, _ := f.parseNormalizedID(directoryID)
|
||||
|
||||
replacedLeaf := f.opt.Enc.FromStandardName(leaf)
|
||||
copyReq := api.CopyItemRequest{
|
||||
@@ -1144,8 +1258,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, dstDriveID, _ := parseNormalizedID(directoryID)
|
||||
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
|
||||
id, dstDriveID, _ := f.parseNormalizedID(directoryID)
|
||||
_, srcObjDriveID, _ := f.parseNormalizedID(srcObj.id)
|
||||
|
||||
if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
@@ -1155,7 +1269,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
// Move the object
|
||||
opts := newOptsCall(srcObj.id, "PATCH", "")
|
||||
opts := f.newOptsCall(srcObj.id, "PATCH", "")
|
||||
|
||||
move := api.MoveItemRequest{
|
||||
Name: f.opt.Enc.FromStandardName(leaf),
|
||||
@@ -1206,8 +1320,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return err
|
||||
}
|
||||
|
||||
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
|
||||
_, srcDriveID, _ := parseNormalizedID(srcID)
|
||||
parsedDstDirID, dstDriveID, _ := f.parseNormalizedID(dstDirectoryID)
|
||||
_, srcDriveID, _ := f.parseNormalizedID(srcID)
|
||||
|
||||
if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
|
||||
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
|
||||
@@ -1223,7 +1337,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
}
|
||||
|
||||
// Do the move
|
||||
opts := newOptsCall(srcID, "PATCH", "")
|
||||
opts := f.newOptsCall(srcID, "PATCH", "")
|
||||
move := api.MoveItemRequest{
|
||||
Name: f.opt.Enc.FromStandardName(dstLeaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
@@ -1299,11 +1413,17 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
opts := newOptsCall(info.GetID(), "POST", "/createLink")
|
||||
opts := f.newOptsCall(info.GetID(), "POST", "/createLink")
|
||||
|
||||
share := api.CreateShareLinkRequest{
|
||||
Type: "view",
|
||||
Scope: "anonymous",
|
||||
Type: f.opt.LinkType,
|
||||
Scope: f.opt.LinkScope,
|
||||
Password: f.opt.LinkPassword,
|
||||
}
|
||||
|
||||
if expire < fs.Duration(time.Hour*24*365*100) {
|
||||
expiry := time.Now().Add(time.Duration(expire))
|
||||
share.Expiry = &expiry
|
||||
}
|
||||
|
||||
var resp *http.Response
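Together with the new options above this makes the created link customisable from the command line; as a usage illustration (the flag names are assumed to follow rclone's usual backend-option naming and do not appear in this diff):

rclone link --onedrive-link-type edit --onedrive-link-password secret remote:path/file.docx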
|
||||
@@ -1351,7 +1471,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
|
||||
// Finds and removes any old versions for o
|
||||
func (o *Object) deleteVersions(ctx context.Context) error {
|
||||
opts := newOptsCall(o.id, "GET", "/versions")
|
||||
opts := o.fs.newOptsCall(o.id, "GET", "/versions")
|
||||
var versions api.VersionsResponse
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
|
||||
@@ -1378,7 +1498,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
|
||||
return nil
|
||||
}
|
||||
fs.Infof(o, "removing version %q", ID)
|
||||
opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
|
||||
opts := o.fs.newOptsCall(o.id, "DELETE", "/versions/"+ID)
|
||||
opts.NoResponse = true
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Call(ctx, &opts)
|
||||
@@ -1523,21 +1643,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
|
||||
// setModTime sets the modification time of the local fs object
|
||||
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
|
||||
var opts rest.Opts
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
|
||||
}
|
||||
}
|
||||
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
|
||||
update := api.SetFileSystemInfo{
|
||||
FileSystemInfo: api.FileSystemInfoFacet{
|
||||
CreatedDateTime: api.Timestamp(modTime),
|
||||
@@ -1584,7 +1690,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := newOptsCall(o.id, "GET", "/content")
|
||||
opts := o.fs.newOptsCall(o.id, "GET", "/content")
|
||||
opts.Options = options
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
@@ -1604,22 +1710,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||
id, drive, rootURL := parseNormalizedID(directoryID)
|
||||
var opts rest.Opts
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: rootURL,
|
||||
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
|
||||
drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
|
||||
}
|
||||
}
|
||||
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
|
||||
createRequest := api.CreateUploadRequest{}
|
||||
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
|
||||
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
|
||||
@@ -1792,27 +1883,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
|
||||
fs.Debugf(o, "Starting singlepart upload")
|
||||
var resp *http.Response
|
||||
var opts rest.Opts
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
Options: options,
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
Options: options,
|
||||
}
|
||||
}
|
||||
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PUT", "/content")
|
||||
opts.ContentLength = &size
|
||||
opts.Body = in
|
||||
opts.Options = options
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
|
||||
@@ -1888,8 +1962,42 @@ func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
	id, drive, rootURL := parseNormalizedID(normalizedID)
/*
 * URL Build routine area start
 * 1. In this area, region-related URL rewrites are applied. As the API is a black box,
 *    we cannot thoroughly test this part. Please be extremely careful when changing it.
 * 2. If possible, please don't introduce region-related code elsewhere, but patch these helper functions instead.
 * 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
 *    Instead, use these helper functions, and customize the URL afterwards if needed.
 *
 * Currently, the 21Vianet API differs in the following places:
 * - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
 *   - this API doesn't work (gives invalid request)
 *   - it can be replaced with the following APIs:
 *     - https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
 *       - however, this API does NOT support a multi-level leaf like a/b/c
 *     - https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
 *       - this API does support a multi-level leaf like a/b/c
 * - https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
 *   - same as above
 */

// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func (f *Fs) parseNormalizedID(ID string) (string, string, string) {
	rootURL := graphAPIEndpoint[f.opt.Region] + "/v1.0/drives"
	if strings.Index(ID, "#") >= 0 {
		s := strings.Split(ID, "#")
		return s[1], s[0], rootURL
	}
	return ID, "", ""
}
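For readers skimming the diff, this is what the `driveID#itemID` convention boils down to. The snippet below is a self-contained sketch of the same parsing rule; the IDs and the root URL are made up, whereas the real method derives the root from graphAPIEndpoint[f.opt.Region] as shown above:

```go
package main

import (
	"fmt"
	"strings"
)

// parseNormalizedID splits "driveID#itemID" into its parts; a bare
// "itemID" carries no drive and therefore no drive-specific root URL.
func parseNormalizedID(id, rootURL string) (itemID, driveID, root string) {
	if strings.Contains(id, "#") {
		s := strings.SplitN(id, "#", 2)
		return s[1], s[0], rootURL
	}
	return id, "", ""
}

func main() {
	item, drive, root := parseNormalizedID("b!xyz#01ABCDEF", "https://graph.microsoft.com/v1.0/drives")
	fmt.Println(item, drive, root) // 01ABCDEF b!xyz https://graph.microsoft.com/v1.0/drives
}
```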

// newOptsCall builds the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID)*
// using the url template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
func (f *Fs) newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
	id, drive, rootURL := f.parseNormalizedID(normalizedID)

	if drive != "" {
		return rest.Opts{
@@ -1904,17 +2012,91 @@ func newOptsCall(normalizedID string, method string, route string) (opts rest.Op
	}
}

// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
	if strings.Index(ID, "#") >= 0 {
		s := strings.Split(ID, "#")
		return s[1], s[0], graphURL + "/drives"
	}
	return ID, "", ""
func escapeSingleQuote(str string) string {
	return strings.ReplaceAll(str, "'", "''")
}

// newOptsCallWithIDPath builds the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
// using the url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21Vianet)
// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept a multi-level leaf)
// if isPath is true, a multi-level leaf like a/b/c can be passed
func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
	encoder := f.opt.Enc.FromStandardName
	if isPath {
		encoder = f.opt.Enc.FromStandardPath
	}
	trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
	if drive == "" {
		trueDirID = normalizedID
	}
	entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
	if f.opt.Region == regionCN {
		if isPath {
			entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
		} else {
			entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
		}
	}
	if drive == "" {
		ok = false
		opts = rest.Opts{
			Method: method,
			Path:   entity,
		}
		return
	}
	ok = true
	opts = rest.Opts{
		Method:  method,
		RootURL: rootURL,
		Path:    "/" + drive + entity,
	}
	return
}
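To make the two URL shapes concrete, the snippet below hand-builds the item-relative path that newOptsCallWithIDPath would produce for the global Graph endpoint and for the 21Vianet children('...') workaround. The drive, item and leaf names are invented, and the real code additionally applies the backend's filename encoding, withTrailingColon and single-quote escaping:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	const drive, dirID, leaf, route = "d1", "i1", "a.txt", "/content"

	// International OneDrive: address the child by path under the parent item.
	global := "/drives/" + drive + "/items/" + dirID + ":/" + url.PathEscape(leaf) + ":" + route

	// 21Vianet: the ":/leaf:/" form is rejected, so address the child via children('leaf').
	cn := "/drives/" + drive + "/items/" + dirID + "/children('" + url.PathEscape(leaf) + "')" + route

	fmt.Println(global) // /drives/d1/items/i1:/a.txt:/content
	fmt.Println(cn)     // /drives/d1/items/i1/children('a.txt')/content
}
```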

// newOptsCallWithRootPath builds the rest.Opts structure with an *absolute path starting from the root*
// using the url template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
	path = strings.TrimSuffix(path, "/")
	newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
	if f.opt.Region == regionCN {
		newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
	}
	return rest.Opts{
		Method: method,
		Path:   newURL,
	}
}

// newOptsCallWithPath builds the rest.Opts intelligently.
// It will first try to resolve the path using the dircache, which enables support for "Shared with me" files.
// If the path is present in the cache it uses the ID + Path variant, otherwise it falls back to the RootPath variant.
func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
	if path == "" {
		url := "/root" + route
		return rest.Opts{
			Method: method,
			Path:   url,
		}
	}

	// find dircache
	leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
	// try to use the IDPath variant first
	if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
		return opts
	}
	// fall back to the RootPath variant
	return f.newOptsCallWithRootPath(path, method, route)
}

/*
 * URL Build routine area end
 */

// Returns the canonical form of the driveID
|
||||
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
|
||||
if driveID == "" {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
@@ -19,6 +20,20 @@ func TestIntegration(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegrationCn runs integration tests against the remote
|
||||
func TestIntegrationCn(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOneDriveCn:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
@@ -721,7 +721,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
for _, folder := range folderList.Folders {
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)

if leaf == folder.Name {
if strings.EqualFold(leaf, folder.Name) {
// found
return folder.FolderID, true, nil
}

@@ -345,7 +345,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
if strings.EqualFold(item.Name, leaf) {
pathIDOut = item.ID
return true
}

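Both hunks change the leaf comparison from == to strings.EqualFold, so FindLeaf now matches names case-insensitively, which suits remotes whose servers treat names case-insensitively. A one-line illustration of the difference:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println("Docs" == "docs")                  // false - the old exact comparison
	fmt.Println(strings.EqualFold("Docs", "docs")) // true  - the new case-insensitive match
}
```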
@@ -872,11 +872,12 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
if err != nil {
return err
}
maxLimit := int(listLimitSize)
// maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
// The default is 200 but this errors if more than 200 is put in so leave at the default
// Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
@@ -927,7 +928,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if err != nil {
if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}
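The second hunk fixes an error-shadowing bug: the loop previously tested the stale outer err instead of the cleanErr it had just been handed, so a failed bucket cleanup could go unreported. A tiny illustration of the corrected pattern, with made-up helper names:

```go
package main

import (
	"errors"
	"fmt"
)

// cleanOne stands in for a per-bucket cleanup call that may fail.
func cleanOne(name string) error {
	if name == "bad" {
		return errors.New("cleanup failed for " + name)
	}
	return nil
}

func main() {
	var err error
	for _, bucket := range []string{"good", "bad"} {
		cleanErr := cleanOne(bucket)
		if cleanErr != nil { // check the fresh cleanErr, not the stale err
			fmt.Printf("Failed to cleanup bucket: %q\n", cleanErr)
			err = cleanErr
		}
	}
	fmt.Println("final error:", err)
}
```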
|
||||
@@ -1181,6 +1181,43 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
|
||||
It can also be needed if the user you are using does not have bucket
|
||||
creation permissions. Before v1.52.0 this would have passed silently
|
||||
due to a bug.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `If set, don't HEAD uploaded objects to check integrity
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does.
|
||||
|
||||
Setting it means that if rclone receives a 200 OK message after
|
||||
uploading an object with PUT then it will assume that it got uploaded
|
||||
properly.
|
||||
|
||||
In particular it will assume:
|
||||
|
||||
- the metadata, including modtime, storage class and content type was as uploaded
|
||||
- the size was as uploaded
|
||||
|
||||
It reads the following items from the response for a single part PUT:
|
||||
|
||||
- the MD5SUM
|
||||
- The uploaded date
|
||||
|
||||
For multipart uploads these items aren't read.
|
||||
|
||||
If an source object of unknown length is uploaded then rclone **will** do a
|
||||
HEAD request.
|
||||
|
||||
Setting this flag increases the chance for undetected upload failures,
|
||||
in particular an incorrect size, so it isn't recommended for normal
|
||||
operation. In practice the chance of an undetected upload failure is
|
||||
very small even with this flag.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
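The help text says the MD5 and upload date are read back from the single part PUT response rather than from a follow-up HEAD. The sketch below shows roughly what that amounts to, assuming a plain (non-multipart, non-SSE-C/KMS) upload where the ETag is the hex MD5 of the body; the header values are fabricated:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// fromPutResponse mimics what the no_head path reads back from a
// single part PUT instead of issuing a HEAD request.
func fromPutResponse(resp *http.Response) (md5sum string, uploaded time.Time) {
	md5sum = strings.Trim(resp.Header.Get("Etag"), `"`)
	if t, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
		uploaded = t
	}
	return
}

func main() {
	resp := &http.Response{Header: http.Header{
		"Etag": []string{`"9e107d9d372bb6826bd81d3542a419d6"`},
		"Date": []string{"Mon, 02 Jan 2006 15:04:05 GMT"},
	}}
	md5sum, uploaded := fromPutResponse(resp)
	fmt.Println(md5sum, uploaded.UTC())
}
```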
@@ -1281,6 +1318,7 @@ type Options struct {
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
NoHead bool `config:"no_head"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
@@ -3230,6 +3268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
}
|
||||
|
||||
var resp *http.Response // response from PUT
|
||||
if multipart {
|
||||
err = o.uploadMultipart(ctx, &req, size, in)
|
||||
if err != nil {
|
||||
@@ -3270,7 +3309,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
httpReq.ContentLength = size
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Do(httpReq)
|
||||
var err error
|
||||
resp, err = o.fs.srv.Do(httpReq)
|
||||
if err != nil {
|
||||
return o.fs.shouldRetry(err)
|
||||
}
|
||||
@@ -3289,6 +3329,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
}
|
||||
|
||||
// User requested we don't HEAD the object after uploading it
|
||||
// so make up the object as best we can assuming it got
|
||||
// uploaded properly. If size < 0 then we need to do the HEAD.
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
o.md5 = md5sum
|
||||
o.bytes = size
|
||||
o.lastModified = time.Now()
|
||||
o.meta = req.Metadata
|
||||
o.mimeType = aws.StringValue(req.ContentType)
|
||||
o.storageClass = aws.StringValue(req.StorageClass)
|
||||
// If we have done a single part PUT request then we can read these
|
||||
if resp != nil {
|
||||
if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
|
||||
o.lastModified = date
|
||||
}
|
||||
o.setMD5FromEtag(resp.Header.Get("Etag"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
err = o.readMetaData(ctx)
|
||||
|
||||
@@ -53,6 +53,7 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
var md5 string
var contentType string
var headersToSign []string
tmpHeadersToSign := make(map[string][]string)
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -62,15 +63,24 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
tmpHeadersToSign[k] = v
}
}
}
var keys []string
for k := range tmpHeadersToSign {
keys = append(keys, k)
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
sort.Strings(keys)

for _, key := range keys {
vall := strings.Join(tmpHeadersToSign[key], ",")
headersToSign = append(headersToSign, key+":"+vall)
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
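The rewrite gathers the x-amz-* headers first and sorts them by header name before joining them into the canonical string, as the linked AWS v2 signing documentation describes. A standalone illustration of the canonical block this produces (the header names are invented):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// canonicalAmzHeaders joins each header's values with commas and emits
// the headers one per line in sorted key order, as v2 signing expects.
func canonicalAmzHeaders(h map[string][]string) string {
	var keys []string
	for k := range h {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var lines []string
	for _, k := range keys {
		lines = append(lines, k+":"+strings.Join(h[k], ","))
	}
	if len(lines) == 0 {
		return ""
	}
	return strings.Join(lines, "\n") + "\n"
}

func main() {
	fmt.Print(canonicalAmzHeaders(map[string][]string{
		"x-amz-meta-b": {"2"},
		"x-amz-meta-a": {"1", "3"},
	}))
	// x-amz-meta-a:1,3
	// x-amz-meta-b:2
}
```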
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
@@ -191,6 +192,20 @@ Home directory can be found in a shared folder called "home"
|
||||
|
||||
The subsystem option is ignored when server_command is defined.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_fstat",
|
||||
Default: false,
|
||||
Help: `If set use fstat instead of stat
|
||||
|
||||
Some servers limit the amount of open files and calling Stat after opening
|
||||
the file will throw an error from the server. Setting this flag will call
|
||||
Fstat instead of Stat which is called on an already open file handle.
|
||||
|
||||
It has been found that this helps with IBM Sterling SFTP servers which have
|
||||
"extractability" level set to 1 which means only 1 file can be opened at
|
||||
any given time.
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
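Further down, the diff wires this option into the client via sftp.UseFstat. A minimal sketch of that plumbing, assuming the github.com/pkg/sftp client options used by this backend; the MaxPacket value is only an example:

```go
package main

import (
	"fmt"

	"github.com/pkg/sftp"
)

func main() {
	useFstat := true // would come from the use_fstat config entry
	opts := []sftp.ClientOption{sftp.MaxPacket(32 * 1024)}
	opts = opts[:len(opts):len(opts)] // don't overwrite a caller's backing array
	opts = append(opts, sftp.UseFstat(useFstat))
	fmt.Println("client options prepared:", len(opts))
}
```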
}
|
||||
fs.Register(fsi)
|
||||
@@ -218,6 +233,7 @@ type Options struct {
|
||||
SkipLinks bool `config:"skip_links"`
|
||||
Subsystem string `config:"subsystem"`
|
||||
ServerCommand string `config:"server_command"`
|
||||
UseFstat bool `config:"use_fstat"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -343,12 +359,15 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
|
||||
opts = append(opts, sftp.UseFstat(f.opt.UseFstat))
|
||||
|
||||
return sftp.NewClientPipe(pr, pw, opts...)
|
||||
}
|
||||
|
||||
// Get an SFTP connection from the pool, or open a new one
|
||||
func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
|
||||
accounting.LimitTPS(ctx)
|
||||
f.poolMu.Lock()
|
||||
for len(f.pool) > 0 {
|
||||
c = f.pool[0]
|
||||
|
||||
@@ -531,7 +531,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
// Find the leaf in pathID
|
||||
found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
|
||||
if item.Name == leaf {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
pathIDOut = item.Ref
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
@@ -167,6 +168,11 @@ func init() {
|
||||
Help: "Admin",
|
||||
Value: "admin",
|
||||
}},
|
||||
}, {
|
||||
Name: "leave_parts_on_error",
|
||||
Help: `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "storage_policy",
|
||||
Help: `The storage policy to use when creating a new container
|
||||
@@ -208,6 +214,7 @@ type Options struct {
|
||||
ApplicationCredentialID string `config:"application_credential_id"`
|
||||
ApplicationCredentialName string `config:"application_credential_name"`
|
||||
ApplicationCredentialSecret string `config:"application_credential_secret"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
@@ -1127,44 +1134,35 @@ func min(x, y int64) int64 {
|
||||
return y
|
||||
}
|
||||
|
||||
// removeSegments removes any old segments from o
|
||||
//
|
||||
// if except is passed in then segments with that prefix won't be deleted
|
||||
func (o *Object) removeSegments(except string) error {
|
||||
segmentsContainer, _, err := o.getSegmentsDlo()
|
||||
func (o *Object) getSegmentsLargeObject() (map[string][]string, error) {
|
||||
container, objectName := o.split()
|
||||
segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(container, objectName)
|
||||
if err != nil {
|
||||
return err
|
||||
fs.Debugf(o, "Failed to get list segments of object: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
except = path.Join(o.remote, except)
|
||||
// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
|
||||
err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
var containerSegments = make(map[string][]string)
|
||||
for _, segment := range segmentObjects {
|
||||
if _, ok := containerSegments[segmentContainer]; !ok {
|
||||
containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
|
||||
}
|
||||
if except != "" && strings.HasPrefix(remote, except) {
|
||||
// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
|
||||
var err error
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ObjectDelete(segmentsContainer, remote)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
segments, _ := containerSegments[segmentContainer]
|
||||
segments = append(segments, segment.Name)
|
||||
containerSegments[segmentContainer] = segments
|
||||
}
|
||||
// remove the segments container if empty, ignore errors
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = o.fs.c.ContainerDelete(segmentsContainer)
|
||||
if err == swift.ContainerNotFound || err == swift.ContainerNotEmpty {
|
||||
return false, err
|
||||
return containerSegments, nil
|
||||
}
|
||||
|
||||
func (o *Object) removeSegmentsLargeObject(containerSegments map[string][]string) error {
|
||||
if containerSegments == nil || len(containerSegments) <= 0 {
|
||||
return nil
|
||||
}
|
||||
for container, segments := range containerSegments {
|
||||
_, err := o.fs.c.BulkDelete(container, segments)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to delete bulk segments %v", err)
|
||||
return err
|
||||
}
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Debugf(o, "Removed empty container %q", segmentsContainer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1194,7 +1192,7 @@ func urlEncode(str string) string {
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < len(str); i++ {
|
||||
c := str[i]
|
||||
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
|
||||
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
|
||||
_ = buf.WriteByte(c)
|
||||
} else {
|
||||
_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
|
||||
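The widened character class means underscores and hyphens in segment names are no longer percent-encoded. A quick before/after using a simplified copy of the encoder; the file name is invented:

```go
package main

import (
	"bytes"
	"fmt"
)

// urlEncode percent-encodes every byte outside the allowed set; the
// keepExtra flag models the new '_' and '-' exemptions from the diff.
func urlEncode(str string, keepExtra bool) string {
	var buf bytes.Buffer
	for i := 0; i < len(str); i++ {
		c := str[i]
		plain := (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.'
		if keepExtra {
			plain = plain || c == '_' || c == '-'
		}
		if plain {
			buf.WriteByte(c)
		} else {
			fmt.Fprintf(&buf, "%%%02X", c)
		}
	}
	return buf.String()
}

func main() {
	fmt.Println(urlEncode("seg_01-a.dat", false)) // seg%5F01%2Da.dat (old behaviour)
	fmt.Println(urlEncode("seg_01-a.dat", true))  // seg_01-a.dat (new behaviour)
}
```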
@@ -1234,10 +1232,20 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
|
||||
segmentsPath := path.Join(containerPath, uniquePrefix)
|
||||
in := bufio.NewReader(in0)
|
||||
segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
|
||||
segmentInfos := make([]string, 0, (size/int64(o.fs.opt.ChunkSize))+1)
|
||||
defer atexit.OnError(&err, func() {
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Delete segments when err raise %v", err)
|
||||
if segmentInfos == nil || len(segmentInfos) == 0 {
|
||||
return
|
||||
}
|
||||
deleteChunks(o, segmentsContainer, segmentInfos)
|
||||
})()
|
||||
for {
|
||||
// can we read at least one byte?
|
||||
if _, err := in.Peek(1); err != nil {
|
||||
if _, err = in.Peek(1); err != nil {
|
||||
if left > 0 {
|
||||
return "", err // read less than expected
|
||||
}
|
||||
@@ -1262,8 +1270,6 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
return shouldRetryHeaders(rxHeaders, err)
|
||||
})
|
||||
if err != nil {
|
||||
deleteChunks(o, segmentsContainer, segmentInfos)
|
||||
segmentInfos = nil
|
||||
return "", err
|
||||
}
|
||||
i++
|
||||
@@ -1277,21 +1283,23 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
|
||||
return shouldRetryHeaders(rxHeaders, err)
|
||||
})
|
||||
if err != nil {
|
||||
deleteChunks(o, segmentsContainer, segmentInfos)
|
||||
|
||||
if err == nil {
|
||||
//reset data
|
||||
segmentInfos = nil
|
||||
}
|
||||
return uniquePrefix + "/", err
|
||||
}
|
||||
|
||||
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
|
||||
if segmentInfos != nil && len(segmentInfos) > 0 {
|
||||
for _, v := range segmentInfos {
|
||||
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
|
||||
e := o.fs.c.ObjectDelete(segmentsContainer, v)
|
||||
if e != nil {
|
||||
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
|
||||
}
|
||||
if segmentInfos == nil || len(segmentInfos) == 0 {
|
||||
return
|
||||
}
|
||||
for _, v := range segmentInfos {
|
||||
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
|
||||
e := o.fs.c.ObjectDelete(segmentsContainer, v)
|
||||
if e != nil {
|
||||
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1312,20 +1320,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
// Note whether this is a dynamic large object before starting
|
||||
isDynamicLargeObject, err := o.isDynamicLargeObject()
|
||||
isLargeObject, err := o.isLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//capture segments before upload
|
||||
var segmentsContainer map[string][]string
|
||||
if isLargeObject {
|
||||
segmentsContainer, _ = o.getSegmentsLargeObject()
|
||||
}
|
||||
|
||||
// Set the mtime
|
||||
m := swift.Metadata{}
|
||||
m.SetModTime(modTime)
|
||||
contentType := fs.MimeType(ctx, src)
|
||||
headers := m.ObjectHeaders()
|
||||
fs.OpenOptionAddHeaders(options, headers)
|
||||
uniquePrefix := ""
|
||||
|
||||
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
|
||||
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
|
||||
_, err = o.updateChunks(in, headers, size, contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1359,10 +1373,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
o.size = int64(inCount.BytesRead())
|
||||
}
|
||||
}
|
||||
|
||||
// If file was a dynamic large object then remove old/all segments
|
||||
if isDynamicLargeObject {
|
||||
err = o.removeSegments(uniquePrefix)
|
||||
isInContainerVersioning, _ := o.isInContainerVersioning(container)
|
||||
// If file was a large object and versioning is not enabled on the container then remove old/all segments
|
||||
if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
|
||||
err := o.removeSegmentsLargeObject(segmentsContainer)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
|
||||
}
|
||||
@@ -1389,15 +1403,10 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
isStaticLargeObject, err := o.isStaticLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var segmentContainer string
|
||||
var segmentObjects []swift.Object
|
||||
|
||||
if isStaticLargeObject {
|
||||
segmentContainer, segmentObjects, err = o.fs.c.LargeObjectGetSegments(container, containerPath)
|
||||
// capture the segment objects if this object is a large object
|
||||
var containerSegments map[string][]string
|
||||
if isLargeObject {
|
||||
containerSegments, err = o.getSegmentsLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1415,31 +1424,9 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
isDynamicLargeObject, err := o.isDynamicLargeObject()
|
||||
if err != nil {
|
||||
return err
|
||||
if isLargeObject {
|
||||
return o.removeSegmentsLargeObject(containerSegments)
|
||||
}
|
||||
// ...then segments if required
|
||||
//delete segment for dynamic large object
|
||||
if isDynamicLargeObject {
|
||||
return o.removeSegments("")
|
||||
}
|
||||
|
||||
//delete segment for static large object
|
||||
if isStaticLargeObject && len(segmentContainer) > 0 && segmentObjects != nil && len(segmentObjects) > 0 {
|
||||
var segmentNames []string
|
||||
for _, segmentObject := range segmentObjects {
|
||||
if len(segmentObject.Name) == 0 {
|
||||
continue
|
||||
}
|
||||
segmentNames = append(segmentNames, segmentObject.Name)
|
||||
}
|
||||
_, err := o.fs.c.BulkDelete(segmentContainer, segmentNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,15 +4,19 @@ package swift
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -74,6 +78,80 @@ func (f *Fs) testNoChunk(t *testing.T) {
|
||||
// Additional tests that aren't in the framework
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("NoChunk", f.testNoChunk)
|
||||
t.Run("WithChunk", f.testWithChunk)
|
||||
t.Run("WithChunkFail", f.testWithChunkFail)
|
||||
}
|
||||
|
||||
func (f *Fs) testWithChunk(t *testing.T) {
|
||||
preConfChunkSize := f.opt.ChunkSize
|
||||
preConfChunk := f.opt.NoChunk
|
||||
f.opt.NoChunk = false
|
||||
f.opt.ChunkSize = 1024 * fs.Byte
|
||||
defer func() {
|
||||
//restore old config after test
|
||||
f.opt.ChunkSize = preConfChunkSize
|
||||
f.opt.NoChunk = preConfChunk
|
||||
}()
|
||||
|
||||
file := fstest.Item{
|
||||
ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
|
||||
Path: "piped data chunk.txt",
|
||||
Size: -1, // use unknown size during upload
|
||||
}
|
||||
const contentSize = 2048
|
||||
contents := random.String(contentSize)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
uploadHash := hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
|
||||
file.Size = -1
|
||||
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
||||
ctx := context.TODO()
|
||||
obj, err := f.Features().PutStream(ctx, in, obji)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, obj)
|
||||
}
|
||||
|
||||
func (f *Fs) testWithChunkFail(t *testing.T) {
|
||||
preConfChunkSize := f.opt.ChunkSize
|
||||
preConfChunk := f.opt.NoChunk
|
||||
f.opt.NoChunk = false
|
||||
f.opt.ChunkSize = 1024 * fs.Byte
|
||||
segmentContainer := f.root + "_segments"
|
||||
defer func() {
|
||||
//restore config
|
||||
f.opt.ChunkSize = preConfChunkSize
|
||||
f.opt.NoChunk = preConfChunk
|
||||
}()
|
||||
path := "piped data chunk with error.txt"
|
||||
file := fstest.Item{
|
||||
ModTime: fstest.Time("2021-01-04T03:46:00.499999999Z"),
|
||||
Path: path,
|
||||
Size: -1, // use unknown size during upload
|
||||
}
|
||||
const contentSize = 4096
|
||||
const errPosition = 3072
|
||||
contents := random.String(contentSize)
|
||||
buf := bytes.NewBufferString(contents[:errPosition])
|
||||
errMessage := "potato"
|
||||
er := &readers.ErrorReader{Err: errors.New(errMessage)}
|
||||
in := ioutil.NopCloser(io.MultiReader(buf, er))
|
||||
|
||||
file.Size = contentSize
|
||||
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
||||
ctx := context.TODO()
|
||||
_, err := f.Features().PutStream(ctx, in, obji)
|
||||
// error is potato
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, errMessage, err.Error())
|
||||
_, _, err = f.c.Object(f.rootContainer, path)
|
||||
assert.Equal(t, swift.ObjectNotFound, err)
|
||||
prefix := path
|
||||
objs, err := f.c.Objects(segmentContainer, &swift.ObjectsOpts{
|
||||
Prefix: prefix,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, objs)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -36,6 +36,7 @@ var (
|
||||
cgo = flag.Bool("cgo", false, "Use cgo for the build")
|
||||
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
|
||||
tags = flag.String("tags", "", "Space separated list of build tags")
|
||||
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
|
||||
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
|
||||
)
|
||||
|
||||
@@ -300,8 +301,15 @@ func compileArch(version, goos, goarch, dir string) bool {
|
||||
"-trimpath",
|
||||
"-o", output,
|
||||
"-tags", *tags,
|
||||
"..",
|
||||
}
|
||||
if *buildmode != "" {
|
||||
args = append(args,
|
||||
"-buildmode", *buildmode,
|
||||
)
|
||||
}
|
||||
args = append(args,
|
||||
"..",
|
||||
)
|
||||
env := []string{
|
||||
"GOOS=" + goos,
|
||||
"GOARCH=" + stripVersion(goarch),
|
||||
|
||||
@@ -42,6 +42,7 @@ docs = [
|
||||
"googlecloudstorage.md",
|
||||
"drive.md",
|
||||
"googlephotos.md",
|
||||
"hdfs.md",
|
||||
"http.md",
|
||||
"hubic.md",
|
||||
"jottacloud.md",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
@@ -35,7 +36,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "cat remote:path",
|
||||
Short: `Concatenates any files and sends them to stdout.`,
|
||||
Long: `
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`
|
||||
rclone cat sends any files to standard output.
|
||||
|
||||
You can use it like this to output a single file
|
||||
@@ -50,11 +52,11 @@ Or like this to output any .txt files in dir or its subdirectories.
|
||||
|
||||
rclone --include "*.txt" cat remote:path/to/dir
|
||||
|
||||
Use the --head flag to print characters only at the start, --tail for
|
||||
the end and --offset and --count to print a section in the middle.
|
||||
Use the |--head| flag to print characters only at the start, |--tail| for
|
||||
the end and |--offset| and |--count| to print a section in the middle.
|
||||
Note that if offset is negative it will count from the end, so
|
||||
--offset -1 --count 1 is equivalent to --tail 1.
|
||||
`,
|
||||
|--offset -1 --count 1| is equivalent to |--tail 1|.
|
||||
`, "|", "`"),
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
usedOffset := offset != 0 || count >= 0
|
||||
usedHead := head > 0
|
||||
|
||||
@@ -45,7 +45,8 @@ func AddFlags(cmdFlags *pflag.FlagSet) {
|
||||
}
|
||||
|
||||
// FlagsHelp describes the flags for the help
|
||||
var FlagsHelp = strings.Replace(`
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
var FlagsHelp = strings.ReplaceAll(`
|
||||
If you supply the |--one-way| flag, it will only check that files in
|
||||
the source match the files in the destination, not the other way
|
||||
around. This means that extra files in the destination that are not in
|
||||
@@ -66,7 +67,7 @@ you what happened to it. These are reminiscent of diff files.
|
||||
- |+ path| means path was missing on the destination, so only in the source
|
||||
- |* path| means path was present in source and destination but different.
|
||||
- |! path| means there was an error reading or hashing the source or dest.
|
||||
`, "|", "`", -1)
|
||||
`, "|", "`")
|
||||
|
||||
// GetCheckOpt gets the options corresponding to the check flags
|
||||
func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {
|
||||
@@ -130,19 +131,19 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "check source:path dest:path",
|
||||
Short: `Checks the files in the source and destination match.`,
|
||||
Long: `
|
||||
Long: strings.ReplaceAll(`
|
||||
Checks the files in the source and destination match. It compares
|
||||
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
|
||||
match. It doesn't alter the source or destination.
|
||||
|
||||
If you supply the --size-only flag, it will only compare the sizes not
|
||||
If you supply the |--size-only| flag, it will only compare the sizes not
|
||||
the hashes as well. Use this for a quick check.
|
||||
|
||||
If you supply the --download flag, it will download the data from
|
||||
If you supply the |--download| flag, it will download the data from
|
||||
both remotes and check them against each other on the fly. This can
|
||||
be useful for remotes that don't support hashes or if you really want
|
||||
to check all the data.
|
||||
` + FlagsHelp,
|
||||
`, "|", "`") + FlagsHelp,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
|
||||
@@ -382,6 +382,11 @@ func initConfig() {
|
||||
// Finish parsing any command line flags
|
||||
configflags.SetFlags(ci)
|
||||
|
||||
// Hide console window
|
||||
if ci.NoConsole {
|
||||
terminal.HideConsole()
|
||||
}
|
||||
|
||||
// Load filters
|
||||
err := filterflags.Reload(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
@@ -23,11 +24,12 @@ const fhUnset = ^uint64(0)
|
||||
|
||||
// FS represents the top level filing system
|
||||
type FS struct {
|
||||
VFS *vfs.VFS
|
||||
f fs.Fs
|
||||
ready chan (struct{})
|
||||
mu sync.Mutex // to protect the below
|
||||
handles []vfs.Handle
|
||||
VFS *vfs.VFS
|
||||
f fs.Fs
|
||||
ready chan (struct{})
|
||||
mu sync.Mutex // to protect the below
|
||||
handles []vfs.Handle
|
||||
destroyed int32 // read/write with sync/atomic
|
||||
}
|
||||
|
||||
// NewFS makes a new FS
|
||||
@@ -187,6 +189,7 @@ func (fsys *FS) Init() {
|
||||
// Destroy call).
|
||||
func (fsys *FS) Destroy() {
|
||||
defer log.Trace(fsys.f, "")("")
|
||||
atomic.StoreInt32(&fsys.destroyed, 1)
|
||||
}
|
||||
|
||||
// Getattr reads the attributes for path
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
@@ -168,7 +169,10 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
|
||||
// Shutdown the VFS
|
||||
fsys.VFS.Shutdown()
|
||||
var umountOK bool
|
||||
if atexit.Signalled() {
|
||||
if atomic.LoadInt32(&fsys.destroyed) != 0 {
|
||||
fs.Debugf(nil, "Not calling host.Unmount as mount already Destroyed")
|
||||
umountOK = true
|
||||
} else if atexit.Signalled() {
|
||||
// If we have received a signal then FUSE will be shutting down already
|
||||
fs.Debugf(nil, "Not calling host.Unmount as signal received")
|
||||
umountOK = true
|
||||
|
||||
@@ -2,6 +2,7 @@ package copy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
@@ -23,7 +24,8 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "copy source:path dest:path",
|
||||
Short: `Copy files from source to dest, skipping already copied.`,
|
||||
Long: `
|
||||
// Note: "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`
|
||||
Copy the source to the destination. Doesn't transfer
|
||||
unchanged files, testing by size and modification time or
|
||||
MD5SUM. Doesn't delete files from the destination.
|
||||
@@ -55,8 +57,8 @@ Not to
|
||||
destpath/sourcepath/one.txt
|
||||
destpath/sourcepath/two.txt
|
||||
|
||||
If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
|
||||
written a trailing / - meaning "copy the contents of this directory".
|
||||
If you are familiar with |rsync|, rclone always works as if you had
|
||||
written a trailing |/| - meaning "copy the contents of this directory".
|
||||
This applies to all commands and whether you are talking about the
|
||||
source or destination.
|
||||
|
||||
@@ -71,10 +73,10 @@ recently very efficiently like this:
|
||||
|
||||
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
|
||||
**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
|
||||
|
||||
**Note**: Use the ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag to test without copying anything.
|
||||
`,
|
||||
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
|
||||
`, "|", "`"),
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||
|
||||
@@ -36,6 +36,9 @@ use it like this
|
||||
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
|
||||
|
||||
rclone cryptdecode --reverse encryptedremote: filename1 filename2
|
||||
|
||||
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `command.
|
||||
See the documentation on the ` + "`crypt`" + ` overlay for more info.
|
||||
`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 11, command, args)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"log"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -143,6 +144,9 @@ Or
|
||||
args = args[1:]
|
||||
}
|
||||
fdst := cmd.NewFsSrc(args)
|
||||
if !byHash && !fdst.Features().DuplicateFiles {
|
||||
fs.Logf(fdst, "Can't have duplicate names here. Perhaps you wanted --by-hash ? Continuing anyway.")
|
||||
}
|
||||
cmd.Run(false, false, command, func() error {
|
||||
return operations.Deduplicate(context.Background(), fdst, dedupeMode, byHash)
|
||||
})
|
||||
|
||||
@@ -2,6 +2,7 @@ package delete
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
@@ -22,16 +23,17 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "delete remote:path",
|
||||
Short: `Remove the files in path.`,
|
||||
Long: `
|
||||
Remove the files in path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`
|
||||
Remove the files in path. Unlike |purge| it obeys include/exclude
|
||||
filters so can be used to selectively delete files.
|
||||
|
||||
` + "`rclone delete`" + ` only deletes files but leaves the directory structure
|
||||
|rclone delete| only deletes files but leaves the directory structure
|
||||
alone. If you want to delete a directory and all of its contents use
|
||||
the ` + "`purge`" + ` command.
|
||||
the |purge| command.
|
||||
|
||||
If you supply the --rmdirs flag, it will remove all empty directories along with it.
|
||||
You can also use the separate command ` + "`rmdir`" + ` or ` + "`rmdirs`" + ` to
|
||||
If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
|
||||
You can also use the separate command |rmdir| or |rmdirs| to
|
||||
delete empty directories only.
|
||||
|
||||
For example, to delete all files bigger than 100MBytes, you may first want to check what
|
||||
@@ -48,8 +50,8 @@ That reads "delete everything with a minimum size of 100 MB", hence
|
||||
delete all files bigger than 100MBytes.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
|
||||
`,
|
||||
|--dry-run| or the |--interactive|/|-i| flag.
|
||||
`, "|", "`"),
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -1,26 +1,31 @@
|
||||
package lshelp
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Help describes the common help for all the list commands
|
||||
var Help = `
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
var Help = strings.ReplaceAll(`
|
||||
Any of the filtering options can be applied to this command.
|
||||
|
||||
There are several related list commands
|
||||
|
||||
* ` + "`ls`" + ` to list size and path of objects only
|
||||
* ` + "`lsl`" + ` to list modification time, size and path of objects only
|
||||
* ` + "`lsd`" + ` to list directories only
|
||||
* ` + "`lsf`" + ` to list objects and directories in easy to parse format
|
||||
* ` + "`lsjson`" + ` to list objects and directories in JSON format
|
||||
* |ls| to list size and path of objects only
|
||||
* |lsl| to list modification time, size and path of objects only
|
||||
* |lsd| to list directories only
|
||||
* |lsf| to list objects and directories in easy to parse format
|
||||
* |lsjson| to list objects and directories in JSON format
|
||||
|
||||
` + "`ls`,`lsl`,`lsd`" + ` are designed to be human readable.
|
||||
` + "`lsf`" + ` is designed to be human and machine readable.
|
||||
` + "`lsjson`" + ` is designed to be machine readable.
|
||||
|ls|,|lsl|,|lsd| are designed to be human readable.
|
||||
|lsf| is designed to be human and machine readable.
|
||||
|lsjson| is designed to be machine readable.
|
||||
|
||||
Note that ` + "`ls` and `lsl`" + ` recurse by default - use "--max-depth 1" to stop the recursion.
|
||||
Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion.
|
||||
|
||||
The other list commands ` + "`lsd`,`lsf`,`lsjson`" + ` do not recurse by default - use "-R" to make them recurse.
|
||||
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse.
|
||||
|
||||
Listing a non existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket based remotes).
|
||||
`
|
||||
`, "|", "`")
|
||||
|
||||
@@ -159,34 +159,36 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
|
||||
Use: commandName + " remote:path /path/to/mountpoint",
|
||||
Hidden: hidden,
|
||||
Short: `Mount the remote as file system on a mountpoint.`,
|
||||
Long: `
|
||||
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
// "@" will be replaced by the command name
|
||||
Long: strings.ReplaceAll(strings.ReplaceAll(`
|
||||
rclone @ allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with
|
||||
FUSE.
|
||||
|
||||
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.
|
||||
First set up your remote using |rclone config|. Check it works with |rclone ls| etc.
|
||||
|
||||
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
|
||||
Mount runs in foreground mode by default, use the ` + "`--daemon`" + ` flag to specify background mode.
|
||||
Mount runs in foreground mode by default, use the |--daemon| flag to specify background mode.
|
||||
You can only run mount in foreground mode on Windows.
|
||||
|
||||
On Linux/macOS/FreeBSD start the mount like this, where ` + "`/path/to/local/mount`" + `
|
||||
On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
|
||||
is an **empty** **existing** directory:
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
|
||||
rclone @ remote:path/to/files /path/to/local/mount
|
||||
|
||||
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
|
||||
for details. The following examples will mount to an automatically assigned drive,
|
||||
to specific drive letter ` + "`X:`" + `, to path ` + "`C:\\path\\to\\nonexistent\\directory`" + `
|
||||
to specific drive letter |X:|, to path |C:\path\to\nonexistent\directory|
|
||||
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
|
||||
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
|
||||
the last example will mount as network share ` + "`\\cloud\remote`" + ` and map it to an
|
||||
the last example will mount as network share |\\cloud\remote| and map it to an
|
||||
automatically assigned drive:
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files *
|
||||
rclone ` + commandName + ` remote:path/to/files X:
|
||||
rclone ` + commandName + ` remote:path/to/files C:\path\to\nonexistent\directory
|
||||
rclone ` + commandName + ` remote:path/to/files \\cloud\remote
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\to\nonexistent\directory
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
|
||||
When the program ends while in foreground mode, either via Ctrl+C or receiving
|
||||
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
|
||||
@@ -208,12 +210,12 @@ then an additional 1PB of free space is assumed. If the remote does not
|
||||
[support](https://rclone.org/overview/#optional-features) the about feature
|
||||
at all, then 1PB is set as both the total and the free size.
|
||||
|
||||
**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
|
||||
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
|
||||
or newer on some platforms depending on the underlying FUSE library in use.
|
||||
|
||||
### Installing on Windows
|
||||
|
||||
To run rclone ` + commandName + ` on Windows, you will need to
|
||||
To run rclone @ on Windows, you will need to
|
||||
download and install [WinFsp](http://www.secfs.net/winfsp/).
|
||||
|
||||
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
|
||||
@@ -221,7 +223,7 @@ Windows File System Proxy which makes it easy to write user space file
|
||||
systems for Windows. It provides a FUSE emulation layer which rclone
|
||||
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
|
||||
Both of these packages are by Bill Zissimopoulos who was very helpful
|
||||
during the implementation of rclone ` + commandName + ` for Windows.
|
||||
during the implementation of rclone @ for Windows.
|
||||
|
||||
#### Mounting modes on windows
|
||||
|
||||
@@ -240,54 +242,54 @@ as a network drive instead.
|
||||
|
||||
When mounting as a fixed disk drive you can either mount to an unused drive letter,
|
||||
or to a path - which must be **non-existent** subdirectory of an **existing** parent
|
||||
directory or drive. Using the special value ` + "`*`" + ` will tell rclone to
|
||||
directory or drive. Using the special value |*| will tell rclone to
|
||||
automatically assign the next available drive letter, starting with Z: and moving backward.
|
||||
Examples:
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files *
|
||||
rclone ` + commandName + ` remote:path/to/files X:
|
||||
rclone ` + commandName + ` remote:path/to/files C:\path\to\nonexistent\directory
|
||||
rclone ` + commandName + ` remote:path/to/files X:
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\to\nonexistent\directory
|
||||
rclone @ remote:path/to/files X:
|
||||
|
||||
Option ` + "`--volname`" + ` can be used to set a custom volume name for the mounted
|
||||
Option |--volname| can be used to set a custom volume name for the mounted
|
||||
file system. The default is to use the remote name and path.
|
||||
|
||||
To mount as network drive, you can add option ` + "`--network-mode`" + `
|
||||
to your ` + commandName + ` command. Mounting to a directory path is not supported in
|
||||
To mount as network drive, you can add option |--network-mode|
|
||||
to your @ command. Mounting to a directory path is not supported in
|
||||
this mode, it is a limitation Windows imposes on junctions, so the remote must always
|
||||
be mounted to a drive letter.
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files X: --network-mode
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
|
||||
A volume name specified with ` + "`--volname`" + ` will be used to create the network share path.
|
||||
A complete UNC path, such as ` + "`\\\\cloud\\remote`" + `, optionally with path
|
||||
` + "`\\\\cloud\\remote\\madeup\\path`" + `, will be used as is. Any other
|
||||
string will be used as the share part, after a default prefix ` + "`\\\\server\\`" + `.
|
||||
If no volume name is specified then ` + "`\\\\server\\share`" + ` will be used.
|
||||
A volume name specified with |--volname| will be used to create the network share path.
|
||||
A complete UNC path, such as |\\cloud\remote|, optionally with path
|
||||
|\\cloud\remote\madeup\path|, will be used as is. Any other
|
||||
string will be used as the share part, after a default prefix |\\server\|.
|
||||
If no volume name is specified then |\\server\share| will be used.
|
||||
You must make sure the volume name is unique when you are mounting more than one drive,
|
||||
or else the mount command will fail. The share name will be treated as the volume label for
|
||||
the mapped drive, shown in Windows Explorer etc, while the complete
|
||||
` + "`\\\\server\\share`" + ` will be reported as the remote UNC path by
|
||||
` + "`net use`" + ` etc, just like a normal network drive mapping.
|
||||
|\\server\share| will be reported as the remote UNC path by
|
||||
|net use| etc, just like a normal network drive mapping.
|
||||
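The naming rules above can be summarised in a small sketch. This is not rclone's code; `shareNameFromVolname` and the example values are hypothetical, and only the three cases described above are handled:

```go
// A minimal sketch of the rules for turning a --volname value into the UNC
// share path used in network mode: a full UNC path is used as-is, any other
// string becomes the share part after the default prefix, and an empty value
// falls back to \\server\share.
package main

import (
	"fmt"
	"strings"
)

func shareNameFromVolname(volname string) string {
	switch {
	case volname == "":
		return `\\server\share` // default when no volume name is given
	case strings.HasPrefix(volname, `\\`):
		return volname // complete UNC paths are used as-is
	default:
		return `\\server\` + volname // anything else becomes the share part
	}
}

func main() {
	for _, v := range []string{"", `\\cloud\remote`, `\\cloud\remote\madeup\path`, "mydrive"} {
		fmt.Printf("%q -> %s\n", v, shareNameFromVolname(v))
	}
}
```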
|
||||
If you specify a full network share UNC path with ` + "`--volname`" + `, this will implicitly
|
||||
set the ` + "`--network-mode`" + ` option, so the following two examples have the same result:
|
||||
If you specify a full network share UNC path with |--volname|, this will implicitly
|
||||
set the |--network-mode| option, so the following two examples have the same result:
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files X: --network-mode
|
||||
rclone ` + commandName + ` remote:path/to/files X: --volname \\server\share
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
rclone @ remote:path/to/files X: --volname \\server\share
|
||||
|
||||
You may also specify the network share UNC path as the mountpoint itself. Then rclone
|
||||
will automatically assign a drive letter, same as with ` + "`*`" + ` and use that as
|
||||
will automatically assign a drive letter, same as with |*| and use that as
|
||||
mountpoint, and instead use the UNC path specified as the volume name, as if it were
|
||||
specified with the ` + "`--volname`" + ` option. This will also implicitly set
|
||||
the ` + "`--network-mode`" + ` option. This means the following two examples have the same result:
|
||||
specified with the |--volname| option. This will also implicitly set
|
||||
the |--network-mode| option. This means the following two examples have the same result:
|
||||
|
||||
rclone ` + commandName + ` remote:path/to/files \\cloud\remote
|
||||
rclone ` + commandName + ` remote:path/to/files * --volname \\cloud\remote
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
rclone @ remote:path/to/files * --volname \\cloud\remote
|
||||
|
||||
There is yet another way to enable network mode, and to set the share path,
|
||||
and that is to pass the "native" libfuse/WinFsp option directly:
|
||||
` + "`--fuse-flag --VolumePrefix=\\server\\share`" + `. Note that the path
|
||||
|--fuse-flag --VolumePrefix=\server\share|. Note that the path
|
||||
must be written with just a single backslash prefix in this case.
|
||||
|
||||
|
||||
@@ -308,12 +310,12 @@ representing permissions for the POSIX permission scopes: Owner, group and other
|
||||
By default, the owner and group will be taken from the current user, and the built-in
|
||||
group "Everyone" will be used to represent others. The user/group can be customized
|
||||
with FUSE options "UserName" and "GroupName",
|
||||
e.g. ` + "`-o UserName=user123 -o GroupName=\"Authenticated Users\"`" + `.
|
||||
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
|
||||
|
||||
The permissions on each entry will be set according to
|
||||
[options](#options) ` + "`--dir-perms`" + ` and ` + "`--file-perms`" + `,
|
||||
[options](#options) |--dir-perms| and |--file-perms|,
|
||||
which take a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
|
||||
where the default corresponds to ` + "`--file-perms 0666 --dir-perms 0777`" + `.
|
||||
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.
|
||||
|
||||
Note that the mapping of permissions is not always trivial, and the result
|
||||
you see in Windows Explorer may not be exactly what you expect.
|
||||
@@ -342,10 +344,10 @@ alternatively using [the nssm service manager](https://nssm.cc/usage).
|
||||
|
||||
### Limitations
|
||||
|
||||
Without the use of ` + "`--vfs-cache-mode`" + ` this can only write files
|
||||
Without the use of |--vfs-cache-mode| this can only write files
|
||||
sequentially, and it can only seek when reading. This means that many
|
||||
applications won't work with their files on an rclone mount without
|
||||
` + "`--vfs-cache-mode writes`" + ` or ` + "`--vfs-cache-mode full`" + `.
|
||||
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
|
||||
See the [File Caching](#file-caching) section for more info.
|
||||
|
||||
The bucket based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
|
||||
@@ -355,21 +357,21 @@ the directory cache.
|
||||
|
||||
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
|
||||
|
||||
### rclone ` + commandName + ` vs rclone sync/copy
|
||||
### rclone @ vs rclone sync/copy
|
||||
|
||||
File systems expect things to be 100% reliable, whereas cloud storage
|
||||
systems are a long way from 100% reliable. The rclone sync/copy
|
||||
commands cope with this with lots of retries. However rclone ` + commandName + `
|
||||
commands cope with this with lots of retries. However rclone @
|
||||
can't use retries in the same way without making local copies of the
|
||||
uploads. Look at the [file caching](#file-caching) section
|
||||
for solutions to make ` + commandName + ` more reliable.
|
||||
for solutions to make @ more reliable.
|
||||
|
||||
### Attribute caching
|
||||
|
||||
You can use the flag ` + "`--attr-timeout`" + ` to set the time the kernel caches
|
||||
You can use the flag |--attr-timeout| to set the time the kernel caches
|
||||
the attributes (size, modification time, etc.) for directory entries.
|
||||
|
||||
The default is "1s" which caches files just long enough to avoid
|
||||
The default is |1s| which caches files just long enough to avoid
|
||||
too many callbacks to rclone from the kernel.
|
||||
|
||||
In theory 0s should be the correct value for filesystems which can
|
||||
@@ -380,14 +382,14 @@ few problems such as
|
||||
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).
|
||||
|
||||
The kernel can cache the info about a file for the time given by
|
||||
` + "`--attr-timeout`" + `. You may see corruption if the remote file changes
|
||||
|--attr-timeout|. You may see corruption if the remote file changes
|
||||
length during this window. It will show up as either a truncated file
|
||||
or a file with garbage on the end. With ` + "`--attr-timeout 1s`" + ` this is
|
||||
very unlikely but not impossible. The higher you set ` + "`--attr-timeout`" + `
|
||||
or a file with garbage on the end. With |--attr-timeout 1s| this is
|
||||
very unlikely but not impossible. The higher you set |--attr-timeout|
|
||||
the more likely it is. The default setting of "1s" is the lowest
|
||||
setting which mitigates the problems above.
|
||||
|
||||
If you set it higher ('10s' or '1m' say) then the kernel will call
|
||||
If you set it higher (|10s| or |1m| say) then the kernel will call
|
||||
back to rclone less often, making it more efficient; however, there is
|
||||
more chance of the corruption issue above.
|
||||
|
||||
@@ -403,28 +405,28 @@ files to be visible in the mount.
|
||||
|
||||
### systemd
|
||||
|
||||
When running rclone ` + commandName + ` as a systemd service, it is possible
|
||||
When running rclone @ as a systemd service, it is possible
|
||||
to use Type=notify. In this case the service will enter the started state
|
||||
after the mountpoint has been successfully set up.
|
||||
Units having the rclone ` + commandName + ` service specified as a requirement
|
||||
Units having the rclone @ service specified as a requirement
|
||||
will see all files and folders immediately in this mode.
|
||||
|
||||
### chunked reading
|
||||
|
||||
` + "`--vfs-read-chunk-size`" + ` will enable reading the source objects in parts.
|
||||
|--vfs-read-chunk-size| will enable reading the source objects in parts.
|
||||
This can reduce the used download quota for some remotes by requesting only chunks
|
||||
from the remote that are actually read, at the cost of an increased number of requests.
|
||||
|
||||
When ` + "`--vfs-read-chunk-size-limit`" + ` is also specified and greater than
|
||||
` + "`--vfs-read-chunk-size`" + `, the chunk size for each open file will get doubled
|
||||
for each chunk read, until the specified value is reached. A value of -1 will disable
|
||||
When |--vfs-read-chunk-size-limit| is also specified and greater than
|
||||
|--vfs-read-chunk-size|, the chunk size for each open file will get doubled
|
||||
for each chunk read, until the specified value is reached. A value of |-1| will disable
|
||||
the limit and the chunk size will grow indefinitely.
|
||||
|
||||
With ` + "`--vfs-read-chunk-size 100M`" + ` and ` + "`--vfs-read-chunk-size-limit 0`" + `
|
||||
With |--vfs-read-chunk-size 100M| and |--vfs-read-chunk-size-limit 0|
|
||||
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
|
||||
When ` + "`--vfs-read-chunk-size-limit 500M`" + ` is specified, the result would be
|
||||
When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
|
||||
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
|
||||
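The chunk ranges quoted above follow from a simple doubling rule. Below is a minimal sketch (not the VFS implementation; `chunkRanges` is a hypothetical helper) that reproduces the two examples:

```go
// A minimal sketch of the chunked reading behaviour described above: each
// successive chunk doubles in size until the optional limit is reached;
// limit 0 keeps the size fixed and limit -1 lets it grow without bound.
package main

import "fmt"

func chunkRanges(size, limit int64, n int) [][2]int64 {
	var (
		ranges [][2]int64
		offset int64
		chunk  = size
	)
	for i := 0; i < n; i++ {
		ranges = append(ranges, [2]int64{offset, offset + chunk})
		offset += chunk
		if limit != 0 { // 0 means "never grow"
			chunk *= 2
			if limit > 0 && chunk > limit {
				chunk = limit
			}
		}
	}
	return ranges
}

func main() {
	const M = 1 << 20
	fmt.Println(chunkRanges(100*M, 0, 4))     // 0-100M, 100M-200M, 200M-300M, 300M-400M
	fmt.Println(chunkRanges(100*M, 500*M, 5)) // 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M
}
```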
` + vfs.Help,
|
||||
`, "|", "`"), "@", commandName) + vfs.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
opt := Opt // make a copy of the options
|
||||
|
||||
@@ -2,6 +2,7 @@ package move
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
@@ -26,20 +27,21 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "move source:path dest:path",
|
||||
Short: `Move files from source to dest.`,
|
||||
Long: `
|
||||
// Warning! "|" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`
|
||||
Moves the contents of the source directory to the destination
|
||||
directory. Rclone will error if the source and destination overlap and
|
||||
the remote does not support a server-side directory move operation.
|
||||
|
||||
If no filters are in use and if possible this will server-side move
|
||||
` + "`source:path`" + ` into ` + "`dest:path`" + `. After this ` + "`source:path`" + ` will no
|
||||
|source:path| into |dest:path|. After this |source:path| will no
|
||||
longer exist.
|
||||
|
||||
Otherwise for each file in ` + "`source:path`" + ` selected by the filters (if
|
||||
any) this will move it into ` + "`dest:path`" + `. If possible a server-side
|
||||
Otherwise for each file in |source:path| selected by the filters (if
|
||||
any) this will move it into |dest:path|. If possible a server-side
|
||||
move will be used, otherwise it will copy it (server-side if possible)
|
||||
into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
|
||||
` + "`source:path`" + `.
|
||||
into |dest:path| then delete the original (if no errors on copy) in
|
||||
|source:path|.
|
||||
|
||||
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
|
||||
|
||||
@@ -49,10 +51,10 @@ option when moving a small number of files into a large destination
|
||||
can speed transfers up greatly.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
|
||||
|--dry-run| or the |--interactive|/|-i| flag.
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
|
||||
`,
|
||||
**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
|
||||
`, "|", "`"),
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||
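The command help above is written as a raw string literal, with `|` standing in for backticks (which cannot appear inside a raw string) and, for `mount`, `@` standing in for the command name. Here is a self-contained sketch of that substitution pattern, using hypothetical help text rather than the real command definition:

```go
// A sketch of the placeholder convention used in the help strings above: the
// long help is a raw string literal, so it cannot contain backticks directly.
// "|" stands in for a backtick and "@" for the command name; both are
// substituted before the text is handed to cobra.
package main

import (
	"fmt"
	"strings"
)

const commandName = "mount" // hypothetical value, for illustration only

var longHelp = strings.ReplaceAll(strings.ReplaceAll(`
To run rclone @ on Windows, pass the special value |*| to pick a drive letter.
`, "|", "`"), "@", commandName)

func main() {
	fmt.Print(longHelp)
}
```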
|
||||
@@ -1,9 +1,9 @@
|
||||
package obscure
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
@@ -30,7 +30,8 @@ the config file. However it is very hard to shoulder surf a 64
|
||||
character hex token.
|
||||
|
||||
This command can also accept a password through STDIN instead of an
|
||||
argument by passing a hyphen as an argument. Example:
|
||||
argument by passing a hyphen as an argument. This will use the first
|
||||
line of STDIN as the password, not including the trailing newline.
|
||||
|
||||
echo "secretpassword" | rclone obscure -
|
||||
|
||||
@@ -40,13 +41,18 @@ obfuscating the hyphen itself.
|
||||
If you want to encrypt the config file then please use config file
|
||||
encryption - see [rclone config](/commands/rclone_config/) for more
|
||||
info.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
var password string
|
||||
fi, _ := os.Stdin.Stat()
|
||||
if args[0] == "-" && (fi.Mode()&os.ModeCharDevice) == 0 {
|
||||
bytes, _ := ioutil.ReadAll(os.Stdin)
|
||||
password = string(bytes)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
if scanner.Scan() {
|
||||
password = scanner.Text()
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
password = args[0]
|
||||
}
|
||||
@@ -55,5 +61,6 @@ info.`,
|
||||
fmt.Println(obscured)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
},
|
||||
}
|
||||
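For reference, the stdin handling introduced above can be exercised on its own. The following is a standalone sketch (not the actual `obscure` command; `readPasswordArg` is a hypothetical helper) showing the same idea: if the argument is `-` and stdin is piped, take the first line without its trailing newline:

```go
// A standalone sketch of the stdin handling shown in the diff above: when the
// argument is "-" and stdin is not a terminal, use the first line of stdin
// (without the trailing newline) as the password, otherwise use the argument.
package main

import (
	"bufio"
	"fmt"
	"os"
)

func readPasswordArg(arg string) (string, error) {
	fi, err := os.Stdin.Stat()
	if err != nil {
		return "", err
	}
	if arg == "-" && fi.Mode()&os.ModeCharDevice == 0 {
		scanner := bufio.NewScanner(os.Stdin)
		if scanner.Scan() {
			return scanner.Text(), nil
		}
		return "", scanner.Err()
	}
	return arg, nil
}

func main() {
	password, err := readPasswordArg("-")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("password length:", len(password))
}
```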
|
||||
@@ -78,6 +78,39 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
|
||||
return VFS
|
||||
}
|
||||
|
||||
// Accept a single connection - run in a go routine as the ssh
|
||||
// authentication can block
|
||||
func (s *server) acceptConnection(nConn net.Conn) {
|
||||
what := describeConn(nConn)
|
||||
|
||||
// Before use, a handshake must be performed on the incoming net.Conn.
|
||||
sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
|
||||
if err != nil {
|
||||
fs.Errorf(what, "SSH login failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
|
||||
|
||||
// Discard all global out-of-band Requests
|
||||
go ssh.DiscardRequests(reqs)
|
||||
|
||||
c := &conn{
|
||||
what: what,
|
||||
vfs: s.getVFS(what, sshConn),
|
||||
}
|
||||
if c.vfs == nil {
|
||||
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
|
||||
_ = nConn.Close()
|
||||
return
|
||||
}
|
||||
c.handlers = newVFSHandler(c.vfs)
|
||||
|
||||
// Accept all channels
|
||||
go c.handleChannels(chans)
|
||||
}
|
||||
|
||||
// Accept connections and call them in a goroutine
|
||||
func (s *server) acceptConnections() {
|
||||
for {
|
||||
nConn, err := s.listener.Accept()
|
||||
@@ -88,33 +121,7 @@ func (s *server) acceptConnections() {
|
||||
fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
|
||||
continue
|
||||
}
|
||||
what := describeConn(nConn)
|
||||
|
||||
// Before use, a handshake must be performed on the incoming net.Conn.
|
||||
sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
|
||||
if err != nil {
|
||||
fs.Errorf(what, "SSH login failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
|
||||
|
||||
// Discard all global out-of-band Requests
|
||||
go ssh.DiscardRequests(reqs)
|
||||
|
||||
c := &conn{
|
||||
what: what,
|
||||
vfs: s.getVFS(what, sshConn),
|
||||
}
|
||||
if c.vfs == nil {
|
||||
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
|
||||
_ = nConn.Close()
|
||||
continue
|
||||
}
|
||||
c.handlers = newVFSHandler(c.vfs)
|
||||
|
||||
// Accept all channels
|
||||
go c.handleChannels(chans)
|
||||
go s.acceptConnection(nConn)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -120,6 +120,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
|
||||
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
|
||||
{{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}}
|
||||
{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
|
||||
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
|
||||
{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
|
||||
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
|
||||
@@ -184,7 +184,7 @@ put them back in again.` >}}
|
||||
* Andres Alvarez <1671935+kir4h@users.noreply.github.com>
|
||||
* reddi1 <xreddi@gmail.com>
|
||||
* Matt Tucker <matthewtckr@gmail.com>
|
||||
* Sebastian Bünger <buengese@gmail.com>
|
||||
* Sebastian Bünger <buengese@gmail.com> <buengese@protonmail.com>
|
||||
* Martin Polden <mpolden@mpolden.no>
|
||||
* Alex Chen <Cnly@users.noreply.github.com>
|
||||
* Denis <deniskovpen@gmail.com>
|
||||
@@ -440,3 +440,19 @@ put them back in again.` >}}
|
||||
* Claudio Bantaloukas <rockdreamer@gmail.com>
|
||||
* Benjamin Gustin <gustin.ben@gmail.com>
|
||||
* Ingo Weiss <ingo@redhat.com>
|
||||
* Kerry Su <me@sshockwave.net>
|
||||
* Ilyess Bachiri <ilyess.bachiri@sonder.com>
|
||||
* Yury Stankevich <urykhy@gmail.com>
|
||||
* kice <wslikerqs@gmail.com>
|
||||
* Denis Neuling <denisneuling@gmail.com>
|
||||
* Janne Johansson <icepic.dz@gmail.com>
|
||||
* Patrik Nordlén <patriki@gmail.com>
|
||||
* CokeMine <aptx4561@gmail.com>
|
||||
* Sơn Trần-Nguyễn <github@sntran.com>
|
||||
* lluuaapp <266615+lluuaapp@users.noreply.github.com>
|
||||
* Zach Kipp <kipp.zach@gmail.com>
|
||||
* Riccardo Iaconelli <riccardo@kde.org>
|
||||
* Sakuragawa Misty <gyc990326@gmail.com>
|
||||
* Nicolas Rueff <nicolas@rueff.fr>
|
||||
* Pau Rodriguez-Estivill <prodrigestivill@gmail.com>
|
||||
* Bob Pusateri <BobPusateri@users.noreply.github.com>
|
||||
|
||||
@@ -146,27 +146,6 @@ Container level SAS URLs are useful for temporarily allowing third
|
||||
parties access to a single container or putting credentials into an
|
||||
untrusted environment such as a CI build server.
|
||||
|
||||
### Multipart uploads ###
|
||||
|
||||
Rclone supports multipart uploads with Azure Blob storage. Files
|
||||
bigger than 256MB will be uploaded using chunked upload by default.
|
||||
|
||||
The files will be uploaded in parallel in 4MB chunks (by default).
|
||||
Note that these chunks are buffered in memory and there may be up to
|
||||
`--transfers` of them being uploaded at once.
|
||||
|
||||
Files can't be split into more than 50,000 chunks, so by default
|
||||
the largest file that can be uploaded with 4MB chunk size is 195GB.
|
||||
Above this rclone will double the chunk size until it creates less
|
||||
than 50,000 chunks. By default this will mean a maximum file size of
|
||||
3.2TB can be uploaded. This can be raised to 5TB using
|
||||
`--azureblob-chunk-size 100M`.
|
||||
|
||||
Note that rclone doesn't commit the block list until the end of the
|
||||
upload which means that there is a limit of 9.5TB of multipart uploads
|
||||
in progress as Azure won't allow more than that amount of uncommitted
|
||||
blocks.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azureblob/azureblob.go then run make backenddocs" >}}
|
||||
### Standard Options
|
||||
|
||||
@@ -181,6 +160,26 @@ Storage Account Name (leave blank to use SAS URL or Emulator)
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-service-principal-file
|
||||
|
||||
Path to file containing credentials for use with a service principal.
|
||||
|
||||
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
||||
|
||||
$ az sp create-for-rbac --name "<name>" \
|
||||
--role "Storage Blob Data Owner" \
|
||||
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
||||
> azure-principal.json
|
||||
|
||||
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
|
||||
for more details.
|
||||
|
||||
|
||||
- Config: service_principal_file
|
||||
- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-key
|
||||
|
||||
Storage Account Key (leave blank to use SAS URL or Emulator)
|
||||
@@ -200,6 +199,24 @@ SAS URL for container level access only
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-use-msi
|
||||
|
||||
Use a managed service identity to authenticate (only works in Azure)
|
||||
|
||||
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
||||
to authenticate to Azure Storage instead of a SAS token or account key.
|
||||
|
||||
If the VM(SS) on which this program is running has a system-assigned identity, it will
|
||||
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
|
||||
the user-assigned identity will be used by default. If the resource has multiple user-assigned
|
||||
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
|
||||
msi_client_id, or msi_mi_res_id parameters.
|
||||
|
||||
- Config: use_msi
|
||||
- Env Var: RCLONE_AZUREBLOB_USE_MSI
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --azureblob-use-emulator
|
||||
|
||||
Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
|
||||
@@ -213,6 +230,33 @@ Uses local storage emulator if provided as 'true' (leave blank if using real azu
|
||||
|
||||
Here are the advanced options specific to azureblob (Microsoft Azure Blob Storage).
|
||||
|
||||
#### --azureblob-msi-object-id
|
||||
|
||||
Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
|
||||
|
||||
- Config: msi_object_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_OBJECT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-msi-client-id
|
||||
|
||||
Client ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
|
||||
|
||||
- Config: msi_client_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-msi-mi-res-id
|
||||
|
||||
Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.
|
||||
|
||||
- Config: msi_mi_res_id
|
||||
- Env Var: RCLONE_AZUREBLOB_MSI_MI_RES_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-endpoint
|
||||
|
||||
Endpoint for the service
|
||||
@@ -225,12 +269,12 @@ Leave blank normally.
|
||||
|
||||
#### --azureblob-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload (<= 256MB).
|
||||
Cutoff for switching to chunked upload (<= 256MB). (Deprecated)
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 256M
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-chunk-size
|
||||
|
||||
@@ -281,6 +325,28 @@ tiering blob to "Hot" or "Cool".
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --azureblob-archive-tier-delete
|
||||
|
||||
Delete archive tier blobs before overwriting.
|
||||
|
||||
Archive tier blobs cannot be updated. So without this flag, if you
|
||||
attempt to update an archive tier blob, then rclone will produce the
|
||||
error:
|
||||
|
||||
can't update archive tier blob without --azureblob-archive-tier-delete
|
||||
|
||||
With this flag set then before rclone attempts to overwrite an archive
|
||||
tier blob, it will delete the existing blob before uploading its
|
||||
replacement. This has the potential for data loss if the upload fails
|
||||
(unlike updating a normal blob) and also may cost more since deleting
|
||||
archive tier blobs early may be chargeable.
|
||||
|
||||
|
||||
- Config: archive_tier_delete
|
||||
- Env Var: RCLONE_AZUREBLOB_ARCHIVE_TIER_DELETE
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --azureblob-disable-checksum
|
||||
|
||||
Don't store MD5 checksum with object metadata.
|
||||
|
||||
@@ -461,7 +461,9 @@ Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
This is probably only useful for a public bucket.
|
||||
Rclone works with private buckets by sending an "Authorization" header.
|
||||
If the custom endpoint rewrites the requests for authentication,
|
||||
e.g., in Cloudflare Workers, this header needs to be handled properly.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.
|
||||
|
||||
- Config: download_url
|
||||
|
||||
@@ -5,6 +5,257 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.54.0 - 2021-02-02
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.54.0)
|
||||
|
||||
* New backends
|
||||
* Compression remote (experimental) (buengese)
|
||||
* Enterprise File Fabric (Nick Craig-Wood)
|
||||
* This work was sponsored by [Storage Made Easy](https://storagemadeeasy.com/)
|
||||
* HDFS (Hadoop Distributed File System) (Yury Stankevich)
|
||||
* Zoho workdrive (buengese)
|
||||
* New Features
|
||||
* Deglobalise the config (Nick Craig-Wood)
|
||||
* Global config now read from the context
|
||||
* This will enable passing of global config via the rc
|
||||
* This work was sponsored by [Digitalis](digitalis.io)
|
||||
* Add `--bwlimit` for upload and download (Nick Craig-Wood)
|
||||
* Obey bwlimit in http Transport for better limiting
|
||||
* Enhance systemd integration (Hekmon)
|
||||
* log level identification, manual activation with flag, automatic systemd launch detection
|
||||
* Don't compile systemd log integration for non unix systems (Benjamin Gustin)
|
||||
* Add a `--download` flag to md5sum/sha1sum/hashsum to force rclone to download and hash files locally (lostheli)
|
||||
* Add `--progress-terminal-title` to print ETA to terminal title (LaSombra)
|
||||
* Make backend env vars show in help as the defaults for backend flags (Nick Craig-Wood)
|
||||
* build
|
||||
* Raise minimum go version to go1.12 (Nick Craig-Wood)
|
||||
* dedupe
|
||||
* Add `--by-hash` to dedupe on content hash not file name (Nick Craig-Wood)
|
||||
* Add `--dedupe-mode list` to just list dupes, changing nothing (Nick Craig-Wood)
|
||||
* Add warning if used on a remote which can't have duplicate names (Nick Craig-Wood)
|
||||
* fs
|
||||
* Add Shutdown optional method for backends (Nick Craig-Wood)
|
||||
* When using `--files-from` check files concurrently (zhucan)
|
||||
* Accumulate stats when using `--dry-run` (Ingo Weiss)
|
||||
* Always show stats when using `--dry-run` or `--interactive` (Nick Craig-Wood)
|
||||
* Add support for flag `--no-console` on windows to hide the console window (albertony)
|
||||
* genautocomplete: Add support to output to stdout (Ingo)
|
||||
* ncdu
|
||||
* Highlight read errors instead of aborting (Claudio Bantaloukas)
|
||||
* Add sort by average size in directory (Adam Plánský)
|
||||
* Add toggle option for average size in directory - key 'a' (Adam Plánský)
|
||||
* Add empty folder flag into ncdu browser (Adam Plánský)
|
||||
* Add `!` (error) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood)
|
||||
* obscure: Make `rclone obscure -` ignore newline at end of line (Nick Craig-Wood)
|
||||
* operations
|
||||
* Add logs when need to upload files to set mod times (Nick Craig-Wood)
|
||||
* Move and copy log name of the destination object in verbose (Adam Plánský)
|
||||
* Add size if known to skipped items and JSON log (Nick Craig-Wood)
|
||||
* rc
|
||||
* Prefer actual listener address if using ":port" or "addr:0" only (Nick Craig-Wood)
|
||||
* Add listener for finished jobs (Aleksandar Jankovic)
|
||||
* serve ftp: Add options to enable TLS (Deepak Sah)
|
||||
* serve http/webdav: Redirect requests to the base url without the / (Nick Craig-Wood)
|
||||
* serve restic: Implement object cache (Nick Craig-Wood)
|
||||
* stats: Add counter for deleted directories (Nick Craig-Wood)
|
||||
* sync: Only print "There was nothing to transfer" if no errors (Nick Craig-Wood)
|
||||
* webui
|
||||
* Prompt user for updating webui if an update is available (Chaitanya Bankanhal)
|
||||
* Fix plugins initialization (negative0)
|
||||
* Bug Fixes
|
||||
* fs
|
||||
* Fix nil pointer on copy & move operations directly to remote (Anagh Kumar Baranwal)
|
||||
* Fix parsing of .. when joining remotes (Nick Craig-Wood)
|
||||
* log: Fix enabling systemd logging when using `--log-file` (Nick Craig-Wood)
|
||||
* check
|
||||
* Make the error count match up in the log message (Nick Craig-Wood)
|
||||
* move: Fix data loss when source and destination are the same object (Nick Craig-Wood)
|
||||
* operations
|
||||
* Fix `--cutoff-mode` hard not cutting off immediately (Nick Craig-Wood)
|
||||
* Fix `--immutable` error message (Nick Craig-Wood)
|
||||
* sync
|
||||
* Fix `--cutoff-mode` soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)
|
||||
* Fix `--immutable` errors retrying many times (Nick Craig-Wood)
|
||||
* Docs
|
||||
* Many fixes and a rewrite of the filtering docs (edwardxml)
|
||||
* Many spelling and grammar fixes (Josh Soref)
|
||||
* Doc fixes for commands delete, purge, rmdir, rmdirs and mount (albertony)
|
||||
* And thanks to these people for many doc fixes too numerous to list
|
||||
* Ameer Dawood, Antoine GIRARD, Bob Bagwill, Christopher Stewart
|
||||
* CokeMine, David, Dov Murik, Durval Menezes, Evan Harris, gtorelly
|
||||
* Ilyess Bachiri, Janne Johansson, Kerry Su, Marcin Zelent,
|
||||
* Martin Michlmayr, Milly, Sơn Trần-Nguyễn
|
||||
* Mount
|
||||
* Update systemd status with cache stats (Hekmon)
|
||||
* Disable bazil/fuse based mount on macOS (Nick Craig-Wood)
|
||||
* Make `rclone mount` actually run `rclone cmount` under macOS (Nick Craig-Wood)
|
||||
* Implement mknod to make NFS file creation work (Nick Craig-Wood)
|
||||
* Make sure we don't call umount more than once (Nick Craig-Wood)
|
||||
* More user friendly mounting as network drive on windows (albertony)
|
||||
* Detect if uid or gid are set in same option string: -o uid=123,gid=456 (albertony)
|
||||
* Don't attempt to unmount if fs has been destroyed already (Nick Craig-Wood)
|
||||
* VFS
|
||||
* Fix virtual entries causing deleted files to still appear (Nick Craig-Wood)
|
||||
* Fix "file already exists" error for stale cache files (Nick Craig-Wood)
|
||||
* Fix file leaks with `--vfs-cache-mode` full and `--buffer-size 0` (Nick Craig-Wood)
|
||||
* Fix invalid cache path on windows when using :backend: as remote (albertony)
|
||||
* Local
|
||||
* Continue listing files/folders when a circular symlink is detected (Manish Gupta)
|
||||
* New flag `--local-zero-size-links` to fix sync on some virtual filesystems (Riccardo Iaconelli)
|
||||
* Azure Blob
|
||||
* Add support for service principals (James Lim)
|
||||
* Add support for managed identities (Brad Ackerman)
|
||||
* Add examples for access tier (Bob Pusateri)
|
||||
* Utilize the streaming capabilities from the SDK for multipart uploads (Denis Neuling)
|
||||
* Fix setting of mime types (Nick Craig-Wood)
|
||||
* Fix crash when listing outside a SAS URL's root (Nick Craig-Wood)
|
||||
* Delete archive tier blobs before update if `--azureblob-archive-tier-delete` (Nick Craig-Wood)
|
||||
* Fix crash on startup (Nick Craig-Wood)
|
||||
* Fix memory usage by upgrading the SDK to v0.13.0 and implementing a TransferManager (Nick Craig-Wood)
|
||||
* Require go1.14+ to compile due to SDK changes (Nick Craig-Wood)
|
||||
* B2
|
||||
* Make NewObject use less expensive API calls (Nick Craig-Wood)
|
||||
* This will improve `--files-from` and `restic serve` in particular
|
||||
* Fixed crash on an empty file name (lluuaapp)
|
||||
* Box
|
||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||
* Chunker
|
||||
* Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)
|
||||
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Fix case-insensitive NewObject, test metadata detection (Ivan Andreev)
|
||||
* Drive
|
||||
* Implement `rclone backend copyid` command for copying files by ID (Nick Craig-Wood)
|
||||
* Added flag `--drive-stop-on-download-limit` to stop transfers when the download limit is exceeded (Anagh Kumar Baranwal)
|
||||
* Implement CleanUp workaround for team drives (buengese)
|
||||
* Allow shortcut resolution and creation to be retried (Nick Craig-Wood)
|
||||
* Log that emptying the trash can take some time (Nick Craig-Wood)
|
||||
* Add xdg office icons to xdg desktop files (Pau Rodriguez-Estivill)
|
||||
* Dropbox
|
||||
* Add support for viewing shared files and folders (buengese)
|
||||
* Enable short lived access tokens (Nick Craig-Wood)
|
||||
* Implement IDer on Objects so `rclone lsf` etc can read the IDs (buengese)
|
||||
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
||||
* Make malformed_path errors from too long files not retriable (Nick Craig-Wood)
|
||||
* Test file name length before upload to fix upload loop (Nick Craig-Wood)
|
||||
* Fichier
|
||||
* Set Features ReadMimeType to true as Object.MimeType is supported (Nick Craig-Wood)
|
||||
* FTP
|
||||
* Add `--ftp-disable-msld` option to ignore MLSD for really old servers (Nick Craig-Wood)
|
||||
* Make `--tpslimit` apply (Nick Craig-Wood)
|
||||
* Google Cloud Storage
|
||||
* Storage class object header support (Laurens Janssen)
|
||||
* Fix anonymous client to use rclone's HTTP client (Nick Craig-Wood)
|
||||
* Fix `Entry doesn't belong in directory "" (same as directory) - ignoring` (Nick Craig-Wood)
|
||||
* Googlephotos
|
||||
* New flag `--gphotos-include-archived` to show archived photos as well (Nicolas Rueff)
|
||||
* Jottacloud
|
||||
* Don't erroneously report support for writing mime types (buengese)
|
||||
* Add support for Telia Cloud (Patrik Nordlén)
|
||||
* Mailru
|
||||
* Accept special folders eg camera-upload (Ivan Andreev)
|
||||
* Avoid prehashing of large local files (Ivan Andreev)
|
||||
* Fix uploads after recent changes on server (Ivan Andreev)
|
||||
* Fix range requests after June 2020 changes on server (Ivan Andreev)
|
||||
* Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
|
||||
* Remove deprecated protocol quirks (Ivan Andreev)
|
||||
* Memory
|
||||
* Fix setting of mime types (Nick Craig-Wood)
|
||||
* Onedrive
|
||||
* Add support for China region operated by 21vianet and other regional suppliers (NyaMisty)
|
||||
* Warn on gateway timeout errors (Nick Craig-Wood)
|
||||
* Fall back to normal copy if server-side copy unavailable (Alex Chen)
|
||||
* Fix server-side copy completely disabled on OneDrive for Business (Cnly)
|
||||
* (business only) workaround to replace existing file on server-side copy (Alex Chen)
|
||||
* Enhance link creation with expiry, scope, type and password (Nick Craig-Wood)
|
||||
* Remove % and # from the set of encoded characters (Alex Chen)
|
||||
* Support addressing site by server-relative URL (kice)
|
||||
* Opendrive
|
||||
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||
* Pcloud
|
||||
* Fix setting of mime types (Nick Craig-Wood)
|
||||
* Premiumizeme
|
||||
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||
* Qingstor
|
||||
* Fix error propagation in CleanUp (Nick Craig-Wood)
|
||||
* Fix rclone cleanup (Nick Craig-Wood)
|
||||
* S3
|
||||
* Added `--s3-disable-http2` to disable http/2 (Anagh Kumar Baranwal)
|
||||
* Complete SSE-C implementation (Nick Craig-Wood)
|
||||
* Fix hashes on small files with AWS:KMS and SSE-C (Nick Craig-Wood)
|
||||
* Add MD5 metadata to objects uploaded with SSE-AWS/SSE-C (Nick Craig-Wood)
|
||||
* Add `--s3-no-head` parameter to minimise transactions on upload (Nick Craig-Wood)
|
||||
* Update docs with a Reducing Costs section (Nick Craig-Wood)
|
||||
* Added error handling for error code 429 indicating too many requests (Anagh Kumar Baranwal)
|
||||
* Add requester pays option (kelv)
|
||||
* Fix copy multipart with v2 auth failing with 'SignatureDoesNotMatch' (Louis Koo)
|
||||
* SFTP
|
||||
* Allow cert based auth via optional pubkey (Stephen Harris)
|
||||
* Allow user to optionally check server hosts key to add security (Stephen Harris)
|
||||
* Defer asking for user passwords until the SSH connection succeeds (Stephen Harris)
|
||||
* Remember entered password in AskPass mode (Stephen Harris)
|
||||
* Implement Shutdown method (Nick Craig-Wood)
|
||||
* Implement keyboard interactive authentication (Nick Craig-Wood)
|
||||
* Make `--tpslimit` apply (Nick Craig-Wood)
|
||||
* Implement `--sftp-use-fstat` for unusual SFTP servers (Nick Craig-Wood)
|
||||
* Sugarsync
|
||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||
* Swift
|
||||
* Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân)
|
||||
* Ensure partially uploaded large files are uploaded unless `--swift-leave-parts-on-error` (Nguyễn Hữu Luân)
|
||||
* Tardigrade
|
||||
* Upgrade to uplink v1.4.1 (Caleb Case)
|
||||
* WebDAV
|
||||
* Updated docs to show streaming to nextcloud is working (Durval Menezes)
|
||||
* Yandex
|
||||
* Set Features WriteMimeType to false as Yandex ignores mime types (Nick Craig-Wood)
|
||||
|
||||
## v1.53.4 - 2021-01-20
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.53.3...v1.53.4)
|
||||
|
||||
* Bug Fixes
|
||||
* accounting: Fix data race in Transferred() (Maciej Zimnoch)
|
||||
* build
|
||||
* Stop tagged releases making a current beta (Nick Craig-Wood)
|
||||
* Upgrade docker buildx action (Matteo Pietro Dazzi)
|
||||
* Add -buildmode to cross-compile.go (Nick Craig-Wood)
|
||||
* Fix docker build by upgrading ilteoood/docker_buildx (Nick Craig-Wood)
|
||||
* Revert GitHub actions brew fix since this is now fixed (Nick Craig-Wood)
|
||||
* Fix brew install --cask syntax for macOS build (Nick Craig-Wood)
|
||||
* Update nfpm syntax to fix build of .deb/.rpm packages (Nick Craig-Wood)
|
||||
* Fix for Windows build errors (Ivan Andreev)
|
||||
* fs: Parseduration: fixed tests to use UTC time (Ankur Gupta)
|
||||
* fshttp: Prevent overlap of HTTP headers in logs (Nathan Collins)
|
||||
* rc
|
||||
* Fix core/command giving 500 internal error (Nick Craig-Wood)
|
||||
* Add Copy method to rc.Params (Nick Craig-Wood)
|
||||
* Fix 500 error when marshalling errors from core/command (Nick Craig-Wood)
|
||||
* plugins: Create plugins files only if webui is enabled. (negative0)
|
||||
* serve http: Fix serving files of unknown length (Nick Craig-Wood)
|
||||
* serve sftp: Fix authentication on one connection blocking others (Nick Craig-Wood)
|
||||
* Mount
|
||||
* Add optional `brew` tag to throw an error when using mount in the binaries installed via Homebrew (Anagh Kumar Baranwal)
|
||||
* Add "." and ".." to directories to match cmount and expectations (Nick Craig-Wood)
|
||||
* VFS
|
||||
* Make cache dir absolute before using it to fix path too long errors (Nick Craig-Wood)
|
||||
* Chunker
|
||||
* Improve detection of incompatible metadata (Ivan Andreev)
|
||||
* Google Cloud Storage
|
||||
* Fix server side copy of large objects (Nick Craig-Wood)
|
||||
* Jottacloud
|
||||
* Fix token renewer to fix long uploads (Nick Craig-Wood)
|
||||
* Fix token refresh failed: is not a regular file error (Nick Craig-Wood)
|
||||
* Pcloud
|
||||
* Only use SHA1 hashes in EU region (Nick Craig-Wood)
|
||||
* Sharefile
|
||||
* Undo Fix backend due to API swapping integers for strings (Nick Craig-Wood)
|
||||
* WebDAV
|
||||
* Fix Open Range requests to fix 4shared mount (Nick Craig-Wood)
|
||||
* Add "Depth: 0" to GET requests to fix bitrix (Nick Craig-Wood)
|
||||
|
||||
## v1.53.3 - 2020-11-19
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)
|
||||
|
||||
@@ -299,6 +299,9 @@ If wrapped remote is case insensitive, the chunker overlay will inherit
|
||||
that property (so you can't have a file called "Hello.doc" and "hello.doc"
|
||||
in the same directory).
|
||||
|
||||
Chunker included in rclone releases up to `v1.54` can sometimes fail to
|
||||
detect metadata produced by recent versions of rclone. We recommend users
|
||||
to keep rclone up-to-date to avoid data corruption.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/chunker/chunker.go then run make backenddocs" >}}
|
||||
### Standard Options
|
||||
|
||||
@@ -39,15 +39,15 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
* [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
|
||||
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
|
||||
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
|
||||
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
|
||||
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
|
||||
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
|
||||
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
|
||||
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
|
||||
* [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
|
||||
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
|
||||
* [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
|
||||
* [rclone dedupe](/commands/rclone_dedupe/) - Interactively find duplicate filenames and delete/rename them.
|
||||
* [rclone delete](/commands/rclone_delete/) - Remove the contents of path.
|
||||
* [rclone delete](/commands/rclone_delete/) - Remove the files in path.
|
||||
* [rclone deletefile](/commands/rclone_deletefile/) - Remove a single file from remote.
|
||||
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.
|
||||
* [rclone gendocs](/commands/rclone_gendocs/) - Output markdown docs for rclone to the directory supplied.
|
||||
@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
|
||||
* [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
|
||||
* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
|
||||
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
|
||||
* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
|
||||
* [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
|
||||
* [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
|
||||
* [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.
|
||||
@@ -65,12 +65,12 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
* [rclone move](/commands/rclone_move/) - Move files from source to dest.
|
||||
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
|
||||
* [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
|
||||
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file
|
||||
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
|
||||
* [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
|
||||
* [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
|
||||
* [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.
|
||||
* [rclone rcd](/commands/rclone_rcd/) - Run rclone listening to remote control commands only.
|
||||
* [rclone rmdir](/commands/rclone_rmdir/) - Remove the path if empty.
|
||||
* [rclone rmdir](/commands/rclone_rmdir/) - Remove the empty directory at path.
|
||||
* [rclone rmdirs](/commands/rclone_rmdirs/) - Remove empty directories under the path.
|
||||
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.
|
||||
* [rclone settier](/commands/rclone_settier/) - Changes storage class/tier of objects in remote.
|
||||
|
||||
@@ -12,10 +12,10 @@ Get quota information from the remote.
|
||||
## Synopsis
|
||||
|
||||
|
||||
Get quota information from the remote, like bytes used/free/quota and bytes
|
||||
used in the trash. Not supported by all remotes.
|
||||
`rclone about` prints quota information about a remote to standard
|
||||
output. The output is typically used, free, quota and trash contents.
|
||||
|
||||
This will print to stdout something like this:
|
||||
E.g. Typical output from `rclone about remote:` is:
|
||||
|
||||
Total: 17G
|
||||
Used: 7.444G
|
||||
@@ -27,16 +27,15 @@ Where the fields are:
|
||||
|
||||
* Total: total size available.
|
||||
* Used: total size used
|
||||
* Free: total amount this user could upload.
|
||||
* Trashed: total amount in the trash
|
||||
* Other: total amount in other storage (eg Gmail, Google Photos)
|
||||
* Free: total space available to this user.
|
||||
* Trashed: total space used by trash
|
||||
* Other: total amount in other storage (e.g. Gmail, Google Photos)
|
||||
* Objects: total number of objects in the storage
|
||||
|
||||
Note that not all the backends provide all the fields - they will be
|
||||
missing if they are not known for that backend. Where it is known
|
||||
that the value is unlimited the value will also be omitted.
|
||||
Not all backends print all fields. Information is not included if it is not
|
||||
provided by a backend. Where the value is unlimited it is omitted.
|
||||
|
||||
Use the --full flag to see the numbers written out in full, eg
|
||||
Applying a `--full` flag to the command prints the bytes in full, e.g.
|
||||
|
||||
Total: 18253611008
|
||||
Used: 7993453766
|
||||
@@ -44,7 +43,7 @@ Use the --full flag to see the numbers written out in full, eg
|
||||
Trashed: 104857602
|
||||
Other: 8849156022
|
||||
|
||||
Use the --json flag for a computer readable output, eg
|
||||
A `--json` flag generates conveniently computer readable output, e.g.
|
||||
|
||||
{
|
||||
"total": 18253611008,
|
||||
@@ -54,6 +53,10 @@ Use the --json flag for a computer readable output, eg
|
||||
"free": 1411001220
|
||||
}
|
||||
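Since the `--json` output is intended to be machine readable, it can be decoded directly. The sketch below is an assumption-laden example rather than part of rclone: the lowercase field names mirror the sample above, and it shells out to an `rclone` binary assumed to be on `PATH`:

```go
// A minimal sketch of consuming the machine-readable output of
// "rclone about remote: --json". Fields a backend does not report are simply
// left at their zero value.
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

type aboutInfo struct {
	Total   int64 `json:"total"`
	Used    int64 `json:"used"`
	Free    int64 `json:"free"`
	Trashed int64 `json:"trashed"`
	Other   int64 `json:"other"`
}

func main() {
	out, err := exec.Command("rclone", "about", "remote:", "--json").Output()
	if err != nil {
		fmt.Println("rclone about failed:", err)
		return
	}
	var info aboutInfo
	if err := json.Unmarshal(out, &info); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("used %d of %d bytes\n", info.Used, info.Total)
}
```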
|
||||
Not all backends support the `rclone about` command.
|
||||
|
||||
See [List of backends that do not support about](https://rclone.org/overview/#optional-features)
|
||||
|
||||
|
||||
```
|
||||
rclone about remote: [flags]
|
||||
|
||||
@@ -27,7 +27,7 @@ for more info).
|
||||
|
||||
rclone backend features remote:
|
||||
|
||||
Pass options to the backend command with -o. This should be key=value or key, eg:
|
||||
Pass options to the backend command with -o. This should be key=value or key, e.g.:
|
||||
|
||||
rclone backend stats remote:path stats -o format=json -o long
|
||||
|
||||
|
||||
@@ -26,10 +26,10 @@ Or like this to output any .txt files in dir or its subdirectories.
|
||||
|
||||
rclone --include "*.txt" cat remote:path/to/dir
|
||||
|
||||
Use the --head flag to print characters only at the start, --tail for
|
||||
the end and --offset and --count to print a section in the middle.
|
||||
Use the `--head` flag to print characters only at the start, `--tail` for
|
||||
the end and `--offset` and `--count` to print a section in the middle.
|
||||
Note that if offset is negative it will count from the end, so
|
||||
--offset -1 --count 1 is equivalent to --tail 1.
|
||||
`--offset -1 --count 1` is equivalent to `--tail 1`.
|
||||
|
||||
|
||||
```
|
||||
|
||||
@@ -16,10 +16,10 @@ Checks the files in the source and destination match. It compares
|
||||
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
|
||||
match. It doesn't alter the source or destination.
|
||||
|
||||
If you supply the --size-only flag, it will only compare the sizes not
|
||||
If you supply the `--size-only` flag, it will only compare the sizes not
|
||||
the hashes as well. Use this for a quick check.
|
||||
|
||||
If you supply the --download flag, it will download the data from
|
||||
If you supply the `--download` flag, it will download the data from
|
||||
both remotes and check them against each other on the fly. This can
|
||||
be useful for remotes that don't support hashes or if you really want
|
||||
to check all the data.
|
||||
@@ -29,7 +29,7 @@ the source match the files in the destination, not the other way
|
||||
around. This means that extra files in the destination that are not in
|
||||
the source will not be detected.
|
||||
|
||||
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
|
||||
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
|
||||
and `--error` flags write paths, one per line, to the file name (or
|
||||
stdout if it is `-`) supplied. What they write is described in the
|
||||
help below. For example `--differ` will write all paths which are
|
||||
@@ -55,6 +55,7 @@ rclone check source:path dest:path [flags]
|
||||
```
|
||||
--combined string Make a combined report of changes to this file
|
||||
--differ string Report all non-matching files to this file
|
||||
--download Check by downloading rather than with hash.
|
||||
--error string Report all files with errors (hashing or reading) to this file
|
||||
-h, --help help for check
|
||||
--match string Report all matching files to this file
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
---
|
||||
title: "rclone cleanup"
|
||||
description: "Clean up the remote if possible"
|
||||
description: "Clean up the remote if possible."
|
||||
slug: rclone_cleanup
|
||||
url: /commands/rclone_cleanup/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone cleanup
|
||||
|
||||
Clean up the remote if possible
|
||||
Clean up the remote if possible.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ whether the password is already obscured or not and put unobscured
|
||||
passwords into the config file. If you want to be 100% certain that
|
||||
the passwords get obscured then use the "--obscure" flag, or if you
|
||||
are 100% certain you are already passing obscured passwords then use
|
||||
"--no-obscure". You can also set osbscured passwords using the
|
||||
"--no-obscure". You can also set obscured passwords using the
|
||||
"rclone config password" command.
|
||||
|
||||
So for example if you wanted to configure a Google Drive remote but
|
||||
|
||||
@@ -9,10 +9,6 @@ url: /commands/rclone_config_delete/
|
||||
|
||||
Delete an existing remote `name`.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Delete an existing remote `name`.
|
||||
|
||||
```
|
||||
rclone config delete `name` [flags]
|
||||
```
|
||||
|
||||
@@ -9,10 +9,6 @@ url: /commands/rclone_config_dump/
|
||||
|
||||
Dump the config file as JSON.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Dump the config file as JSON.
|
||||
|
||||
```
|
||||
rclone config dump [flags]
|
||||
```
|
||||
|
||||
@@ -9,10 +9,6 @@ url: /commands/rclone_config_file/
|
||||
|
||||
Show path of configuration file in use.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Show path of configuration file in use.
|
||||
|
||||
```
|
||||
rclone config file [flags]
|
||||
```
|
||||
|
||||
@@ -9,10 +9,6 @@ url: /commands/rclone_config_providers/
|
||||
|
||||
List in JSON format all the providers and options.
|
||||
|
||||
## Synopsis
|
||||
|
||||
List in JSON format all the providers and options.
|
||||
|
||||
```
|
||||
rclone config providers [flags]
|
||||
```
|
||||
|
||||
@@ -9,10 +9,6 @@ url: /commands/rclone_config_show/
|
||||
|
||||
Print (decrypted) config file, or the config for a single remote.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Print (decrypted) config file, or the config for a single remote.
|
||||
|
||||
```
|
||||
rclone config show [<remote>] [flags]
|
||||
```
|
||||
|
||||
@@ -30,7 +30,7 @@ whether the password is already obscured or not and put unobscured
|
||||
passwords into the config file. If you want to be 100% certain that
|
||||
the passwords get obscured then use the "--obscure" flag, or if you
|
||||
are 100% certain you are already passing obscured passwords then use
|
||||
"--no-obscure". You can also set osbscured passwords using the
|
||||
"--no-obscure". You can also set obscured passwords using the
|
||||
"rclone config password" command.
|
||||
|
||||
If the remote uses OAuth the token will be updated, if you don't
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
---
|
||||
title: "rclone copy"
|
||||
description: "Copy files from source to dest, skipping already copied"
|
||||
description: "Copy files from source to dest, skipping already copied."
|
||||
slug: rclone_copy
|
||||
url: /commands/rclone_copy/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone copy
|
||||
|
||||
Copy files from source to dest, skipping already copied
|
||||
Copy files from source to dest, skipping already copied.
|
||||
|
||||
## Synopsis
|
||||
|
||||
@@ -44,7 +44,7 @@ Not to
|
||||
destpath/sourcepath/two.txt
|
||||
|
||||
If you are familiar with `rsync`, rclone always works as if you had
|
||||
written a trailing / - meaning "copy the contents of this directory".
|
||||
written a trailing `/` - meaning "copy the contents of this directory".
|
||||
This applies to all commands and whether you are talking about the
|
||||
source or destination.
|
||||
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
---
|
||||
title: "rclone copyto"
|
||||
description: "Copy files from source to dest, skipping already copied"
|
||||
description: "Copy files from source to dest, skipping already copied."
|
||||
slug: rclone_copyto
|
||||
url: /commands/rclone_copyto/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone copyto
|
||||
|
||||
Copy files from source to dest, skipping already copied
|
||||
Copy files from source to dest, skipping already copied.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ the source match the files in the destination, not the other way
|
||||
around. This means that extra files in the destination that are not in
|
||||
the source will not be detected.
|
||||
|
||||
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--src-only`
|
||||
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
|
||||
and `--error` flags write paths, one per line, to the file name (or
|
||||
stdout if it is `-`) supplied. What they write is described in the
|
||||
help below. For example `--differ` will write all paths which are
|
||||
|
||||
@@ -23,6 +23,9 @@ use it like this
|
||||
|
||||
rclone cryptdecode --reverse encryptedremote: filename1 filename2
|
||||
|
||||
Another way to accomplish this is by using the `rclone backend encode` (or `decode`) command.
|
||||
See the documentation on the `crypt` overlay for more info.
```
rclone cryptdecode encryptedremote: encryptedfilename [flags]

@@ -15,28 +15,37 @@ Interactively find duplicate filenames and delete/rename them.

By default `dedupe` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different.
different. This is known as deduping by name.

This is only useful with backends like Google Drive which can have
duplicate file names. It can be run on wrapping backends (eg crypt) if
they wrap a backend which supports duplicate file names.
Deduping by name is only useful with backends like Google Drive which
can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file
names.

In the first pass it will merge directories with the same name. It
will do this iteratively until all the identically named directories
have been merged.
However if --by-hash is passed in then dedupe will find files with
duplicate hashes instead which will work on any backend which supports
at least one hash. This can be used to find files with duplicate
content. This is known as deduping by hash.

In the second pass, for every group of duplicate file names, it will
delete all but one identical files it finds without confirmation.
This means that for most duplicated files the `dedupe`
command will not be interactive.
If deduping by name, first rclone will merge directories with the same
name. It will do this iteratively until all the identically named
directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical files it finds without
confirmation. This means that for most duplicated files the `dedupe` command will not be interactive.

`dedupe` considers files to be identical if they have the
same hash. If the backend does not support hashes (eg crypt wrapping
same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the `--size-only` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.

Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default rclone will
interactively query the user for each one.

**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

@@ -68,7 +77,7 @@ Now the `dedupe` session
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicates names
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
@@ -99,6 +108,7 @@ Dedupe can be run non interactively using the `--dedupe-mode` flag or by using a
* `--dedupe-mode largest` - removes identical files then keeps the largest one.
* `--dedupe-mode smallest` - removes identical files then keeps the smallest one.
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
* `--dedupe-mode list` - lists duplicate dirs and files only and changes nothing.

For example to rename all the identically named photos in your Google Photos directory, do

@@ -116,6 +126,7 @@ rclone dedupe [mode] remote:path [flags]
## Options

```
--by-hash Find indentical hashes rather than names
--dedupe-mode string Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename. (default "interactive")
-h, --help help for dedupe
```
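Two illustrative invocations of the flags above (remote paths are placeholders): the first renames identically named files instead of prompting, the second dedupes by content hash rather than by name:

    rclone dedupe --dedupe-mode rename "drive:Google Photos"
    rclone dedupe --by-hash remote:path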
@@ -1,13 +1,13 @@
---
title: "rclone delete"
description: "Remove the contents of path."
description: "Remove the files in path."
slug: rclone_delete
url: /commands/rclone_delete/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/delete/ and as part of making a release run "make commanddocs"
---
# rclone delete

Remove the contents of path.
Remove the files in path.

## Synopsis

@@ -15,20 +15,21 @@ Remove the contents of path.
Remove the files in path. Unlike `purge` it obeys include/exclude
filters so can be used to selectively delete files.

`rclone delete` only deletes objects but leaves the directory structure
`rclone delete` only deletes files but leaves the directory structure
alone. If you want to delete a directory and all of its contents use
`rclone purge`
the `purge` command.

If you supply the --rmdirs flag, it will remove all empty directories along with it.
If you supply the `--rmdirs` flag, it will remove all empty directories along with it.
You can also use the separate command `rmdir` or `rmdirs` to
delete empty directories only.

Eg delete all files bigger than 100MBytes

Check what would be deleted first (use either)
For example, to delete all files bigger than 100MBytes, you may first want to check what
would be deleted (use either):

    rclone --min-size 100M lsl remote:path
    rclone --dry-run --min-size 100M delete remote:path

Then delete
Then proceed with the actual delete:

    rclone --min-size 100M delete remote:path
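As a hedged variant, adding `--rmdirs` would also remove any directories left empty by the delete (the path is a placeholder):

    rclone delete remote:path --rmdirs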
@@ -15,7 +15,7 @@ Output bash completion script for rclone.
Generates a bash shell autocompletion script for rclone.

This writes to /etc/bash_completion.d/rclone by default so will
probably need to be run with sudo or as root, eg
probably need to be run with sudo or as root, e.g.

    sudo rclone genautocomplete bash

@@ -27,7 +27,8 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.
If output_file is "-", then the output will be written to stdout.
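So, for example, the script can be written to a file of your choice or printed to stdout (the file path here is only illustrative):

    rclone genautocomplete bash ~/rclone_completion.bash
    rclone genautocomplete bash -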
```
rclone genautocomplete bash [output_file] [flags]

@@ -15,7 +15,7 @@ Output fish completion script for rclone.
Generates a fish autocompletion script for rclone.

This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, eg
probably need to be run with sudo or as root, e.g.

    sudo rclone genautocomplete fish

@@ -27,7 +27,8 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.
If output_file is "-", then the output will be written to stdout.

```
rclone genautocomplete fish [output_file] [flags]

@@ -15,7 +15,7 @@ Output zsh completion script for rclone.
Generates a zsh autocompletion script for rclone.

This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, eg
probably need to be run with sudo or as root, e.g.

    sudo rclone genautocomplete zsh

@@ -27,7 +27,8 @@ them directly
If you supply a command line argument the script will be written
there.

If output_file is `-`, then the output will be written to stdout.
If output_file is "-", then the output will be written to stdout.

```
rclone genautocomplete zsh [output_file] [flags]

@@ -38,12 +38,12 @@ There are several related list commands
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.

Note that `ls` and `lsl` recurse by default - use "--max-depth 1" to stop the recursion.
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use "-R" to make them recurse.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
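For example (remote path is a placeholder), to stop `ls` recursing, or to make `lsd` recurse:

    rclone ls remote:path --max-depth 1
    rclone lsd -R remote:path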
@@ -48,12 +48,12 @@ There are several related list commands
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.

Note that `ls` and `lsl` recurse by default - use "--max-depth 1" to stop the recursion.
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use "-R" to make them recurse.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).

@@ -1,13 +1,13 @@
---
title: "rclone lsf"
description: "List directories and objects in remote:path formatted for parsing"
description: "List directories and objects in remote:path formatted for parsing."
slug: rclone_lsf
url: /commands/rclone_lsf/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
---
# rclone lsf

List directories and objects in remote:path formatted for parsing
List directories and objects in remote:path formatted for parsing.

## Synopsis

@@ -38,7 +38,7 @@ output:
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name
T - tier of storage if known, eg "Hot" or "Cool"
T - tier of storage if known, e.g. "Hot" or "Cool"

So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
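For instance, to put the path last as described above:

    rclone lsf remote:path --format "tsp"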
@@ -121,12 +121,12 @@ There are several related list commands
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.

Note that `ls` and `lsl` recurse by default - use "--max-depth 1" to stop the recursion.
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use "-R" to make them recurse.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).

@@ -41,11 +41,11 @@ may be repeated). If --hash-type is set then it implies --hash.

If --no-modtime is specified then ModTime will be blank. This can
speed things up on remotes where reading the ModTime takes an extra
request (eg s3, swift).
request (e.g. s3, swift).

If --no-mimetype is specified then MimeType will be blank. This can
speed things up on remotes where reading the MimeType takes an extra
request (eg s3, swift).
request (e.g. s3, swift).

If --encrypted is not specified the Encrypted won't be emitted.
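For example, both flags can be combined to speed up a listing on such remotes (remote path is a placeholder):

    rclone lsjson remote:path --no-modtime --no-mimetype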
@@ -67,9 +67,9 @@ If the directory is a bucket in a bucket based backend, then
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
nearest millisecond (eg Google Drive) then 3 digits will always be
nearest millisecond (e.g. Google Drive) then 3 digits will always be
shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are
accurate to the nearest second (Dropbox, Box, WebDav etc) no digits
accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
will be shown ("2017-05-31T16:15:57+01:00").

The whole output can be processed as a JSON blob, or alternatively it
@@ -89,12 +89,12 @@ There are several related list commands
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.

Note that `ls` and `lsl` recurse by default - use "--max-depth 1" to stop the recursion.
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use "-R" to make them recurse.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).

@@ -38,12 +38,12 @@ There are several related list commands
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.

Note that `ls` and `lsl` recurse by default - use "--max-depth 1" to stop the recursion.
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use "-R" to make them recurse.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).

@@ -9,10 +9,6 @@ url: /commands/rclone_mkdir/

Make the path if it doesn't already exist.

## Synopsis

Make the path if it doesn't already exist.

```
rclone mkdir remote:path [flags]
```

@@ -18,37 +18,51 @@ FUSE.

First set up your remote using `rclone config`. Check it works with `rclone ls` etc.

You can either run mount in foreground mode or background (daemon) mode. Mount runs in
foreground mode by default, use the --daemon flag to specify background mode behaviour.
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
Mount runs in foreground mode by default, use the `--daemon` flag to specify background mode.
You can only run mount in foreground mode on Windows.

On Linux/macOS/FreeBSD Start the mount like this where `/path/to/local/mount`
is an **empty** **existing** directory.
On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
is an **empty** **existing** directory:

    rclone mount remote:path/to/files /path/to/local/mount

Or on Windows like this where `X:` is an unused drive letter
or use a path to **non-existent** directory.
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\to\nonexistent\directory`
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\to\nonexistent\directory

When running in background mode the user will have to stop the mount manually (specified below).
    rclone mount remote:path/to/files \\cloud\remote

When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount is automatically stopped.
a SIGINT or SIGTERM signal, the mount should be automatically stopped.

The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.

Stopping the mount manually:
When running in background mode the user will have to stop the mount manually:

    # Linux
    fusermount -u /path/to/local/mount
    # OS X
    umount /path/to/local/mount

The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.
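To run the mount in background mode instead, add the `--daemon` flag described above (paths are placeholders):

    rclone mount remote:path/to/files /path/to/local/mount --daemon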
The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report the used size only,
then an additional 1PB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1PB is set as both the total and the free size.

**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.

## Installing on Windows

To run rclone mount on Windows, you will need to
@@ -57,10 +71,110 @@ download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with
[cgofuse](https://github.com/billziss-gh/cgofuse). Both of these
packages are by Bill Zissimopoulos who was very helpful during the
implementation of rclone mount for Windows.
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone mount for Windows.

### Mounting modes on windows

Unlike other operating systems, Microsoft Windows provides a different filesystem
type for network and fixed drives. It optimises access on the assumption fixed
disk drives are fast and reliable, while network drives have relatively high latency
and less reliability. Some settings can also be differentiated between the two types,
for example that Windows Explorer should just display icons and not create preview
thumbnails for image and video files on network drives.

In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.

When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path - which must be **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:

    rclone mount remote:path/to/files *
    rclone mount remote:path/to/files X:
    rclone mount remote:path/to/files C:\path\to\nonexistent\directory
    rclone mount remote:path/to/files X:

Option `--volname` can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.

To mount as network drive, you can add option `--network-mode`
to your mount command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.

    rclone mount remote:path/to/files X: --network-mode

A volume name specified with `--volname` will be used to create the network share path.
A complete UNC path, such as `\\cloud\remote`, optionally with path
`\\cloud\remote\madeup\path`, will be used as is. Any other
string will be used as the share part, after a default prefix `\\server\`.
If no volume name is specified then `\\server\share` will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
`\\server\share` will be reported as the remote UNC path by
`net use` etc, just like a normal network drive mapping.

If you specify a full network share UNC path with `--volname`, this will implicitely
set the `--network-mode` option, so the following two examples have same result:

    rclone mount remote:path/to/files X: --network-mode
    rclone mount remote:path/to/files X: --volname \\server\share

You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with `*` and use that as
mountpoint, and instead use the UNC path specified as the volume name, as if it were
specified with the `--volname` option. This will also implicitely set
the `--network-mode` option. This means the following two examples have same result:

    rclone mount remote:path/to/files \\cloud\remote
    rclone mount remote:path/to/files * --volname \\cloud\remote

There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
`--fuse-flag --VolumePrefix=\server\share`. Note that the path
must be with just a single backslash prefix in this case.

*Note:* In previous versions of rclone this was the only supported method.

[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)

See also [Limitations](#limitations) section below.

### Windows filesystem permissions

The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).

The mounted filesystem will normally get three entries in its access-control list (ACL),
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.

The permissions on each entry will be set according to
[options](#options) `--dir-perms` and `--file-perms`,
which takes a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
where the default corresponds to `--file-perms 0666 --dir-perms 0777`.

Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes" (WinFsp does not support extended attributes,
see [this](https://github.com/billziss-gh/winfsp/wiki/NTFS-Compatibility)).
Windows will then show this as basic permission "Special" instead of "Write",
because "Write" includes the "write extended attributes" permission.
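As an illustrative, not prescriptive, combination of the options mentioned in this section:

    rclone mount remote:path/to/files X: --file-perms 0644 --dir-perms 0755 -o UserName=user123 -o GroupName="Authenticated Users"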
### Windows caveats

@@ -78,43 +192,15 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).

### Mount as a network drive

By default, rclone will mount the remote as a normal drive. However,
you can also mount it as a **Network Drive** (or **Network Share**, as
mentioned in some places)

Unlike other systems, Windows provides a different filesystem type for
network drives. Windows and other programs treat the network drives
and fixed/removable drives differently: In network drives, many I/O
operations are optimized, as the high latency and low reliability
(compared to a normal drive) of a network is expected.

Although many people prefer network shares to be mounted as normal
system drives, this might cause some issues, such as programs not
working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those,
consider mounting rclone remotes as network shares, as Windows expects
normal drives to be fast and reliable, while cloud storage is far from
that. See also [Limitations](#limitations) section below for more
info

Add "--fuse-flag --VolumePrefix=\server\share" to your "mount"
command, **replacing "share" with any other name of your choice if you
are mounting more than one remote**. Otherwise, the mountpoints will
conflict and your mounted filesystems will overlap.

[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)

## Limitations

Without the use of "--vfs-cache-mode" this can only write files
Without the use of `--vfs-cache-mode` this can only write files
sequentially, it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
"--vfs-cache-mode writes" or "--vfs-cache-mode full". See the [File
Caching](#vfs-file-caching) section for more info.
`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
See the [File Caching](#file-caching) section for more info.

The bucket based remotes (eg Swift, S3, Google Compute Storage, B2,
The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
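For many applications it is therefore worth enabling the file cache, for example:

    rclone mount remote:path /path/to/local/mount --vfs-cache-mode writes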
@@ -127,15 +213,15 @@ File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone mount
can't use retries in the same way without making local copies of the
uploads. Look at the [file caching](#vfs-file-caching)
uploads. Look at the [file caching](#file-caching)
for solutions to make mount more reliable.

## Attribute caching

You can use the flag --attr-timeout to set the time the kernel caches
the attributes (size, modification time etc) for directory entries.
You can use the flag `--attr-timeout` to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.

The default is "1s" which caches files just long enough to avoid
The default is `1s` which caches files just long enough to avoid
too many callbacks to rclone from the kernel.

In theory 0s should be the correct value for filesystems which can
@@ -146,14 +232,14 @@ few problems such as
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).

The kernel can cache the info about a file for the time given by
"--attr-timeout". You may see corruption if the remote file changes
`--attr-timeout`. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With "--attr-timeout 1s" this is
very unlikely but not impossible. The higher you set "--attr-timeout"
or a file with garbage on the end. With `--attr-timeout 1s` this is
very unlikely but not impossible. The higher you set `--attr-timeout`
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.

If you set it higher ('10s' or '1m' say) then the kernel will call
If you set it higher (`10s` or `1m` say) then the kernel will call
back to rclone less often making it more efficient, however there is
more chance of the corruption issue above.
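For example, accepting that trade-off for efficiency (the value is illustrative):

    rclone mount remote:path /path/to/local/mount --attr-timeout 10s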
@@ -164,7 +250,7 @@ This is the same as setting the attr_timeout option in mount.fuse.

## Filters

Rclone's filters can be used to select a subset of the
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.

## systemd
@@ -175,28 +261,25 @@ after the mountpoint has been successfully set up.
Units having the rclone mount service specified as a requirement
will see all files and folders immediately in this mode.

## chunked reading ###
## chunked reading

--vfs-read-chunk-size will enable reading the source objects in parts.
`--vfs-read-chunk-size` will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read at the cost of an increased number of requests.

When --vfs-read-chunk-size-limit is also specified and greater than --vfs-read-chunk-size,
the chunk size for each open file will get doubled for each chunk read, until the
specified value is reached. A value of -1 will disable the limit and the chunk size will
grow indefinitely.
When `--vfs-read-chunk-size-limit` is also specified and greater than
`--vfs-read-chunk-size`, the chunk size for each open file will get doubled
for each chunk read, until the specified value is reached. A value of `-1` will disable
the limit and the chunk size will grow indefinitely.

With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0`
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
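Putting the example values above onto the command line:

    rclone mount remote:path /path/to/local/mount --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 0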
## VFS - Virtual File System

Mount uses rclone's VFS layer. This adapts the cloud storage objects
This command uses the VFS layer. This adapts the cloud storage objects
that rclone uses into something which looks much more like a disk
filing system.

@@ -290,9 +373,9 @@ second. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.

If using --vfs-cache-max-size note that the cache may exceed this size
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
--vfs-cache-poll-interval. Secondly because open files cannot be
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.

### --vfs-cache-mode off
@@ -340,7 +423,7 @@ In this mode all reads and writes are buffered to and from disk. When
data is read from the remote this is buffered to disk as well.

In this mode the files in the cache will be sparse files and rclone
will keep track of which bits of the files it has dowloaded.
will keep track of which bits of the files it has downloaded.

So if an application only reads the starts of each file, then rclone
will only buffer the start of the file. These files will appear to be
@@ -357,6 +440,11 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.

**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.

## VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -392,6 +480,12 @@ on disk cache file.
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)

When using VFS write caching (--vfs-cache-mode with value writes or full),
the global flag --transfers can be set to adjust the number of parallel uploads of
modified files from cache (the related global flag --checkers have no effect on mount).

    --transfers int Number of file transfers to run in parallel. (default 4)
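For instance, a hypothetical mount that uploads more modified files from the cache in parallel:

    rclone mount remote:path /path/to/local/mount --vfs-cache-mode writes --transfers 8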
## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
@@ -405,7 +499,7 @@ It is not allowed for two files in the same directory to differ only by case.
Usually file systems on macOS are case-insensitive. It is possible to make macOS
file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
The `--vfs-case-insensitive` mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.
@@ -435,30 +529,33 @@ rclone mount remote:path /path/to/mountpoint [flags]
## Options

```
--allow-non-empty Allow mounting over a non-empty directory (not Windows).
--allow-other Allow access to other users.
--allow-root Allow access to root user.
--async-read Use asynchronous reads. (default true)
--allow-non-empty Allow mounting over a non-empty directory. Not supported on Windows.
--allow-other Allow access to other users. Not supported on Windows.
--allow-root Allow access to root user. Not supported on Windows.
--async-read Use asynchronous reads. Not supported on Windows. (default true)
--attr-timeout duration Time for which file/directory attributes are cached. (default 1s)
--daemon Run mount as a daemon (background mode).
--daemon-timeout duration Time limit for rclone to respond to kernel (not supported by all OSes).
--daemon Run mount as a daemon (background mode). Not supported on Windows.
--daemon-timeout duration Time limit for rclone to respond to kernel. Not supported on Windows.
--debug-fuse Debug the FUSE internals - needs -v.
--default-permissions Makes kernel enforce access control based on the file mode.
--default-permissions Makes kernel enforce access control based on the file mode. Not supported on Windows.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--file-perms FileMode File permissions (default 0666)
--fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.
--gid uint32 Override the gid field set by the filesystem. (default 1000)
--gid uint32 Override the gid field set by the filesystem. Not supported on Windows. (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads. (default 128k)
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads. Not supported on Windows. (default 128k)
--network-mode Mount as remote network drive, instead of fixed disk drive. Supported on Windows only
--no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files.
--noappledouble Ignore Apple Double (._) and .DS_Store files. Supported on OSX only. (default true)
--noapplexattr Ignore all "com.apple.*" extended attributes. Supported on OSX only.
-o, --option stringArray Option for libfuse/WinFsp. Repeat if required.
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--uid uint32 Override the uid field set by the filesystem. (default 1000)
--umask int Override the permission bits set by the filesystem.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows.
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
@@ -470,8 +567,8 @@ rclone mount remote:path /path/to/mountpoint [flags]
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
--volname string Set the volume name (not supported by all OSes).
--write-back-cache Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.
--volname string Set the volume name. Supported on Windows and OSX only.
--write-back-cache Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.
```

See the [global flags page](/flags/) for global options not listed here.
@@ -14,15 +14,15 @@ Move files from source to dest.

Moves the contents of the source directory to the destination
directory. Rclone will error if the source and destination overlap and
the remote does not support a server side directory move operation.
the remote does not support a server-side directory move operation.

If no filters are in use and if possible this will server side move
If no filters are in use and if possible this will server-side move
`source:path` into `dest:path`. After this `source:path` will no
longer exist.

Otherwise for each file in `source:path` selected by the filters (if
any) this will move it into `dest:path`. If possible a server side
move will be used, otherwise it will copy it (server side if possible)
any) this will move it into `dest:path`. If possible a server-side
move will be used, otherwise it will copy it (server-side if possible)
into `dest:path` then delete the original (if no errors on copy) in
`source:path`.
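A minimal example of the operation described above (remote names are placeholders):

    rclone move source:path dest:path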
@@ -30,9 +30,10 @@ Here are the keys - press '?' to toggle the help on and off
←,h to return
c toggle counts
g toggle graph
n,s,C sort by name,size,count
a toggle average size in directory
n,s,C,A sort by name,size,count,average size
d delete file/directory
y copy current path to clipbard
y copy current path to clipboard
Y display current path
^L refresh screen
? to toggle help on and off

@@ -1,13 +1,13 @@
---
title: "rclone obscure"
description: "Obscure password for use in the rclone config file"
description: "Obscure password for use in the rclone config file."
slug: rclone_obscure
url: /commands/rclone_obscure/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
---
# rclone obscure

Obscure password for use in the rclone config file
Obscure password for use in the rclone config file.

## Synopsis

@@ -23,7 +23,8 @@ the config file. However it is very hard to shoulder surf a 64
character hex token.

This command can also accept a password through STDIN instead of an
argument by passing a hyphen as an argument. Example:
argument by passing a hyphen as an argument. This will use the first
line of STDIN as the password not including the trailing newline.

    echo "secretpassword" | rclone obscure -

@@ -13,8 +13,9 @@ Remove the path and all of its contents.

Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use `delete` if
you want to selectively delete files.
include/exclude filters - everything will be removed. Use the `delete`
command if you want to selectively delete files. To delete empty directories only,
use command `rmdir` or `rmdirs`.

**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

Some files were not shown because too many files have changed in this diff