Mirror of https://github.com/rclone/rclone.git (synced 2026-01-24 05:13:23 +00:00)

Compare commits: v1.61.1...fix-vfs-mo (130 commits)
Commits (SHA1):

1a66ed9315, f07abea072, fb4600f6f9, 1d0c75b0c2, 2e435af4de, 62a7765e57,
5ad942ed87, 96609e3d6e, 28a8ebce5b, 17854663de, a4a6b5930a, e9ae620844,
e7cfb8ad8e, 786a1c212c, 03bc270730, 7cef042231, 1155cc0d3f, 13c3f67ab0,
ab2cdd840f, 143285e2b7, 19e8c8d42a, de9c4a3611, d7ad13d929, f9d50f677d,
3641993fab, 93d3ae04c7, e25e9fbf22, fe26d6116d, 06e1e18793, 23d17b76be,
dfe4e78a77, 59e7982040, c6b0587dc0, 9baa4d1c3c, a5390dbbeb, 019a486d5b,
34ce11d2be, 88e8ede0aa, f6f250c507, 2c45e901f0, 9e1443799a, dd72aff98a,
5039f9be48, 56b582cdb9, 745c0af571, 2dabbe83ac, 90561176fb, a0b5d77427,
ce8b1cd861, 5bd6e3d1e9, d4d7a6a55e, b3e0672535, a407437e92, 0164a4e686,
b8ea79042c, 49a6533bc1, 21459f3cc0, 04f7e52803, 25535e5eac, c37b6b1a43,
0328878e46, 67132ecaec, 120cfcde70, 37db2a0e44, f92816899c, 5386ffc8f2,
3898d534f3, 34333d9fa8, 14e852ee9d, 37623732c6, adbcc83fa5, d4ea6632ca,
21849fd0d9, ac20ee41ca, d376fb1df2, 8e63a08d7f, 3aee5b3c55, 0145d98314,
4c03c71a5f, 82e2801aae, dc5d5de35c, 41cc4530f3, c5acb10151, 8c8ee9905c,
e2afd00118, 5b82576dbf, b9d9f9edb0, c40b706186, 351fc609b1, a6f6a9dcdf,
267a09001d, 37db2abecd, 0272d44192, 6b17044f8e, 844e8fb8bd, ca9182d6ae,
ec20c48523, ec68b72387, 2d1c2725e4, 1680c5af8f, 88c0d78639, 559157cb58,
10bf8a769e, f31ab6d178, f08bb5bf66, e2886aaddf, 71227986db, 8c6ff1fa7e,
9d1b786a39, 8ee0e2efb1, d66f5e8db0, 02d6d28ec4, 1cafc12e8c, 98fa93f6d1,
c6c67a29eb, ad5395e953, 1925ceaade, 8aebf12797, ffeefe8a56, 81ce5e4961,
638058ef91, b1b62f70d3, 823d89af9a, 448fff9a04, 6257a6035c, 54c0f17f2a,
d049cbb59e, 00e853144e, 5ac8cfee56, 496ae8adf6
.github/workflows/build.yml (vendored, 40 changes)
@@ -15,22 +15,24 @@ on:
   workflow_dispatch:
     inputs:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
         required: true
         default: true

 jobs:
   build:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.20'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -41,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.20'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.19'
+            go: '1.20'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -57,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.19'
+            go: '1.20'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.19'
+            go: '1.20'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -74,23 +76,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.20'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.17
-            os: ubuntu-latest
-            go: '1.17'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.18
             os: ubuntu-latest
             go: '1.18'
             quicktest: true
             racequicktest: true

+          - job_name: go1.19
+            os: ubuntu-latest
+            go: '1.19'
+            quicktest: true
+            racequicktest: true

     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -122,7 +124,7 @@ jobs:
         sudo modprobe fuse
         sudo chmod 666 /dev/fuse
         sudo chown root:$USER /etc/fuse.conf
-        sudo apt-get install fuse libfuse-dev rpm pkg-config
+        sudo apt-get install fuse3 libfuse-dev rpm pkg-config
       if: matrix.os == 'ubuntu-latest'

     - name: Install Libraries on macOS
@@ -218,7 +220,7 @@ jobs:
       if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   lint:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -237,7 +239,7 @@ jobs:
     - name: Install Go
       uses: actions/setup-go@v3
       with:
-        go-version: 1.19
+        go-version: '1.20'
         check-latest: true

     - name: Install govulncheck
@@ -247,7 +249,7 @@ jobs:
       run: govulncheck ./...

   android:
-    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
@@ -262,7 +264,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v3
       with:
-        go-version: 1.19
+        go-version: '1.20'

     - name: Go module cache
       uses: actions/cache@v3
@@ -74,8 +74,7 @@ Set vars
 First make the release branch. If this is a second point release then
 this will be done already.

-  * git branch ${BASE_TAG} ${BASE_TAG}-stable
-  * git co ${BASE_TAG}-stable
+  * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
   * make startstable

 Now
@@ -4,32 +4,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

-/* FIXME
-
-Note these Azure SDK bugs which are affecting the backend
-
-azblob UploadStream produces panic: send on closed channel if input stream has error #19612
-https://github.com/Azure/azure-sdk-for-go/issues/19612
-- FIXED by re-implementing UploadStream
-
-azblob: when using SharedKey credentials, can't reference some blob names with ? in #19613
-https://github.com/Azure/azure-sdk-for-go/issues/19613
-- FIXED by url encoding getBlobSVC and getBlockBlobSVC
-
-Azure Blob Storage paths are not URL-escaped #19475
-https://github.com/Azure/azure-sdk-for-go/issues/19475
-- FIXED by url encoding getBlobSVC and getBlockBlobSVC
-
-Controlling TransferManager #19579
-https://github.com/Azure/azure-sdk-for-go/issues/19579
-- FIXED by re-implementing UploadStream
-
-azblob: blob.StartCopyFromURL doesn't work with UTF-8 characters in the source blob #19614
-https://github.com/Azure/azure-sdk-for-go/issues/19614
-- FIXED by url encoding getBlobSVC and getBlockBlobSVC
-
-*/
-
 import (
 	"bytes"
 	"context"
@@ -959,18 +933,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 // getBlobSVC creates a blob client
 func (f *Fs) getBlobSVC(container, containerPath string) *blob.Client {
-	// FIXME the urlEncode here is a workaround for
-	// https://github.com/Azure/azure-sdk-for-go/issues/19613
-	// https://github.com/Azure/azure-sdk-for-go/issues/19475
-	return f.cntSVC(container).NewBlobClient(urlEncode(containerPath))
+	return f.cntSVC(container).NewBlobClient(containerPath)
 }

 // getBlockBlobSVC creates a block blob client
 func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client {
-	// FIXME the urlEncode here is a workaround for
-	// https://github.com/Azure/azure-sdk-for-go/issues/19613
-	// https://github.com/Azure/azure-sdk-for-go/issues/19475
-	return f.cntSVC(container).NewBlockBlobClient(urlEncode(containerPath))
+	return f.cntSVC(container).NewBlockBlobClient(containerPath)
 }

 // updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -1363,15 +1331,16 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 		return nil
 	}
 	opt := service.CreateContainerOptions{
-		// Specifies whether data in the container may be accessed publicly and the level of access
-		Access: &f.publicAccess,
-
 		// Optional. Specifies a user-defined name-value pair associated with the blob.
 		//Metadata map[string]string

 		// Optional. Specifies the encryption scope settings to set on the container.
 		//CpkScopeInfo *CpkScopeInfo
 	}
+	if f.publicAccess != "" {
+		// Specifies whether data in the container may be accessed publicly and the level of access
+		opt.Access = &f.publicAccess
+	}
 	// now try to create the container
 	return f.pacer.Call(func() (bool, error) {
 		_, err := f.svc.CreateContainer(ctx, container, &opt)
@@ -1902,41 +1871,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return downloadResponse.Body, nil
 }

-// dontEncode is the characters that do not need percent-encoding
-//
-// The characters that do not need percent-encoding are a subset of
-// the printable ASCII characters: upper-case letters, lower-case
-// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
-// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
-// be replaced with "%" and the two-digit hex value of the byte.
-const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
-	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
-	`0123456789` +
-	`._-/~!$'()*;=:@`)
-
-// noNeedToEncode is a bitmap of characters which don't need % encoding
-var noNeedToEncode [256]bool
-
-func init() {
-	for _, c := range dontEncode {
-		noNeedToEncode[c] = true
-	}
-}
-
-// urlEncode encodes in with % encoding
-func urlEncode(in string) string {
-	var out bytes.Buffer
-	for i := 0; i < len(in); i++ {
-		c := in[i]
-		if noNeedToEncode[c] {
-			_ = out.WriteByte(c)
-		} else {
-			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
-		}
-	}
-	return out.String()
-}
-
 // poolWrapper wraps a pool.Pool as an azblob.TransferManager
 type poolWrapper struct {
 	pool *pool.Pool
@@ -2138,7 +2072,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
 	rs := readSeekCloser{wrappedReader, bufferReader}
 	options := blockblob.StageBlockOptions{
 		// Specify the transactional md5 for the body, to be validated by the service.
-		TransactionalContentMD5: transactionalMD5,
+		TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
 	}
 	_, err = blb.StageBlock(ctx, blockID, &rs, &options)
 	return o.fs.shouldRetry(ctx, err)
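The deleted urlEncode helper percent-encoded every byte outside its allow-list before handing paths to the Azure SDK; with the SDK fixed upstream the workaround goes away. As a reference for what it did, here is a minimal standalone sketch of the same allow-list encoder (the main harness is mine, not rclone's):

package main

import "fmt"

// dontEncode mirrors the allow-list from the deleted rclone helper.
const dontEncode = `abcdefghijklmnopqrstuvwxyz` +
	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
	`0123456789` +
	`._-/~!$'()*;=:@`

var noNeedToEncode [256]bool

func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}

// urlEncode percent-encodes every byte not in the allow-list.
func urlEncode(in string) string {
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); i++ {
		if c := in[i]; noNeedToEncode[c] {
			out = append(out, c)
		} else {
			out = append(out, fmt.Sprintf("%%%02X", c)...)
		}
	}
	return string(out)
}

func main() {
	// '?' and space are not in the allow-list, so they get percent-encoded.
	fmt.Println(urlEncode("dir/file name?.txt")) // dir/file%20name%3F.txt
}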
@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 				fs.Errorf(object.Name, "Can't create object %v", err)
 				continue
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
 			err = f.deleteByID(ctx, object.ID, object.Name)
 			checkErr(err)
 			tr.Done(ctx, err)
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			if err != nil {
 				fs.Errorf(object, "Can't create object %+v", err)
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
 			if oldOnly && last != remote {
 				// Check current version of the file
 				if object.Action == "hide" {

@@ -14,6 +14,7 @@ import (
 	"io"
 	"strings"
 	"sync"
+	"time"

 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
@@ -21,6 +22,7 @@ import (
 	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/sync/errgroup"
 )
@@ -428,18 +430,47 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
 	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 	var (
-		g, gCtx   = errgroup.WithContext(ctx)
-		remaining = up.size
+		g, gCtx    = errgroup.WithContext(ctx)
+		remaining  = up.size
+		uploadPool *pool.Pool
+		ci         = fs.GetConfig(ctx)
 	)
+	// If using large chunk size then make a temporary pool
+	if up.chunkSize <= int64(up.f.opt.ChunkSize) {
+		uploadPool = up.f.pool
+	} else {
+		uploadPool = pool.New(
+			time.Duration(up.f.opt.MemoryPoolFlushTime),
+			int(up.chunkSize),
+			ci.Transfers,
+			up.f.opt.MemoryPoolUseMmap,
+		)
+		defer uploadPool.Flush()
+	}
+	// Get an upload token and a buffer
+	getBuf := func() (buf []byte) {
+		up.f.getBuf(true)
+		if !up.doCopy {
+			buf = uploadPool.Get()
+		}
+		return buf
+	}
+	// Put an upload token and a buffer
+	putBuf := func(buf []byte) {
+		if !up.doCopy {
+			uploadPool.Put(buf)
+		}
+		up.f.putBuf(nil, true)
+	}
 	g.Go(func() error {
 		for part := int64(1); part <= up.parts; part++ {
 			// Get a block of memory from the pool and token which limits concurrency.
-			buf := up.f.getBuf(up.doCopy)
+			buf := getBuf()

 			// Fail fast, in case an errgroup managed function returns an error
 			// gCtx is cancelled. There is no point in uploading all the other parts.
 			if gCtx.Err() != nil {
-				up.f.putBuf(buf, up.doCopy)
+				putBuf(buf)
 				return nil
 			}

@@ -453,14 +484,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
 				buf = buf[:reqSize]
 				_, err = io.ReadFull(up.in, buf)
 				if err != nil {
-					up.f.putBuf(buf, up.doCopy)
+					putBuf(buf)
 					return err
 				}
 			}

 			part := part // for the closure
 			g.Go(func() (err error) {
-				defer up.f.putBuf(buf, up.doCopy)
+				defer putBuf(buf)
 				if !up.doCopy {
 					err = up.transferChunk(gCtx, part, buf)
 				} else {
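The key change in this b2 hunk is that chunk buffers now come from a pool sized to the actual chunk size instead of always the configured one. A minimal sketch of the same borrow/return pattern with rclone's lib/pool, using the constructor shape seen in the diff (the flush time, pool size, and chunk size here are illustrative values, not rclone's defaults):

package main

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	const chunkSize = 8 * 1024 * 1024 // illustrative 8 MiB chunks

	// pool.New(flushTime, bufferSize, poolSize, useMmap), as called in the diff.
	bufPool := pool.New(time.Minute, chunkSize, 4, false)
	defer bufPool.Flush() // release the buffers when done, as the diff does

	buf := bufPool.Get() // borrow a chunk-sized buffer
	copy(buf, []byte("chunk data goes here"))
	bufPool.Put(buf) // return it for reuse by the next part
}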
backend/cache/cache.go (vendored, 2 changes)
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 	}
-	entries = nil
+	entries = nil //nolint:ineffassign

 	// and then iterate over the ones from source (temp Objects will override source ones)
 	var batchDirectories []*Directory
@@ -235,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
+		CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
 		DuplicateFiles:  true,
 		ReadMimeType:    false, // MimeTypes not supported with crypt
 		WriteMimeType:   false,
@@ -396,6 +396,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+	ci := fs.GetConfig(ctx)
+
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -413,6 +415,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
+	if ci.IgnoreChecksum {
+		ht = hash.None
+	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -451,7 +451,11 @@ If downloading a file returns the error "This file has been identified
 as malware or spam and cannot be downloaded" with the error code
 "cannotDownloadAbusiveFile" then supply this flag to rclone to
 indicate you acknowledge the risks of downloading the file and rclone
-will download it anyway.`,
+will download it anyway.
+
+Note that if you are using service account it will need Manager
+permission (not Content Manager) for this flag to work. If the SA
+does not have the right permission, Google will just ignore the flag.`,
 	Advanced: true,
 }, {
 	Name: "keep_revision_forever",
@@ -757,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 	} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 		fs.Errorf(f, "Received download limit error: %v", err)
 		return false, fserrors.FatalError(err)
-	} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
+	} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
 		fs.Errorf(f, "Received upload limit error: %v", err)
 		return false, fserrors.FatalError(err)
 	} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -3322,9 +3326,9 @@ This takes an optional directory to trash which make this easier to
 use via the API.

     rclone backend untrash drive:directory
-    rclone backend -i untrash drive:directory subdir
+    rclone backend --interactive untrash drive:directory subdir

-Use the -i flag to see what would be restored before restoring it.
+Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

 Result:

@@ -3354,7 +3358,7 @@ component will be used as the file name.
 If the destination is a drive backend then server-side copying will be
 attempted if possible.

-Use the -i flag to see what would be copied before copying.
+Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
 `,
 }, {
 	Name: "exportformats",

@@ -243,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
 	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
 	assert.False(t, quotaExceededRetry)
 	assert.Equal(t, quotaExceededError, expectedQuotaError)
+
+	sqEItem := googleapi.ErrorItem{
+		Reason: "storageQuotaExceeded",
+	}
+	generic403.Errors[0] = sqEItem
+	expectedStorageQuotaError := fserrors.FatalError(&generic403)
+	storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, storageQuotaExceededRetry)
+	assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
 }

 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"

-	"github.com/rclone/ftp"
+	"github.com/jlaffaye/ftp"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config"
@@ -315,18 +315,26 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }

+// returns true if this FTP error should be retried
+func isRetriableFtpError(err error) bool {
+	switch errX := err.(type) {
+	case *textproto.Error:
+		switch errX.Code {
+		case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
+			return true
+		}
+	}
+	return false
+}
+
 // shouldRetry returns a boolean as to whether this err deserve to be
 // retried. It returns the err as a convenience
 func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	switch errX := err.(type) {
-	case *textproto.Error:
-		switch errX.Code {
-		case ftp.StatusNotAvailable:
-			return true, err
-		}
-	}
+	if isRetriableFtpError(err) {
+		return true, err
+	}
 	return fserrors.ShouldRetry(err), err
 }
@@ -1186,15 +1194,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	c, err := o.fs.getFtpConnection(ctx)
+
+	var (
+		fd *ftp.Response
+		c  *ftp.ServerConn
+	)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		c, err = o.fs.getFtpConnection(ctx)
+		if err != nil {
+			return false, err // getFtpConnection has retries already
+		}
+		fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
+		if err != nil {
+			o.fs.putFtpConnection(&c, err)
+		}
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, fmt.Errorf("open: %w", err)
 	}
-	fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
-	if err != nil {
-		o.fs.putFtpConnection(&c, err)
-		return nil, fmt.Errorf("open: %w", err)
-	}
+
 	rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
 	return rc, nil
 }
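This refactor factors the retriable-status check out of shouldRetry so that Open can drive a pacer retry loop around RetrFrom. A small sketch of the extracted predicate, assuming the github.com/jlaffaye/ftp status constants imported in the diff:

package main

import (
	"fmt"
	"net/textproto"

	"github.com/jlaffaye/ftp"
)

// isRetriableFtpError mirrors the helper added in the diff: only
// 421 (service not available) and 426 (transfer aborted) are retried.
func isRetriableFtpError(err error) bool {
	if errX, ok := err.(*textproto.Error); ok {
		switch errX.Code {
		case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isRetriableFtpError(&textproto.Error{Code: ftp.StatusNotAvailable, Msg: "try later"})) // true
	fmt.Println(isRetriableFtpError(&textproto.Error{Code: ftp.StatusBadArguments, Msg: "bad args"}))  // false
}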
@@ -82,7 +82,8 @@ func init() {
 		saFile, _ := m.Get("service_account_file")
 		saCreds, _ := m.Get("service_account_credentials")
 		anonymous, _ := m.Get("anonymous")
-		if saFile != "" || saCreds != "" || anonymous == "true" {
+		envAuth, _ := m.Get("env_auth")
+		if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
 			return nil, nil
 		}
 		return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -330,6 +331,17 @@ can't check the size and hash but the file contents will be decompressed.
 			Default: (encoder.Base |
 				encoder.EncodeCrLf |
 				encoder.EncodeInvalidUtf8),
+		}, {
+			Name:    "env_auth",
+			Help:    "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
+			Default: false,
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter credentials in the next step.",
+			}, {
+				Value: "true",
+				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
+			}},
 		}}...),
 	})
 }
@@ -349,6 +361,7 @@ type Options struct {
 	Decompress bool                 `config:"decompress"`
 	Endpoint   string               `config:"endpoint"`
 	Enc        encoder.MultiEncoder `config:"encoding"`
+	EnvAuth    bool                 `config:"env_auth"`
 }

 // Fs represents a remote storage server
@@ -500,6 +513,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
 		}
+	} else if opt.EnvAuth {
+		oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
+		if err != nil {
+			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
+		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
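With env_auth=true the backend now asks Google's application default credentials chain for an HTTP client instead of running the OAuth flow. A minimal standalone sketch of that call path, using the same DefaultClient call and scope as the diff:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Checks GOOGLE_APPLICATION_CREDENTIALS, gcloud config, then the
	// instance metadata server: the "env vars or IAM" of the help text above.
	client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatalf("failed to configure Google Cloud Storage: %v", err)
	}
	_ = client // hand the *http.Client to the storage service, as NewFs does
}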
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
 			if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
 				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
 			}
-			accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
+			accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
 			doneCount++
 		}
 	})
@@ -503,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 				continue
 			}
 		}
-		err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
+		err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
 		fs.Errorf(dir, "%v", fierr)
 		_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
 		continue
@@ -524,6 +524,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
 			localPath := filepath.Join(fsDirPath, name)
 			fi, err = os.Stat(localPath)
+			// Quietly skip errors on excluded files and directories
+			if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
+				continue
+			}
 			if os.IsNotExist(err) || isCircularSymlinkError(err) {
 				// Skip bad symlinks and circular symlinks
 				err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -14,6 +14,7 @@ import (
 	"time"

 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
@@ -395,3 +396,73 @@ func TestFilter(t *testing.T) {
 	sort.Sort(entries)
 	require.Equal(t, "[included]", fmt.Sprint(entries))
 }
+
+func TestFilterSymlink(t *testing.T) {
+	ctx := context.Background()
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	when := time.Now()
+	f := r.Flocal.(*Fs)
+
+	// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
+	r.WriteFile("included.file", "included file", when)
+	r.WriteFile("included.dir/included.sub.file", "included sub file", when)
+	require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
+	require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
+	require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
+
+	// Set fs into "-L" mode
+	f.opt.FollowSymlinks = true
+	f.opt.TranslateSymlinks = false
+	f.lstat = os.Stat
+
+	// Set fs into "-l" mode
+	// f.opt.FollowSymlinks = false
+	// f.opt.TranslateSymlinks = true
+	// f.lstat = os.Lstat
+
+	// Check set up for filtering
+	assert.True(t, f.Features().FilterAware)
+
+	// Reset global error count
+	accounting.Stats(ctx).ResetErrors()
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	// Add a filter
+	ctx, fi := filter.AddConfig(ctx)
+	require.NoError(t, fi.AddRule("+ included.file"))
+	require.NoError(t, fi.AddRule("+ included.file.link"))
+	require.NoError(t, fi.AddRule("+ included.dir/**"))
+	require.NoError(t, fi.AddRule("+ included.dir.link/**"))
+	require.NoError(t, fi.AddRule("- *"))
+
+	// Check listing without use filter flag
+	entries, err := f.List(ctx, "")
+	require.NoError(t, err)
+
+	// Check 1 global error, one for each dangling symlink
+	assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
+	accounting.Stats(ctx).ResetErrors()
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+
+	// Add user filter flag
+	ctx = filter.SetUseFilter(ctx, true)
+
+	// Check listing with use filter flag
+	entries, err = f.List(ctx, "")
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+
+	// Check listing through a symlink still works
+	entries, err = f.List(ctx, "included.dir")
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
+}
@@ -83,6 +83,17 @@ than permanently deleting them. If you specify this then rclone will
 permanently delete objects instead.`,
 	Default:  false,
 	Advanced: true,
+}, {
+	Name: "use_https",
+	Help: `Use HTTPS for transfers.
+
+MEGA uses plain text HTTP connections by default.
+Some ISPs throttle HTTP connections, this causes transfers to become very slow.
+Enabling this will force MEGA to use HTTPS for all transfers.
+HTTPS is normally not necessary since all data is already encrypted anyway.
+Enabling it will increase CPU usage and add network overhead.`,
+	Default:  false,
+	Advanced: true,
 }, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
@@ -100,6 +111,7 @@ type Options struct {
 	Pass       string               `config:"pass"`
 	Debug      bool                 `config:"debug"`
 	HardDelete bool                 `config:"hard_delete"`
+	UseHTTPS   bool                 `config:"use_https"`
 	Enc        encoder.MultiEncoder `config:"encoding"`
 }

@@ -204,6 +216,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if srv == nil {
 		srv = mega.New().SetClient(fshttp.NewClient(ctx))
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
+		srv.SetHTTPS(opt.UseHTTPS)
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
@@ -126,6 +126,7 @@ type HashesType struct {
 	Sha1Hash     string `json:"sha1Hash"`     // hex encoded SHA1 hash for the contents of the file (if available)
 	Crc32Hash    string `json:"crc32Hash"`    // hex encoded CRC32 value of the file (if available)
 	QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
+	Sha256Hash   string `json:"sha256Hash"`   // hex encoded SHA256 value of the file (if available)
 }

 // FileFacet groups file-related data on OneDrive into a single structure.
@@ -259,6 +259,48 @@ this flag there.
 At the time of writing this only works with OneDrive personal paid accounts.
 `,
 	Advanced: true,
+}, {
+	Name:    "hash_type",
+	Default: "auto",
+	Help: `Specify the hash in use for the backend.
+
+This specifies the hash type in use. If set to "auto" it will use the
+default hash which is QuickXorHash.
+
+Before rclone 1.62 an SHA1 hash was used by default for Onedrive
+Personal. For 1.62 and later the default is to use a QuickXorHash for
+all onedrive types. If an SHA1 hash is desired then set this option
+accordingly.
+
+From July 2023 QuickXorHash will be the only available hash for
+both OneDrive for Business and OneDrive Personal.
+
+This can be set to "none" to not use any hashes.
+
+If the hash requested does not exist on the object, it will be
+returned as an empty string which is treated as a missing hash by
+rclone.
+`,
+	Examples: []fs.OptionExample{{
+		Value: "auto",
+		Help:  "Rclone chooses the best hash",
+	}, {
+		Value: "quickxor",
+		Help:  "QuickXor",
+	}, {
+		Value: "sha1",
+		Help:  "SHA1",
+	}, {
+		Value: "sha256",
+		Help:  "SHA256",
+	}, {
+		Value: "crc32",
+		Help:  "CRC32",
+	}, {
+		Value: "none",
+		Help:  "None - don't use any hashes",
+	}},
+	Advanced: true,
 }, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
@@ -511,7 +553,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
 `)
 	case "url_end":
 		siteURL := config.Result
-		re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
+		re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
 		match := re.FindStringSubmatch(siteURL)
 		if len(match) == 2 {
 			return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -597,6 +639,7 @@ type Options struct {
 	LinkScope    string               `config:"link_scope"`
 	LinkType     string               `config:"link_type"`
 	LinkPassword string               `config:"link_password"`
+	HashType     string               `config:"hash_type"`
 	Enc          encoder.MultiEncoder `config:"encoding"`
 }

@@ -613,6 +656,7 @@ type Fs struct {
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
 	driveID      string           // ID to use for querying Microsoft Graph
 	driveType    string           // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
+	hashType     hash.Type        // type of the hash we are using
 }

 // Object describes a OneDrive object
@@ -626,8 +670,7 @@ type Object struct {
 	size         int64     // size of the object
 	modTime      time.Time // modification time of the object
 	id           string    // ID of the object
-	sha1         string    // SHA-1 of the object content
-	quickxorhash string    // QuickXorHash of the object content
+	hash         string    // Hash of the content, usually QuickXorHash but set as hash_type
 	mimeType     string    // Content-Type of object from server (may not be as uploaded)
 }

@@ -882,6 +925,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		driveType: opt.DriveType,
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		hashType:  QuickXorHashType,
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -891,6 +935,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)

+	// Set the user defined hash
+	if opt.HashType == "auto" || opt.HashType == "" {
+		opt.HashType = QuickXorHashType.String()
+	}
+	err = f.hashType.Set(opt.HashType)
+	if err != nil {
+		return nil, err
+	}
+
 	// Disable change polling in China region
 	// See: https://github.com/rclone/rclone/issues/6444
 	if f.opt.Region == regionCN {
@@ -1556,10 +1609,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	if f.driveType == driveTypePersonal {
-		return hash.Set(hash.SHA1)
-	}
-	return hash.Set(QuickXorHashType)
+	return hash.Set(f.hashType)
 }

 // PublicLink returns a link for downloading without account.
@@ -1768,14 +1818,8 @@ func (o *Object) rootPath() string {

 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if o.fs.driveType == driveTypePersonal {
-		if t == hash.SHA1 {
-			return o.sha1, nil
-		}
-	} else {
-		if t == QuickXorHashType {
-			return o.quickxorhash, nil
-		}
+	if t == o.fs.hashType {
+		return o.hash, nil
 	}
 	return "", hash.ErrUnsupported
 }
@@ -1806,16 +1850,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 	file := info.GetFile()
 	if file != nil {
 		o.mimeType = file.MimeType
-		if file.Hashes.Sha1Hash != "" {
-			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
-		}
-		if file.Hashes.QuickXorHash != "" {
-			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
-			if err != nil {
-				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
-			} else {
-				o.quickxorhash = hex.EncodeToString(h)
+		o.hash = ""
+		switch o.fs.hashType {
+		case QuickXorHashType:
+			if file.Hashes.QuickXorHash != "" {
+				h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
+				if err != nil {
+					fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
+				} else {
+					o.hash = hex.EncodeToString(h)
+				}
 			}
+		case hash.SHA1:
+			o.hash = strings.ToLower(file.Hashes.Sha1Hash)
+		case hash.SHA256:
+			o.hash = strings.ToLower(file.Hashes.Sha256Hash)
+		case hash.CRC32:
+			o.hash = strings.ToLower(file.Hashes.Crc32Hash)
 		}
 	}
 	fileSystemInfo := info.GetFileSystemInfo()
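OneDrive reports QuickXorHash base64-encoded while rclone stores and compares it as lowercase hex; the decode step in setMetaData above boils down to this standalone sketch (the sample value is made up, 20 zero bytes):

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// A made-up 20-byte QuickXorHash as the Graph API would return it.
	quickXorB64 := "AAAAAAAAAAAAAAAAAAAAAAAAAAA="
	h, err := base64.StdEncoding.DecodeString(quickXorB64)
	if err != nil {
		log.Fatalf("failed to decode QuickXorHash %q: %v", quickXorB64, err)
	}
	fmt.Println(hex.EncodeToString(h)) // the form rclone stores and compares
}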
@@ -7,51 +7,40 @@
 // See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
 package quickxorhash

-// This code was ported from the code snippet linked from
-// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
-// Which has the copyright
+// This code was ported from a fast C-implementation from
+// https://github.com/namazso/QuickXorHash
+// which has licenced as BSD Zero Clause License
+//
+// BSD Zero Clause License
+//
+// Copyright (c) 2022 namazso <admin@namazso.eu>
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.

 // ------------------------------------------------------------------------------
 // Copyright (c) 2016 Microsoft Corporation
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
 // in the Software without restriction, including without limitation the rights
 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 // copies of the Software, and to permit persons to whom the Software is
 // furnished to do so, subject to the following conditions:
 //
 // The above copyright notice and this permission notice shall be included in
 // all copies or substantial portions of the Software.
 //
 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 // THE SOFTWARE.
 // ------------------------------------------------------------------------------

-import (
-	"hash"
-)
+import "hash"

 const (
 	// BlockSize is the preferred size for hashing
 	BlockSize = 64
 	// Size of the output checksum
-	Size           = 20
-	bitsInLastCell = 32
-	shift          = 11
-	widthInBits    = 8 * Size
-	dataSize       = (widthInBits-1)/64 + 1
+	Size        = 20
+	shift       = 11
+	widthInBits = 8 * Size
+	dataSize    = shift * widthInBits
 )

 type quickXorHash struct {
-	data        [dataSize]uint64
-	lengthSoFar uint64
-	shiftSoFar  int
+	data [dataSize]byte
+	size uint64
 }

 // New returns a new hash.Hash computing the quickXorHash checksum.
@@ -70,94 +59,37 @@ func New() hash.Hash {
 //
 // Implementations must not retain p.
 func (q *quickXorHash) Write(p []byte) (n int, err error) {
-	currentshift := q.shiftSoFar
-
-	// The bitvector where we'll start xoring
-	vectorArrayIndex := currentshift / 64
-
-	// The position within the bit vector at which we begin xoring
-	vectorOffset := currentshift % 64
-	iterations := len(p)
-	if iterations > widthInBits {
-		iterations = widthInBits
+	var i int
+	// fill last remain
+	lastRemain := int(q.size) % dataSize
+	if lastRemain != 0 {
+		i += xorBytes(q.data[lastRemain:], p)
 	}

-	for i := 0; i < iterations; i++ {
-		isLastCell := vectorArrayIndex == len(q.data)-1
-		var bitsInVectorCell int
-		if isLastCell {
-			bitsInVectorCell = bitsInLastCell
-		} else {
-			bitsInVectorCell = 64
-		}
-
-		// There's at least 2 bitvectors before we reach the end of the array
-		if vectorOffset <= bitsInVectorCell-8 {
-			for j := i; j < len(p); j += widthInBits {
-				q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
-			}
-		} else {
-			index1 := vectorArrayIndex
-			var index2 int
-			if isLastCell {
-				index2 = 0
-			} else {
-				index2 = vectorArrayIndex + 1
-			}
-			low := byte(bitsInVectorCell - vectorOffset)
-
-			xoredByte := byte(0)
-			for j := i; j < len(p); j += widthInBits {
-				xoredByte ^= p[j]
-			}
-			q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
-			q.data[index2] ^= uint64(xoredByte) >> low
-		}
-		vectorOffset += shift
-		for vectorOffset >= bitsInVectorCell {
-			if isLastCell {
-				vectorArrayIndex = 0
-			} else {
-				vectorArrayIndex = vectorArrayIndex + 1
-			}
-			vectorOffset -= bitsInVectorCell
-		}
-	}
+	if i != len(p) {
+		for len(p)-i >= dataSize {
+			i += xorBytes(q.data[:], p[i:])
+		}
+		xorBytes(q.data[:], p[i:])
+	}

-	// Update the starting position in a circular shift pattern
-	q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
-
-	q.lengthSoFar += uint64(len(p))
-
+	q.size += uint64(len(p))
 	return len(p), nil
 }

 // Calculate the current checksum
-func (q *quickXorHash) checkSum() (h [Size]byte) {
-	// Output the data as little endian bytes
-	ph := 0
-	for i := 0; i < len(q.data)-1; i++ {
-		d := q.data[i]
-		_ = h[ph+7] // bounds check
-		h[ph+0] = byte(d >> (8 * 0))
-		h[ph+1] = byte(d >> (8 * 1))
-		h[ph+2] = byte(d >> (8 * 2))
-		h[ph+3] = byte(d >> (8 * 3))
-		h[ph+4] = byte(d >> (8 * 4))
-		h[ph+5] = byte(d >> (8 * 5))
-		h[ph+6] = byte(d >> (8 * 6))
-		h[ph+7] = byte(d >> (8 * 7))
-		ph += 8
+func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
+	for i := 0; i < dataSize; i++ {
+		shift := (i * 11) % 160
+		shiftBytes := shift / 8
+		shiftBits := shift % 8
+		shifted := int(q.data[i]) << shiftBits
+		h[shiftBytes] ^= byte(shifted)
+		h[shiftBytes+1] ^= byte(shifted >> 8)
 	}
-	// remaining 32 bits
-	d := q.data[len(q.data)-1]
-	h[Size-4] = byte(d >> (8 * 0))
-	h[Size-3] = byte(d >> (8 * 1))
-	h[Size-2] = byte(d >> (8 * 2))
-	h[Size-1] = byte(d >> (8 * 3))
+	h[0] ^= h[20]

 	// XOR the file length with the least significant bits in little endian format
-	d = q.lengthSoFar
+	d := q.size
 	h[Size-8] ^= byte(d >> (8 * 0))
 	h[Size-7] ^= byte(d >> (8 * 1))
 	h[Size-6] ^= byte(d >> (8 * 2))
@@ -174,7 +106,7 @@ func (q *quickXorHash) checkSum() (h [Size]byte) {
 // It does not change the underlying hash state.
 func (q *quickXorHash) Sum(b []byte) []byte {
 	hash := q.checkSum()
-	return append(b, hash[:]...)
+	return append(b, hash[:Size]...)
 }

 // Reset resets the Hash to its initial state.
@@ -196,8 +128,10 @@ func (q *quickXorHash) BlockSize() int {
 }

 // Sum returns the quickXorHash checksum of the data.
-func Sum(data []byte) [Size]byte {
+func Sum(data []byte) (h [Size]byte) {
 	var d quickXorHash
 	_, _ = d.Write(data)
-	return d.checkSum()
+	s := d.checkSum()
+	copy(h[:], s[:])
+	return h
 }
@@ -4,6 +4,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"hash"
+	"math/rand"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -166,3 +167,16 @@ func TestReset(t *testing.T) {

 // check interface
 var _ hash.Hash = (*quickXorHash)(nil)
+
+func BenchmarkQuickXorHash(b *testing.B) {
+	b.SetBytes(1 << 20)
+	buf := make([]byte, 1<<20)
+	rand.Read(buf)
+	h := New()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		h.Reset()
+		h.Write(buf)
+		h.Sum(nil)
+	}
+}
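For reference, computing a QuickXorHash with the rewritten package is the usual hash.Hash dance; this minimal sketch base64-encodes the 20-byte sum, which is the form OneDrive transmits (output value not shown, compute it yourself):

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
)

func main() {
	h := quickxorhash.New() // a hash.Hash with a 20-byte sum
	h.Write([]byte("The quick brown fox jumps over the lazy dog"))
	sum := h.Sum(nil)
	fmt.Println(base64.StdEncoding.EncodeToString(sum))
}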
backend/onedrive/quickxorhash/xor.go (new file, 20 lines)
@@ -0,0 +1,20 @@
+//go:build !go1.20
+
+package quickxorhash
+
+func xorBytes(dst, src []byte) int {
+	n := len(dst)
+	if len(src) < n {
+		n = len(src)
+	}
+	if n == 0 {
+		return 0
+	}
+	dst = dst[:n]
+	//src = src[:n]
+	src = src[:len(dst)] // remove bounds check in loop
+	for i := range dst {
+		dst[i] ^= src[i]
+	}
+	return n
+}
backend/onedrive/quickxorhash/xor_1.20.go (new file, 9 lines)
@@ -0,0 +1,9 @@
+//go:build go1.20
+
+package quickxorhash
+
+import "crypto/subtle"
+
+func xorBytes(dst, src []byte) int {
+	return subtle.XORBytes(dst, src, dst)
+}
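Both build-tagged files implement the same contract: XOR min(len(dst), len(src)) bytes of src into dst and report how many bytes were processed. On Go 1.20+ this comes from crypto/subtle, which permits the exact dst/operand aliasing used here. A quick check of the shared behavior:

package main

import (
	"crypto/subtle" // Go 1.20+
	"fmt"
)

func main() {
	dst := []byte{0x0f, 0xf0, 0xff}
	src := []byte{0xff, 0xff} // shorter input: only 2 bytes are touched
	n := subtle.XORBytes(dst, src, dst)
	fmt.Println(n, dst) // 2 [240 15 255]
}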
backend/oracleobjectstorage/byok.go (new file, 145 lines)
@@ -0,0 +1,145 @@
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
+
+package oracleobjectstorage
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/oracle/oci-go-sdk/v65/common"
+	"github.com/oracle/oci-go-sdk/v65/objectstorage"
+	"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
+)
+
+const (
+	sseDefaultAlgorithm = "AES256"
+)
+
+func getSha256(p []byte) []byte {
+	h := sha256.New()
+	h.Write(p)
+	return h.Sum(nil)
+}
+
+func validateSSECustomerKeyOptions(opt *Options) error {
+	if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
+		return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
+	}
+	if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
+		return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
+	}
+	if opt.SSEKMSKeyID != "" {
+		return nil
+	}
+	err := populateSSECustomerKeys(opt)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func populateSSECustomerKeys(opt *Options) error {
+	if opt.SSECustomerKeyFile != "" {
+		// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
+		data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
+		if err != nil {
+			return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
+		}
+		opt.SSECustomerKey = strings.TrimSpace(string(data))
+	}
+	if opt.SSECustomerKey != "" {
+		decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
+		if err != nil {
+			return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
+		}
+		sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
+		if opt.SSECustomerKeySha256 == "" {
+			opt.SSECustomerKeySha256 = sha256Checksum
+		} else {
+			if opt.SSECustomerKeySha256 != sha256Checksum {
+				return fmt.Errorf("the computed SHA256 checksum "+
+					"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
+					sha256Checksum, opt.SSECustomerKeySha256)
+			}
+		}
+		if opt.SSECustomerAlgorithm == "" {
+			opt.SSECustomerAlgorithm = sseDefaultAlgorithm
+		}
+	}
+	return nil
+}
+
+// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
+func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
+	if fs.opt.SSEKMSKeyID != "" {
+		request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
+	}
+	if fs.opt.SSECustomerAlgorithm != "" {
+		request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
+	}
+	if fs.opt.SSECustomerKey != "" {
+		request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
+	}
+	if fs.opt.SSECustomerKeySha256 != "" {
+		request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
+	}
+}
+
+func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
+	if fs.opt.SSECustomerAlgorithm != "" {
+		request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
+	}
+	if fs.opt.SSECustomerKey != "" {
+		request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
+	}
+	if fs.opt.SSECustomerKeySha256 != "" {
+		request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
+	}
+}
+
+func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
+	if fs.opt.SSECustomerAlgorithm != "" {
+		request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
+	}
+	if fs.opt.SSECustomerKey != "" {
+		request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
+	}
+	if fs.opt.SSECustomerKeySha256 != "" {
+		request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
+	}
+}
+
+func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
+	if fs.opt.SSEKMSKeyID != "" {
+		request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
+	}
+	if fs.opt.SSECustomerAlgorithm != "" {
+		request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
+	}
+	if fs.opt.SSECustomerKey != "" {
+		request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
+	}
+	if fs.opt.SSECustomerKeySha256 != "" {
+		request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
+	}
+}
+
+func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
+	if fs.opt.SSEKMSKeyID != "" {
+		request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
+	}
+	if fs.opt.SSECustomerAlgorithm != "" {
+		request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
+	}
+	if fs.opt.SSECustomerKey != "" {
+		request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
+	}
+	if fs.opt.SSECustomerKeySha256 != "" {
+		request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
+	}
+}
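populateSSECustomerKeys derives sse_customer_key_sha256 by base64-decoding the key, hashing the raw bytes with SHA256, and base64-encoding the digest. This standalone sketch computes the value the config check expects (the key below is a throwaway all-zero example, never use a real key like this):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	// Example only: a base64-encoded 256-bit AES key (all zero bytes here).
	sseCustomerKey := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
	raw, err := base64.StdEncoding.DecodeString(sseCustomerKey)
	if err != nil {
		log.Fatalf("could not decode sse_customer_key: %v", err)
	}
	sum := sha256.Sum256(raw)
	// This is the value expected in sse_customer_key_sha256.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}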
@@ -9,6 +9,8 @@ import (
 	"errors"
 	"net/http"
 	"os"
+	"path"
+	"strings"

 	"github.com/oracle/oci-go-sdk/v65/common"
 	"github.com/oracle/oci-go-sdk/v65/common/auth"
@@ -18,15 +20,33 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 )

+func expandPath(filepath string) (expandedPath string) {
+	if filepath == "" {
+		return filepath
+	}
+	cleanedPath := path.Clean(filepath)
+	expandedPath = cleanedPath
+	if strings.HasPrefix(cleanedPath, "~") {
+		rest := cleanedPath[2:]
+		home, err := os.UserHomeDir()
+		if err != nil {
+			return expandedPath
+		}
+		expandedPath = path.Join(home, rest)
+	}
+	return
+}
+
 func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
 	switch opt.Provider {
 	case instancePrincipal:
 		return auth.InstancePrincipalConfigurationProvider()
 	case userPrincipal:
-		if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
-			fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
+		expandConfigFilePath := expandPath(opt.ConfigFile)
+		if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
+			fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
 		}
-		return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
+		return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
 	case resourcePrincipal:
 		return auth.ResourcePrincipalConfigurationProvider()
 	case noAuth:
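The new expandPath helper makes a setting like config_file = ~/.oci/config work by swapping the leading tilde for os.UserHomeDir(). A trimmed standalone version of the same idea (simplified to only handle the "~/" prefix, unlike the helper above):

package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// expandTilde is a simplified take on the expandPath helper above.
func expandTilde(p string) string {
	if !strings.HasPrefix(p, "~/") {
		return path.Clean(p)
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return path.Clean(p) // fall back to the original path
	}
	return path.Join(home, p[2:])
}

func main() {
	fmt.Println(expandTilde("~/.oci/config")) // e.g. /home/user/.oci/config
}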
@@ -65,8 +65,8 @@ a bucket or with a bucket and path.
 Long: `This command removes unfinished multipart uploads of age greater than
 max-age which defaults to 24 hours.

-Note that you can use -i/--dry-run with this command to see what it
-would do.
+Note that you can use --interactive/-i or --dry-run with this command to see what
+it would do.

     rclone backend cleanup oos:bucket/path/to/object
     rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
@@ -74,6 +74,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err erro
 		BucketName:        common.String(srcBucket),
 		CopyObjectDetails: copyObjectDetails,
 	}
+	useBYOKCopyObject(f, &req)
 	var resp objectstorage.CopyObjectResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CopyObject(ctx, req)
@@ -87,6 +87,7 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(objectPath),
|
||||
}
|
||||
useBYOKHeadObject(o.fs, &req)
|
||||
var response objectstorage.HeadObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
@@ -99,6 +100,7 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
fs.Errorf(o, "Failed to head object: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
o.fs.cache.MarkOK(bucketName)
|
||||
@@ -331,7 +333,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
o.applyGetObjectOptions(&req, options...)
|
||||
|
||||
useBYOKGetObject(o.fs, &req)
|
||||
var resp objectstorage.GetObjectResponse
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
@@ -433,6 +435,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
useBYOKUpload(o.fs, &uploadRequest)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
@@ -506,8 +509,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
useBYOKPutObject(o.fs, &req)
|
||||
var resp objectstorage.PutObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.PutObject(ctx, req)
|
||||
resp, err = o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -17,9 +17,7 @@ const (
 	defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
 	defaultUploadConcurrency   = 10
 	maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
-	minSleep                   = 100 * time.Millisecond
-	maxSleep                   = 5 * time.Minute
-	decayConstant              = 1 // bigger for slower decay, exponential
+	minSleep                   = 10 * time.Millisecond
 	defaultCopyTimeoutDuration = fs.Duration(time.Minute)
 )
@@ -47,23 +45,28 @@ https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfromins

 // Options defines the configuration for this backend
 type Options struct {
-	Provider          string               `config:"provider"`
-	Compartment       string               `config:"compartment"`
-	Namespace         string               `config:"namespace"`
-	Region            string               `config:"region"`
-	Endpoint          string               `config:"endpoint"`
-	Enc               encoder.MultiEncoder `config:"encoding"`
-	ConfigFile        string               `config:"config_file"`
-	ConfigProfile     string               `config:"config_profile"`
-	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
-	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
-	UploadConcurrency int                  `config:"upload_concurrency"`
-	DisableChecksum   bool                 `config:"disable_checksum"`
-	CopyCutoff        fs.SizeSuffix        `config:"copy_cutoff"`
-	CopyTimeout       fs.Duration          `config:"copy_timeout"`
-	StorageTier       string               `config:"storage_tier"`
-	LeavePartsOnError bool                 `config:"leave_parts_on_error"`
-	NoCheckBucket     bool                 `config:"no_check_bucket"`
+	Provider             string               `config:"provider"`
+	Compartment          string               `config:"compartment"`
+	Namespace            string               `config:"namespace"`
+	Region               string               `config:"region"`
+	Endpoint             string               `config:"endpoint"`
+	Enc                  encoder.MultiEncoder `config:"encoding"`
+	ConfigFile           string               `config:"config_file"`
+	ConfigProfile        string               `config:"config_profile"`
+	UploadCutoff         fs.SizeSuffix        `config:"upload_cutoff"`
+	ChunkSize            fs.SizeSuffix        `config:"chunk_size"`
+	UploadConcurrency    int                  `config:"upload_concurrency"`
+	DisableChecksum      bool                 `config:"disable_checksum"`
+	CopyCutoff           fs.SizeSuffix        `config:"copy_cutoff"`
+	CopyTimeout          fs.Duration          `config:"copy_timeout"`
+	StorageTier          string               `config:"storage_tier"`
+	LeavePartsOnError    bool                 `config:"leave_parts_on_error"`
+	NoCheckBucket        bool                 `config:"no_check_bucket"`
+	SSEKMSKeyID          string               `config:"sse_kms_key_id"`
+	SSECustomerAlgorithm string               `config:"sse_customer_algorithm"`
+	SSECustomerKey       string               `config:"sse_customer_key"`
+	SSECustomerKeyFile   string               `config:"sse_customer_key_file"`
+	SSECustomerKeySha256 string               `config:"sse_customer_key_sha256"`
 }

 func newOptions() []fs.Option {
@@ -123,6 +126,22 @@ func newOptions() []fs.Option {
 			Value: "Default",
 			Help:  "Use the default profile",
 		}},
+	}, {
+		// Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
+		Name:     "storage_tier",
+		Help:     "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
+		Default:  "Standard",
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "Standard",
+			Help:  "Standard storage tier, this is the default tier",
+		}, {
+			Value: "InfrequentAccess",
+			Help:  "InfrequentAccess storage tier",
+		}, {
+			Value: "Archive",
+			Help:  "Archive storage tier",
+		}},
 	}, {
 		Name: "upload_cutoff",
 		Help: `Cutoff for switching to chunked upload.
@@ -238,5 +257,59 @@ creation permissions.
 `,
 		Default:  false,
 		Advanced: true,
+	}, {
+		Name: "sse_customer_key_file",
+		Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
+with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "None",
+		}},
+	}, {
+		Name: "sse_customer_key",
+		Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
+encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
+needed. For more information, see Using Your Own Keys for Server-Side Encryption
+(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "None",
+		}},
+	}, {
+		Name: "sse_customer_key_sha256",
+		Help: `If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
+key. This value is used to check the integrity of the encryption key. See Using Your Own Keys for
+Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "None",
+		}},
+	}, {
+		Name: "sse_kms_key_id",
+		Help: `If using your own master key in vault, this header specifies the
+OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
+the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
+Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "None",
+		}},
+	}, {
+		Name: "sse_customer_algorithm",
+		Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
+Object Storage supports "AES256" as the encryption algorithm. For more information, see
+Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
+		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "None",
+		}, {
+			Value: sseDefaultAlgorithm,
+			Help:  sseDefaultAlgorithm,
+		}},
+	}}
 }
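
NewFs (below) calls a validateSSECustomerKeyOptions helper whose body is not part of this excerpt. A minimal sketch of the mutual-exclusivity rule that the option help texts above describe might look like the following (hypothetical body, assuming "errors" is imported; not the actual implementation):

func validateSSECustomerKeyOptionsSketch(opt *Options) error {
	// At most one of sse_customer_key_file, sse_customer_key and
	// sse_kms_key_id may be set, per the option help texts above.
	count := 0
	if opt.SSECustomerKeyFile != "" {
		count++
	}
	if opt.SSECustomerKey != "" {
		count++
	}
	if opt.SSEKMSKeyID != "" {
		count++
	}
	if count > 1 {
		return errors.New("only one of sse_customer_key_file, sse_customer_key and sse_kms_key_id can be set")
	}
	return nil
}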
@@ -59,19 +59,27 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, err
 	}
+	err = validateSSECustomerKeyOptions(opt)
+	if err != nil {
+		return nil, err
+	}
 	ci := fs.GetConfig(ctx)
 	objectStorageClient, err := newObjectStorageClient(ctx, opt)
 	if err != nil {
 		return nil, err
 	}
-	p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
+	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Set pacer retries to 2 (1 try and 1 retry) because we are
+	// relying on SDK retry mechanism, but we allow 2 attempts to
+	// retry directory listings after XMLSyntaxError
+	pc.SetRetries(2)
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
 		ci:    ci,
 		srv:   objectStorageClient,
 		cache: bucket.NewCache(),
-		pacer: fs.NewPacer(ctx, p),
+		pacer: pc,
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
189
backend/s3/s3.go
@@ -936,17 +936,11 @@ func init() {
 		}},
 	}, {
 		Name:     "endpoint",
-		Help:     "Endpoint of the Shared Gateway.",
+		Help:     "Endpoint for Storj Gateway.",
 		Provider: "Storj",
 		Examples: []fs.OptionExample{{
-			Value: "gateway.eu1.storjshare.io",
-			Help:  "EU1 Shared Gateway",
-		}, {
-			Value: "gateway.us1.storjshare.io",
-			Help:  "US1 Shared Gateway",
-		}, {
-			Value: "gateway.ap1.storjshare.io",
-			Help:  "Asia-Pacific Shared Gateway",
+			Value: "gateway.storjshare.io",
+			Help:  "Global Hosted Gateway",
 		}},
 	}, {
 		// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
@@ -2272,6 +2266,11 @@ rclone's choice here.
 		Help:     `Suppress setting and reading of system metadata`,
 		Advanced: true,
 		Default:  false,
+	}, {
+		Name:     "sts_endpoint",
+		Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
+		Provider: "AWS",
+		Advanced: true,
 	},
 	}})
 }
@@ -2358,6 +2357,7 @@ type Options struct {
 	SecretAccessKey    string `config:"secret_access_key"`
 	Region             string `config:"region"`
 	Endpoint           string `config:"endpoint"`
+	STSEndpoint        string `config:"sts_endpoint"`
 	LocationConstraint string `config:"location_constraint"`
 	ACL                string `config:"acl"`
 	BucketACL          string `config:"bucket_acl"`
@@ -2534,7 +2534,7 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
@@ -2566,6 +2566,38 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
 	}
 }

+// Default name resolver
+var defaultResolver = endpoints.DefaultResolver()
+
+// resolve (service, region) to endpoint
+//
+// Used to set endpoint for s3 services and not for other services
+type resolver map[string]string
+
+// Add a service to the resolver, ignoring empty urls
+func (r resolver) addService(service, url string) {
+	if url == "" {
+		return
+	}
+	if !strings.HasPrefix(url, "http") {
+		url = "https://" + url
+	}
+	r[service] = url
+}
+
+// EndpointFor return the endpoint for s3 if set or the default if not
+func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+	fs.Debugf(nil, "Resolving service %q region %q", service, region)
+	url, ok := r[service]
+	if ok {
+		return endpoints.ResolvedEndpoint{
+			URL:           url,
+			SigningRegion: region,
+		}, nil
+	}
+	return defaultResolver.EndpointFor(service, region, opts...)
+}
+
 // s3Connection makes a connection to s3
 func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
 	ci := fs.GetConfig(ctx)
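
A short usage sketch of the resolver above (a fragment, assuming the aws-sdk-go endpoints package is imported and fmt is available; "minio.example.com:9000" is a made-up endpoint): only registered services are overridden, everything else falls through to the SDK's default resolver.

func resolverSketch() {
	r := make(resolver)
	// Bare hosts are normalised to https:// by addService.
	r.addService("s3", "minio.example.com:9000")

	ep, _ := r.EndpointFor("s3", "us-east-1")
	fmt.Println(ep.URL) // https://minio.example.com:9000 (the override)

	ep, _ = r.EndpointFor("sts", "us-east-1")
	fmt.Println(ep.URL) // the default AWS STS endpoint for us-east-1
}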
@@ -2644,8 +2676,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	if opt.Region != "" {
 		awsConfig.WithRegion(opt.Region)
 	}
-	if opt.Endpoint != "" {
-		awsConfig.WithEndpoint(opt.Endpoint)
+	if opt.Endpoint != "" || opt.STSEndpoint != "" {
+		// If endpoints are set, override the relevant services only
+		r := make(resolver)
+		r.addService("s3", opt.Endpoint)
+		r.addService("sts", opt.STSEndpoint)
+		awsConfig.WithEndpointResolver(r)
 	}

 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
@@ -2995,6 +3031,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		GetTier:     true,
 		SlowModTime: true,
 	}).Fill(ctx, f)
+	if opt.Provider == "Storj" {
+		f.features.SetTier = false
+		f.features.GetTier = false
+	}
+	if opt.Provider == "IDrive" {
+		f.features.SetTier = false
+	}
+	// f.listMultipartUploads()
+
 	if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
 		// Check to see if the (bucket,directory) is actually an existing file
 		oldRoot := f.root
@@ -3009,14 +3054,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile
 	}
-	if opt.Provider == "Storj" {
-		f.features.SetTier = false
-		f.features.GetTier = false
-	}
-	if opt.Provider == "IDrive" {
-		f.features.SetTier = false
-	}
-	// f.listMultipartUploads()
 	return f, nil
 }
@@ -3431,15 +3468,16 @@ var errEndList = errors.New("end list")

 // list options
 type listOpt struct {
-	bucket       string  // bucket to list
-	directory    string  // directory with bucket
-	prefix       string  // prefix to remove from listing
-	addBucket    bool    // if set, the bucket is added to the start of the remote
-	recurse      bool    // if set, recurse to read sub directories
-	withVersions bool    // if set, versions are produced
-	hidden       bool    // if set, return delete markers as objects with size == isDeleteMarker
-	findFile     bool    // if set, it will look for files called (bucket, directory)
-	versionAt    fs.Time // if set only show versions <= this time
+	bucket        string  // bucket to list
+	directory     string  // directory with bucket
+	prefix        string  // prefix to remove from listing
+	addBucket     bool    // if set, the bucket is added to the start of the remote
+	recurse       bool    // if set, recurse to read sub directories
+	withVersions  bool    // if set, versions are produced
+	hidden        bool    // if set, return delete markers as objects with size == isDeleteMarker
+	findFile      bool    // if set, it will look for files called (bucket, directory)
+	versionAt     fs.Time // if set only show versions <= this time
+	noSkipMarkers bool    // if set return dir marker objects
 }

 // list lists the objects into the function supplied with the opt
@@ -3552,7 +3590,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 		}
 		remote = remote[len(opt.prefix):]
 		if opt.addBucket {
-			remote = path.Join(opt.bucket, remote)
+			remote = bucket.Join(opt.bucket, remote)
 		}
 		remote = strings.TrimSuffix(remote, "/")
 		err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3581,10 +3619,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 		remote = remote[len(opt.prefix):]
 		isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 		if opt.addBucket {
-			remote = path.Join(opt.bucket, remote)
+			remote = bucket.Join(opt.bucket, remote)
 		}
 		// is this a directory marker?
-		if isDirectory && object.Size != nil && *object.Size == 0 {
+		if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
 			continue // skip directory marker
 		}
 		if versionIDs != nil {
|
||||
req.Bucket = &dstBucket
|
||||
req.ACL = stringPointerOrNil(f.opt.ACL)
|
||||
req.Key = &dstPath
|
||||
source := pathEscape(path.Join(srcBucket, srcPath))
|
||||
source := pathEscape(bucket.Join(srcBucket, srcPath))
|
||||
if src.versionID != nil {
|
||||
source += fmt.Sprintf("?versionId=%s", *src.versionID)
|
||||
}
|
||||
@@ -4109,9 +4147,9 @@ Usage Examples:
|
||||
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
|
||||
This flag also obeys the filters. Test first with -i/--interactive or --dry-run flags
|
||||
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
|
||||
|
||||
rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
|
||||
All the objects shown will be marked for restore, then
|
||||
|
||||
@@ -4179,8 +4217,8 @@ a bucket or with a bucket and path.
|
||||
Long: `This command removes unfinished multipart uploads of age greater than
|
||||
max-age which defaults to 24 hours.
|
||||
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup s3:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
|
||||
@@ -4196,8 +4234,8 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
Long: `This command removes any old hidden versions of files
|
||||
on a versions enabled bucket.
|
||||
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup-hidden s3:bucket/path/to/dir
|
||||
`,
|
||||
@@ -4531,13 +4569,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 		delErr <- operations.DeleteFiles(ctx, delChan)
 	}()
 	checkErr(f.list(ctx, listOpt{
-		bucket:       bucket,
-		directory:    directory,
-		prefix:       f.rootDirectory,
-		addBucket:    f.rootBucket == "",
-		recurse:      true,
-		withVersions: versioned,
-		hidden:       true,
+		bucket:        bucket,
+		directory:     directory,
+		prefix:        f.rootDirectory,
+		addBucket:     f.rootBucket == "",
+		recurse:       true,
+		withVersions:  versioned,
+		hidden:        true,
+		noSkipMarkers: true,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		if isDirectory {
 			return nil
@@ -4547,7 +4586,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			fs.Errorf(object, "Can't create object %+v", err)
 			return nil
 		}
-		tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+		tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
 		// Work out whether the file is the current version or not
 		isCurrentVersion := !versioned || !version.Match(remote)
 		fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
@@ -4991,7 +5030,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

 var warnStreamUpload sync.Once

-func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, versionID *string, err error) {
+func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (wantETag, gotETag string, versionID *string, err error) {
 	f := o.fs

 	// make concurrency machinery
@@ -5035,7 +5074,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return etag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
+		return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
 	}
 	uid := cout.UploadId

@@ -5108,7 +5147,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 			finished = true
 		} else if err != nil {
 			free()
-			return etag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
+			return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
 		}
 		buf = buf[:n]

@@ -5163,7 +5202,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 	}
 	err = g.Wait()
 	if err != nil {
-		return etag, nil, err
+		return wantETag, gotETag, nil, err
 	}

 	// sort the completed parts by part number
@@ -5185,14 +5224,17 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		return f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return etag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
+		return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
 	}
 	hashOfHashes := md5.Sum(md5s)
-	etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
+	wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
 	if resp != nil {
+		if resp.ETag != nil {
+			gotETag = *resp.ETag
+		}
 		versionID = resp.VersionId
 	}
-	return etag, versionID, nil
+	return wantETag, gotETag, versionID, nil
 }
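
The wantETag computed above follows the usual S3 multipart convention: the hex MD5 of the concatenated binary part MD5s, suffixed with "-" and the part count. A standalone sketch of that calculation (with made-up part contents):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	parts := [][]byte{[]byte("part one"), []byte("part two")} // hypothetical part data
	var md5s []byte
	for _, part := range parts {
		sum := md5.Sum(part) // MD5 of each uploaded part
		md5s = append(md5s, sum[:]...)
	}
	// The composite ETag: MD5 of the concatenated part MD5s plus "-<N>".
	hashOfHashes := md5.Sum(md5s)
	fmt.Printf("%s-%d\n", hex.EncodeToString(hashOfHashes[:]), len(parts))
}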
 // unWrapAwsError unwraps AWS errors, looking for a non AWS error
@@ -5492,16 +5534,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	var wantETag string        // Multipart upload Etag to check
-	var gotEtag string         // Etag we got from the upload
+	var gotETag string         // Etag we got from the upload
 	var lastModified time.Time // Time we got from the upload
 	var versionID *string      // versionID we got from the upload
 	if multipart {
-		wantETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
+		wantETag, gotETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
 	} else {
 		if o.fs.opt.UsePresignedRequest {
-			gotEtag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
+			gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
 		} else {
-			gotEtag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
+			gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
 		}
 	}
 	if err != nil {
@@ -5517,32 +5559,33 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// User requested we don't HEAD the object after uploading it
 	// so make up the object as best we can assuming it got
 	// uploaded properly. If size < 0 then we need to do the HEAD.
+	var head *s3.HeadObjectOutput
 	if o.fs.opt.NoHead && size >= 0 {
-		var head s3.HeadObjectOutput
-		//structs.SetFrom(&head, &req)
-		setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
+		head = new(s3.HeadObjectOutput)
+		//structs.SetFrom(head, &req)
+		setFrom_s3HeadObjectOutput_s3PutObjectInput(head, &req)
 		head.ETag = &md5sumHex // doesn't matter quotes are missing
 		head.ContentLength = &size
-		// If we have done a single part PUT request then we can read these
-		if gotEtag != "" {
-			head.ETag = &gotEtag
+		// We get etag back from single and multipart upload so fill it in here
+		if gotETag != "" {
+			head.ETag = &gotETag
 		}
 		if lastModified.IsZero() {
 			lastModified = time.Now()
 		}
 		head.LastModified = &lastModified
 		head.VersionId = versionID
-		o.setMetaData(&head)
-		return nil
-	}
-
-	// Read the metadata from the newly created object
-	o.meta = nil // wipe old metadata
-	head, err := o.headObject(ctx)
-	if err != nil {
-		return err
+	} else {
+		// Read the metadata from the newly created object
+		o.meta = nil // wipe old metadata
+		head, err = o.headObject(ctx)
+		if err != nil {
+			return err
+		}
 	}
 	o.setMetaData(head)

 	// Check multipart upload ETag if required
 	if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
 		gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
 		if wantETag != gotETag {
54
backend/seafile/renew.go
Normal file
@@ -0,0 +1,54 @@
+package seafile
+
+import (
+	"sync"
+	"time"
+
+	"github.com/rclone/rclone/fs"
+)
+
+// Renew allows tokens to be renewed on expiry.
+type Renew struct {
+	ts       *time.Ticker     // timer indicating when it's time to renew the token
+	run      func() error     // the callback to do the renewal
+	done     chan interface{} // channel to end the go routine
+	shutdown *sync.Once
+}
+
+// NewRenew creates a new Renew struct and starts a background process
+// which renews the token whenever it expires. It uses the run() call
+// to do the renewal.
+func NewRenew(every time.Duration, run func() error) *Renew {
+	r := &Renew{
+		ts:       time.NewTicker(every),
+		run:      run,
+		done:     make(chan interface{}),
+		shutdown: &sync.Once{},
+	}
+	go r.renewOnExpiry()
+	return r
+}
+
+func (r *Renew) renewOnExpiry() {
+	for {
+		select {
+		case <-r.ts.C:
+			err := r.run()
+			if err != nil {
+				fs.Errorf(nil, "error while refreshing decryption token: %s", err)
+			}
+
+		case <-r.done:
+			return
+		}
+	}
+}
+
+// Shutdown stops the ticker and no more renewal will take place.
+func (r *Renew) Shutdown() {
+	// closing a channel can only be done once
+	r.shutdown.Do(func() {
+		r.ts.Stop()
+		close(r.done)
+	})
+}
34
backend/seafile/renew_test.go
Normal file
@@ -0,0 +1,34 @@
+package seafile
+
+import (
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestShouldAllowShutdownTwice(t *testing.T) {
+	renew := NewRenew(time.Hour, func() error {
+		return nil
+	})
+	renew.Shutdown()
+	renew.Shutdown()
+}
+
+func TestRenewalInTimeLimit(t *testing.T) {
+	var count int64
+
+	renew := NewRenew(100*time.Millisecond, func() error {
+		atomic.AddInt64(&count, 1)
+		return nil
+	})
+	time.Sleep(time.Second)
+	renew.Shutdown()
+
+	// there's no guarantee the CI agent can handle a simple goroutine
+	renewCount := atomic.LoadInt64(&count)
+	t.Logf("renew count = %d", renewCount)
+	assert.Greater(t, renewCount, int64(0))
+	assert.Less(t, renewCount, int64(11))
+}
@@ -143,6 +143,7 @@ type Fs struct {
 	createDirMutex      sync.Mutex // Protect creation of directories
 	useOldDirectoryAPI  bool       // Use the old API v2 if seafile < 7
 	moveDirNotAvailable bool       // Version < 7.0 don't have an API to move a directory
+	renew               *Renew     // Renew an encrypted library token
 }

 // ------------------------------------------------------------
@@ -268,6 +269,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			}
 			// And remove the public link feature
 			f.features.PublicLink = nil
+
+			// renew the library password every 45 minutes
+			f.renew = NewRenew(45*time.Minute, func() error {
+				return f.authorizeLibrary(context.Background(), libraryID)
+			})
 		}
 	} else {
 		// Deactivate the cleaner feature since there's no library selected
@@ -383,6 +389,15 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 	return nil, fmt.Errorf("unknown state %q", config.State)
 }

+// Shutdown the Fs
+func (f *Fs) Shutdown(ctx context.Context) error {
+	if f.renew == nil {
+		return nil
+	}
+	f.renew.Shutdown()
+	return nil
+}
+
 // sets the AuthorizationToken up
 func (f *Fs) setAuthorizationToken(token string) {
 	f.srv.SetHeader("Authorization", "Token "+token)
@@ -1331,6 +1346,7 @@ var (
 	_ fs.PutStreamer  = &Fs{}
 	_ fs.PublicLinker = &Fs{}
 	_ fs.UserInfoer   = &Fs{}
+	_ fs.Shutdowner   = &Fs{}
 	_ fs.Object       = &Object{}
 	_ fs.IDer         = &Object{}
 )
@@ -34,9 +34,10 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {

 	d := &smb2.Dialer{
 		Initiator: &smb2.NTLMInitiator{
-			User:     f.opt.User,
-			Password: pass,
-			Domain:   f.opt.Domain,
+			User:      f.opt.User,
+			Password:  pass,
+			Domain:    f.opt.Domain,
+			TargetSPN: f.opt.SPN,
 		},
 	}

@@ -81,7 +82,7 @@ func (c *conn) closed() bool {
 		// list the shares
 		_, nopErr = c.smbSession.ListSharenames()
 	}
-	return nopErr == nil
+	return nopErr != nil
 }

 // Show that we are using a SMB session
@@ -60,6 +60,17 @@ func init() {
 		Name:    "domain",
 		Help:    "Domain name for NTLM authentication.",
 		Default: "WORKGROUP",
+	}, {
+		Name: "spn",
+		Help: `Service principal name.
+
+Rclone presents this name to the server. Some servers use this as further
+authentication, and it often needs to be set for clusters. For example:
+
+    cifs/remotehost:1020
+
+Leave blank if not sure.
+`,
 	}, {
 		Name:    "idle_timeout",
 		Default: fs.Duration(60 * time.Second),
@@ -109,6 +120,7 @@ type Options struct {
 	User            string      `config:"user"`
 	Pass            string      `config:"pass"`
 	Domain          string      `config:"domain"`
+	SPN             string      `config:"spn"`
 	HideSpecial     bool        `config:"hide_special_share"`
 	CaseInsensitive bool        `config:"case_insensitive"`
 	IdleTimeout     fs.Duration `config:"idle_timeout"`
@@ -23,6 +23,7 @@ import (
 	"golang.org/x/text/unicode/norm"

 	"storj.io/uplink"
+	"storj.io/uplink/edge"
 )

 const (
@@ -31,9 +32,9 @@ const (
 )

 var satMap = map[string]string{
-	"us-central-1.storj.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
-	"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
-	"asia-east-1.storj.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
+	"us1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777",
+	"eu1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777",
+	"ap1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777",
 }

 // Register with Fs
@@ -105,16 +106,16 @@ func init() {
 	Name:     "satellite_address",
 	Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
 	Provider: newProvider,
-	Default:  "us-central-1.storj.io",
+	Default:  "us1.storj.io",
 	Examples: []fs.OptionExample{{
-		Value: "us-central-1.storj.io",
-		Help:  "US Central 1",
+		Value: "us1.storj.io",
+		Help:  "US1",
 	}, {
-		Value: "europe-west-1.storj.io",
-		Help:  "Europe West 1",
+		Value: "eu1.storj.io",
+		Help:  "EU1",
 	}, {
-		Value: "asia-east-1.storj.io",
-		Help:  "Asia East 1",
+		Value: "ap1.storj.io",
+		Help:  "AP1",
 	},
 	},
 },
@@ -156,11 +157,13 @@ type Fs struct {

 // Check the interfaces are satisfied.
 var (
-	_ fs.Fs          = &Fs{}
-	_ fs.ListRer     = &Fs{}
-	_ fs.PutStreamer = &Fs{}
-	_ fs.Mover       = &Fs{}
-	_ fs.Copier      = &Fs{}
+	_ fs.Fs           = &Fs{}
+	_ fs.ListRer      = &Fs{}
+	_ fs.PutStreamer  = &Fs{}
+	_ fs.Mover        = &Fs{}
+	_ fs.Copier       = &Fs{}
+	_ fs.Purger       = &Fs{}
+	_ fs.PublicLinker = &Fs{}
 )

 // NewFs creates a filesystem backed by Storj.
@@ -545,7 +548,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	defer func() {
 		if err != nil {
 			aerr := upload.Abort()
-			if aerr != nil {
+			if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
 				fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
 			}
 		}
@@ -560,6 +563,16 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 	_, err = io.Copy(upload, in)
 	if err != nil {
+		if errors.Is(err, uplink.ErrBucketNotFound) {
+			// Rclone assumes the backend will create the bucket if not existing yet.
+			// Here we create the bucket and return a retry error for rclone to retry the upload.
+			_, err = f.project.EnsureBucket(ctx, bucketName)
+			if err != nil {
+				return nil, err
+			}
+			return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
+		}
+
 		err = fserrors.RetryError(err)
 		fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
@@ -761,3 +774,103 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	// Return the new object
 	return newObjectFromUplink(f, remote, newObject), nil
 }
+
+// Purge all files in the directory specified
+//
+// Implement this if you have a way of deleting all the files
+// quicker than just running Remove() on the result of List()
+//
+// Return an error if it doesn't exist
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	bucket, directory := f.absolute(dir)
+	if bucket == "" {
+		return errors.New("can't purge from root")
+	}
+
+	if directory == "" {
+		_, err := f.project.DeleteBucketWithObjects(ctx, bucket)
+		if errors.Is(err, uplink.ErrBucketNotFound) {
+			return fs.ErrorDirNotFound
+		}
+		return err
+	}
+
+	fs.Infof(directory, "Quick delete is available only for entire bucket. Falling back to list and delete.")
+	objects := f.project.ListObjects(ctx, bucket,
+		&uplink.ListObjectsOptions{
+			Prefix:    directory + "/",
+			Recursive: true,
+		},
+	)
+	if err := objects.Err(); err != nil {
+		return err
+	}
+
+	empty := true
+	for objects.Next() {
+		empty = false
+		_, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key)
+		if err != nil {
+			return err
+		}
+		fs.Infof(objects.Item().Key, "Deleted")
+	}
+
+	if empty {
+		return fs.ErrorDirNotFound
+	}
+
+	return nil
+}
+
+// PublicLink generates a public link to the remote path (usually readable by anyone)
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
+	bucket, key := f.absolute(remote)
+	if bucket == "" {
+		return "", errors.New("path must be specified")
+	}
+
+	// Rclone requires that a link is only generated if the remote path exists
+	if key == "" {
+		_, err := f.project.StatBucket(ctx, bucket)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		_, err := f.project.StatObject(ctx, bucket, key)
+		if err != nil {
+			if !errors.Is(err, uplink.ErrObjectNotFound) {
+				return "", err
+			}
+			// No object found, check if there is such a prefix
+			iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"})
+			if iter.Err() != nil {
+				return "", iter.Err()
+			}
+			if !iter.Next() {
+				return "", err
+			}
+		}
+	}
+
+	sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key}
+
+	permission := uplink.ReadOnlyPermission()
+	if expire.IsSet() {
+		permission.NotAfter = time.Now().Add(time.Duration(expire))
+	}
+
+	sharedAccess, err := f.access.Share(permission, sharedPrefix)
+	if err != nil {
+		return "", fmt.Errorf("sharing access to object failed: %w", err)
+	}
+
+	creds, err := (&edge.Config{
+		AuthServiceAddress: "auth.storjshare.io:7777",
+	}).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true})
+	if err != nil {
+		return "", fmt.Errorf("creating public link failed: %w", err)
+	}

+	return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil)
+}
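
A sketch of how a caller might exercise the new PublicLink through the generic rclone feature machinery (a fragment, assuming an fs.Fs whose backend implements fs.PublicLinker, such as the Fs above; fs.DurationOff is used here on the assumption it means "no expiry"):

func linkSketch(ctx context.Context, f fs.Fs) (string, error) {
	do := f.Features().PublicLink
	if do == nil {
		return "", errors.New("PublicLink not supported by this backend")
	}
	// No expiry; unlink=false keeps the shared access in place.
	return do(ctx, "path/to/object", fs.DurationOff, false)
}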
@@ -214,7 +214,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe

 	client := fshttp.NewClient(ctx)
 	f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
-	f.IDRegexp = regexp.MustCompile("https://uptobox.com/([a-zA-Z0-9]+)")
+	f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)

 	_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
 	if err != nil {
@@ -712,6 +712,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
 			continue
 		}
 		subPath := u.Path[len(baseURL.Path):]
+		subPath = strings.TrimPrefix(subPath, "/") // ignore leading / here for davrods
 		if f.opt.Enc != encoder.EncodeZero {
 			subPath = f.opt.Enc.ToStandardPath(subPath)
 		}
@@ -24,15 +24,23 @@ var (
 // prepareServer the test server and return a function to tidy it up afterwards
 // with each request the headers option tests are executed
 func prepareServer(t *testing.T) (configmap.Simple, func()) {
-	// file server
-	fileServer := http.FileServer(http.Dir(""))
-
-	// test the headers are there then pass on to fileServer
+	// test the headers are there then send a dummy response to About
 	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
 		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
 		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
-		fileServer.ServeHTTP(w, r)
+		fmt.Fprintf(w, `<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
+ <d:response>
+  <d:href>/remote.php/webdav/</d:href>
+  <d:propstat>
+   <d:prop>
+    <d:quota-available-bytes>-3</d:quota-available-bytes>
+    <d:quota-used-bytes>376461895</d:quota-used-bytes>
+   </d:prop>
+   <d:status>HTTP/1.1 200 OK</d:status>
+  </d:propstat>
+ </d:response>
+</d:multistatus>`)
 	})

 	// Make the test server
@@ -68,7 +76,7 @@ func TestHeaders(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	// any request will do
+	// send an About request since that is all the dummy server can return
 	_, err := f.Features().About(context.Background())
 	require.NoError(t, err)
 }
@@ -57,6 +57,7 @@ var osarches = []string{
 	"linux/386",
 	"linux/amd64",
 	"linux/arm",
+	"linux/arm-v6",
 	"linux/arm-v7",
 	"linux/arm64",
 	"linux/mips",
@@ -64,10 +65,12 @@ var osarches = []string{
 	"freebsd/386",
 	"freebsd/amd64",
 	"freebsd/arm",
+	"freebsd/arm-v6",
 	"freebsd/arm-v7",
 	"netbsd/386",
 	"netbsd/amd64",
 	"netbsd/arm",
+	"netbsd/arm-v6",
 	"netbsd/arm-v7",
 	"openbsd/386",
 	"openbsd/amd64",
@@ -82,13 +85,16 @@ var archFlags = map[string][]string{
 	"386":    {"GO386=softfloat"},
 	"mips":   {"GOMIPS=softfloat"},
 	"mipsle": {"GOMIPS=softfloat"},
 	"arm":    {"GOARM=5"},
+	"arm-v6": {"GOARM=6"},
 	"arm-v7": {"GOARM=7"},
 }

 // Map Go architectures to NFPM architectures
 // Any missing are passed straight through
 var goarchToNfpm = map[string]string{
-	"arm":    "arm6",
+	"arm":    "arm5",
+	"arm-v6": "arm6",
+	"arm-v7": "arm7",
 }
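
The comment above says unmapped architectures pass straight through; a minimal sketch of that lookup (a hypothetical helper, assuming the goarchToNfpm map above is in scope):

func nfpmArch(goarch string) string {
	if nfpm, ok := goarchToNfpm[goarch]; ok {
		return nfpm // e.g. "arm" -> "arm5", "arm-v7" -> "arm7"
	}
	return goarch // any missing are passed straight through
}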
@@ -12,12 +12,14 @@ import (

 var (
 	noAutoBrowser bool
+	template      string
 )

 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
+	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
 }

 var commandDefinition = &cobra.Command{
@@ -28,13 +30,15 @@ Remote authorization. Used to authorize a remote or headless
 rclone from a machine with a browser - use as instructed by
 rclone config.

-Use the --auth-no-open-browser to prevent rclone to open auth
-link in default browser automatically.`,
+Use --auth-no-open-browser to prevent rclone to open auth
+link in default browser automatically.
+
+Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.27",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(1, 3, command, args)
-		return config.Authorize(context.Background(), args, noAutoBrowser)
+		return config.Authorize(context.Background(), args, noAutoBrowser, template)
 	},
 }
12
cmd/cmd.go
@@ -73,11 +73,13 @@ func ShowVersion() {

 	linking, tagString := buildinfo.GetLinkingAndTags()

+	arch := buildinfo.GetArch()
+
 	fmt.Printf("rclone %s\n", fs.Version)
 	fmt.Printf("- os/version: %s\n", osVersion)
 	fmt.Printf("- os/kernel: %s\n", osKernel)
 	fmt.Printf("- os/type: %s\n", runtime.GOOS)
-	fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
+	fmt.Printf("- os/arch: %s\n", arch)
 	fmt.Printf("- go/version: %s\n", runtime.Version())
 	fmt.Printf("- go/linking: %s\n", linking)
 	fmt.Printf("- go/tags: %s\n", tagString)
@@ -399,9 +401,15 @@ func initConfig() {
 	// Start accounting
 	accounting.Start(ctx)

-	// Hide console window
+	// Configure console
 	if ci.NoConsole {
+		// Hide the console window
 		terminal.HideConsole()
+	} else {
+		// Enable color support on stdout if possible.
+		// This enables virtual terminal processing on Windows 10,
+		// adding native support for ANSI/VT100 escape sequences.
+		terminal.EnableColorsStdout()
 	}

 	// Load filters
@@ -8,6 +8,7 @@ import (
 	"io"
 	"os"
 	"path"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -567,6 +568,21 @@ func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
 	return -fuse.ENOSYS
 }

+// Getpath allows a case-insensitive file system to report the correct case of
+// a file path.
+func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string) {
+	defer log.Trace(path, "Getpath fh=%d", fh)("errc=%d, normalisedPath=%q", &errc, &normalisedPath)
+	node, _, errc := fsys.getNode(path, fh)
+	if errc != 0 {
+		return errc, ""
+	}
+	normalisedPath = node.Path()
+	if !strings.HasPrefix("/", normalisedPath) {
+		normalisedPath = "/" + normalisedPath
+	}
+	return 0, normalisedPath
+}
+
 // Translate errors from mountlib
 func translateError(err error) (errc int) {
 	if err == nil {
@@ -631,6 +647,7 @@ func translateOpenFlags(inFlags int) (outFlags int) {
 var (
 	_ fuse.FileSystemInterface = (*FS)(nil)
 	_ fuse.FileSystemOpenEx    = (*FS)(nil)
+	_ fuse.FileSystemGetpath   = (*FS)(nil)
 	//_ fuse.FileSystemChflags    = (*FS)(nil)
 	//_ fuse.FileSystemSetcrtime  = (*FS)(nil)
 	//_ fuse.FileSystemSetchgtime = (*FS)(nil)
|
||||
if opt.DaemonTimeout != 0 {
|
||||
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
|
||||
}
|
||||
if opt.AllowNonEmpty {
|
||||
options = append(options, "-o", "nonempty")
|
||||
}
|
||||
if opt.AllowOther {
|
||||
options = append(options, "-o", "allow_other")
|
||||
}
|
||||
@@ -152,14 +149,14 @@ func waitFor(fn func() bool) (ok bool) {
|
||||
// report an error when fusermount is called.
|
||||
func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error, func() error, error) {
|
||||
// Get mountpoint using OS specific logic
|
||||
mountpoint, err := getMountpoint(mountPath, opt)
|
||||
f := VFS.Fs()
|
||||
mountpoint, err := getMountpoint(f, mountPath, opt)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
fs.Debugf(nil, "Mounting on %q (%q)", mountpoint, opt.VolumeName)
|
||||
|
||||
// Create underlying FS
|
||||
f := VFS.Fs()
|
||||
fsys := NewFS(VFS)
|
||||
host := fuse.NewFileSystemHost(fsys)
|
||||
host.SetCapReaddirPlus(true) // only works on Windows
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
 	"os"

 	"github.com/rclone/rclone/cmd/mountlib"
+	"github.com/rclone/rclone/fs"
 )

-func getMountpoint(mountPath string, opt *mountlib.Options) (string, error) {
+func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, error) {
 	fi, err := os.Stat(mountPath)
 	if err != nil {
 		return "", fmt.Errorf("failed to retrieve mount path information: %w", err)
@@ -19,5 +20,11 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
 	if !fi.IsDir() {
 		return "", errors.New("mount path is not a directory")
 	}
+	if err = mountlib.CheckOverlap(f, mountPath); err != nil {
+		return "", err
+	}
+	if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
+		return "", err
+	}
 	return mountPath, nil
 }
@@ -18,13 +18,13 @@ import (
 var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
 var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
 var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
-var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)
+var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)

 // isNetworkSharePath returns true if the given string is a valid network share path,
 // in the basic UNC format "\\Server\Share\Path", where the first two path components
 // are required ("\\Server\Share", which represents the volume).
 // Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
-// not supported by cgofuse/winfsp.
+// not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
 // Note: There is a UNCPath function in lib/file, but it refers to any extended-length
 // paths using prefix "\\?\", and not necessarily network resource UNC paths.
 func isNetworkSharePath(l string) bool {
@@ -94,7 +94,7 @@ func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (strin
 }

 // handleLocalMountpath handles the case where mount path is a local file system path.
-func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, error) {
+func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (string, error) {
 	// Assuming path is drive letter or directory path, not network share (UNC) path.
 	// If drive letter: Must be given as a single character followed by ":" and nothing else.
 	// Else, assume directory path: Directory must not exist, but its parent must.
@@ -125,6 +125,9 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
 			}
 			return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err)
 		}
+		if err = mountlib.CheckOverlap(f, mountpath); err != nil {
+			return "", err
+		}
 	}
 	return mountpath, nil
 }
@@ -158,9 +161,19 @@ func handleVolumeName(opt *mountlib.Options, volumeName string) {

 // getMountpoint handles mounting details on Windows,
 // where disk and network based file systems are treated different.
-func getMountpoint(mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
+func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
+	// Inform about some options not relevant in this mode
+	if opt.AllowNonEmpty {
+		fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
+	}
+	if opt.AllowRoot {
+		fs.Logf(nil, "--allow-root flag does nothing on Windows")
+	}
+	if opt.AllowOther {
+		fs.Logf(nil, "--allow-other flag does nothing on Windows")
+	}

-	// First handle mountpath
+	// Handle mountpath
 	var volumeName string
 	if isDefaultPath(mountpath) {
 		// Mount path indicates defaults, which will automatically pick an unused drive letter.
@@ -172,10 +185,10 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
 		volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
 	} else {
 		// Mount path is drive letter or directory path.
-		mountpoint, err = handleLocalMountpath(mountpath, opt)
+		mountpoint, err = handleLocalMountpath(f, mountpath, opt)
 	}

-	// Second handle volume name
+	// Handle volume name
 	handleVolumeName(opt, volumeName)

 	// Done, return mountpoint to be used, together with updated mount options.
@@ -6,6 +6,7 @@ import (
 	"fmt"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
@@ -27,12 +28,12 @@ it will always be removed.
 	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
-		fs, fileName := cmd.NewFsFile(args[0])
+		f, fileName := cmd.NewFsFile(args[0])
 		cmd.Run(true, false, command, func() error {
 			if fileName == "" {
-				return fmt.Errorf("%s is a directory or doesn't exist", args[0])
+				return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
 			}
-			fileObj, err := fs.NewObject(context.Background(), fileName)
+			fileObj, err := f.NewObject(context.Background(), fileName)
 			if err != nil {
 				return err
 			}
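
Because the message is now wrapped with %w, callers can test for the sentinel with errors.Is. A minimal sketch (a fragment, assuming the standard errors and fmt packages plus rclone's fs package):

func isNotFoundSketch() bool {
	err := fmt.Errorf("%s is a directory or doesn't exist: %w", "remote:file.txt", fs.ErrorObjectNotFound)
	// Scripted callers can now distinguish "not found" from other failures.
	return errors.Is(err, fs.ErrorObjectNotFound) // true
}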
@@ -34,9 +34,6 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
 	if opt.AsyncRead {
 		options = append(options, fuse.AsyncRead())
 	}
-	if opt.AllowNonEmpty {
-		options = append(options, fuse.AllowNonEmptyMount())
-	}
 	if opt.AllowOther {
 		options = append(options, fuse.AllowOther())
 	}
@@ -72,9 +69,17 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
 // returns an error, and an error channel for the serve process to
 // report an error when fusermount is called.
 func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
+	f := VFS.Fs()
 	if runtime.GOOS == "darwin" {
 		fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
 	}
+	if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
+		return nil, nil, err
+	}
+	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
+		return nil, nil, err
+	}
+	fs.Debugf(f, "Mounting on %q", mountpoint)

 	if opt.DebugFUSE {
 		fuse.Debug = func(msg interface{}) {
@@ -82,8 +87,6 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 		}
 	}

-	f := VFS.Fs()
-	fs.Debugf(f, "Mounting on %q", mountpoint)
 	c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
 	if err != nil {
 		return nil, nil, err
@@ -95,9 +95,6 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
 	}
 	var opts []string
 	// FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
-	if fsys.opt.AllowNonEmpty {
-		opts = append(opts, "nonempty")
-	}
 	if fsys.opt.AllowOther {
 		opts = append(opts, "allow_other")
 	}
@@ -148,9 +145,16 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
 // report an error when fusermount is called.
 func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
 	f := VFS.Fs()
+	if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
+		return nil, nil, err
+	}
+	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
+		return nil, nil, err
+	}
 	fs.Debugf(f, "Mounting on %q", mountpoint)

 	fsys := NewFS(VFS, opt)

 	// nodeFsOpts := &fusefs.PathNodeFsOptions{
 	// 	ClientInodes: false,
 	// 	Debug:        mountlib.DebugFUSE,
@@ -33,12 +33,20 @@ func CheckMountEmpty(mountpoint string) error {
 	if err != nil {
 		return fmt.Errorf("cannot read %s: %w", mtabPath, err)
 	}
+	foundAutofs := false
 	for _, entry := range entries {
-		if entry.Dir == mountpointAbs && entry.Type != "autofs" {
-			return fmt.Errorf(msg, mountpointAbs)
+		if entry.Dir == mountpointAbs {
+			if entry.Type != "autofs" {
+				return fmt.Errorf(msg, mountpointAbs)
+			}
+			foundAutofs = true
 		}
 	}
-	return nil
+	// It isn't safe to list an autofs in the middle of mounting
+	if foundAutofs {
+		return nil
+	}
+	return checkMountEmpty(mountpoint)
 }

 // CheckMountReady checks whether mountpoint is mounted by rclone.
@@ -4,33 +4,13 @@
package mountlib

import (
    "fmt"
    "io"
    "os"
    "time"

    "github.com/rclone/rclone/fs"
)

// CheckMountEmpty checks if mountpoint folder is empty.
// On non-Linux unixes we list directory to ensure that.
func CheckMountEmpty(mountpoint string) error {
    fp, err := os.Open(mountpoint)
    if err != nil {
        return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
    }
    defer fs.CheckClose(fp, &err)

    _, err = fp.Readdirnames(1)
    if err == io.EOF {
        return nil
    }

    const msg = "directory is not empty, use --allow-non-empty to mount anyway: %s"
    if err == nil {
        return fmt.Errorf(msg, mountpoint)
    }
    return fmt.Errorf(msg+": %w", mountpoint, err)
    return checkMountEmpty(mountpoint)
}

// CheckMountReady should check if mountpoint is mounted by rclone.

@@ -159,38 +159,59 @@ group "Everyone" will be used to represent others. The user/group can be customi
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to [options](#options)
|--dir-perms| and |--file-perms|, which takes a value in traditional
|--dir-perms| and |--file-perms|, which take a value in traditional Unix
[numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation).

The default permissions correspond to |--file-perms 0666 --dir-perms 0777|,
i.e. read and write permissions for everyone. This means you will not be able
to start any programs from the mount. To be able to do that you must add
execute permissions, e.g. |--file-perms 0777 --dir-perms 0777| to add it
to everyone. If the program needs to write files, chances are you will have
to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).
to everyone. If the program needs to write files, chances are you will
have to enable [VFS File Caching](#vfs-file-caching) as well (see also
[limitations](#limitations)). Note that the default write permission has
some restrictions for accounts other than the owner; specifically, it lacks
the "write extended attributes" permission, as explained next.

Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
The mapping of permissions is not always trivial, and the result you see in
Windows Explorer may not be exactly like you expected. For example, when setting
a value that includes write access for the group or others scope, this will be
mapped to individual permissions "write attributes", "write data" and
"append data", but not "write extended attributes". Windows will then show this
as basic permission "Special" instead of "Write", because "Write" also covers
the "write extended attributes" permission. When setting digit 0 for group or
others, to indicate no permissions, they will still get individual permissions
"read attributes", "read extended attributes" and "read permissions". This is
done for compatibility reasons, e.g. to allow users without additional
permissions to be able to read basic metadata about files like in Unix.

If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interpret this as the file being accessible by everyone. For example,
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
With this you get detailed control of the resulting permissions, compared
to use of the POSIX permissions described above, and no additional permissions
will be added automatically for compatibility with Unix. Some example use
cases follow.

If you set POSIX permissions for only allowing access to the owner,
using |--file-perms 0600 --dir-perms 0700|, the user group and the built-in
"Everyone" group will still be given some special permissions, as described
above. Some programs may then (incorrectly) interpret this as the file being
accessible by everyone, for example an SSH client may warn about "unprotected
private key file". You can work around this by specifying
|-o FileSecurity="D:P(A;;FA;;;OW)"|, which sets file all access (FA) to the
owner (OW), and nothing else.

When setting write permissions then, except for the owner, this does not
include the "write extended attributes" permission, as mentioned above.
This may prevent applications from writing to files, giving a permission denied
error instead. To set working write permissions for the built-in "Everyone"
group, similar to what it gets by default but with the addition of
"write extended attributes", you can specify
|-o FileSecurity="D:P(A;;FRFW;;;WD)"|, which sets file read (FR) and file
write (FW) to everyone (WD). If file execute (FX) is also needed, then change
to |-o FileSecurity="D:P(A;;FRFWFX;;;WD)"|, or set file all access (FA) to
get full access permissions, including delete, with
|-o FileSecurity="D:P(A;;FA;;;WD)"|.

#### Windows caveats

@@ -219,10 +240,16 @@ processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
Read more in the [install documentation](https://rclone.org/install/).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
Note also that it is now the SYSTEM account that will have the owner
permissions, and other accounts will have permissions according to the
group or others scopes. As mentioned above, these will then not get the
"write extended attributes" permission, and this may prevent writing to
files. You can work around this with the FileSecurity option, see
the example above.

Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.

@@ -237,13 +237,8 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm

// Mount the remote at mountpoint
func (m *MountPoint) Mount() (daemon *os.Process, err error) {
    if err = m.CheckOverlap(); err != nil {
        return nil, err
    }

    if err = m.CheckAllowed(); err != nil {
        return nil, err
    }
    // Ensure sensible defaults
    m.SetVolumeName(m.MountOpt.VolumeName)
    m.SetDeviceName(m.MountOpt.DeviceName)


@@ -2,6 +2,8 @@ package mountlib

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "strings"
@@ -32,22 +34,22 @@ func ClipBlocks(b *uint64) {
    }
}

// CheckOverlap checks that root doesn't overlap with mountpoint
func (m *MountPoint) CheckOverlap() error {
    name := m.Fs.Name()
// CheckOverlap checks that root doesn't overlap with a mountpoint
func CheckOverlap(f fs.Fs, mountpoint string) error {
    name := f.Name()
    if name != "" && name != "local" {
        return nil
    }
    rootAbs := absPath(m.Fs.Root())
    mountpointAbs := absPath(m.MountPoint)
    rootAbs := absPath(f.Root())
    mountpointAbs := absPath(mountpoint)
    if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
        const msg = "mount point %q and directory to be mounted %q mustn't overlap"
        return fmt.Errorf(msg, m.MountPoint, m.Fs.Root())
        const msg = "mount point %q (%q) and directory to be mounted %q (%q) mustn't overlap"
        return fmt.Errorf(msg, mountpoint, mountpointAbs, f.Root(), rootAbs)
    }
    return nil
}

// absPath is a helper function for MountPoint.CheckOverlap
// absPath is a helper function for CheckOverlap
func absPath(path string) string {
    if abs, err := filepath.EvalSymlinks(path); err == nil {
        path = abs
@@ -56,34 +58,45 @@ func absPath(path string) string {
        path = abs
    }
    path = filepath.ToSlash(path)
    if runtime.GOOS == "windows" {
        // Removes any UNC long path prefix to make sure a simple HasPrefix test
        // in CheckOverlap works when one is UNC (root) and one is not (mountpoint).
        path = strings.TrimPrefix(path, `//?/`)
    }
    if !strings.HasSuffix(path, "/") {
        path += "/"
    }
    return path
}

// CheckAllowed informs about ignored flags on Windows. If not on Windows
// and not --allow-non-empty flag is used, verify that mountpoint is empty.
func (m *MountPoint) CheckAllowed() error {
    opt := &m.MountOpt
    if runtime.GOOS == "windows" {
        if opt.AllowNonEmpty {
            fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
        }
        if opt.AllowRoot {
            fs.Logf(nil, "--allow-root flag does nothing on Windows")
        }
        if opt.AllowOther {
            fs.Logf(nil, "--allow-other flag does nothing on Windows")
        }
        return nil
    }
// CheckAllowNonEmpty checks --allow-non-empty flag, and if not used verifies that mountpoint is empty.
func CheckAllowNonEmpty(mountpoint string, opt *Options) error {
    if !opt.AllowNonEmpty {
        return CheckMountEmpty(m.MountPoint)
        return CheckMountEmpty(mountpoint)
    }
    return nil
}

// checkMountEmpty checks if mountpoint folder is empty by listing it.
func checkMountEmpty(mountpoint string) error {
    fp, err := os.Open(mountpoint)
    if err != nil {
        return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
    }
    defer fs.CheckClose(fp, &err)

    _, err = fp.Readdirnames(1)
    if err == io.EOF {
        return nil
    }

    const msg = "%q is not empty, use --allow-non-empty to mount anyway"
    if err == nil {
        return fmt.Errorf(msg, mountpoint)
    }
    return fmt.Errorf(msg+": %w", mountpoint, err)
}

// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
    if vol == "" {

@@ -156,6 +156,15 @@ func ParseOptions(options []string) (opt map[string]string) {
func setAlternateFlag(flagName string, output *string) {
    if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed {
        *output = rcFlag.Value.String()
        if sliceValue, ok := rcFlag.Value.(pflag.SliceValue); ok {
            stringSlice := sliceValue.GetSlice()
            for _, value := range stringSlice {
                if value != "" {
                    *output = value
                    break
                }
            }
        }
    }
}


@@ -336,11 +336,31 @@ func makeRandomExeName(baseName, extension string) (string, error) {

func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
    osName := runtime.GOOS
    arch := runtime.GOARCH
    if osName == "darwin" {
        osName = "osx"
    }

    arch := runtime.GOARCH
    if arch == "arm" {
        // Check the ARM compatibility level of the current CPU.
        // We don't know if this matches the rclone binary currently running, it
        // could for example be an ARMv6 variant running on an ARMv7 compatible CPU,
        // so we will simply pick the best possible variant.
        switch buildinfo.GetSupportedGOARM() {
        case 7:
            // This system can run any binaries built with GOARCH=arm, including GOARM=7.
            // Pick the ARMv7 variant of rclone, published with suffix "arm-v7".
            arch = "arm-v7"
        case 6:
            // This system can run binaries built with GOARCH=arm and GOARM=6 or lower.
            // Pick the ARMv6 variant of rclone, published with suffix "arm-v6".
            arch = "arm-v6"
        case 5:
            // This system can only run binaries built with GOARCH=arm and GOARM=5.
            // Pick the ARMv5 variant of rclone, which also works without hardfloat,
            // published with suffix "arm".
            arch = "arm"
        }
    }
    archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)
    archiveURL := fmt.Sprintf("%s/%s/%s", siteURL, version, archiveFilename)
    archiveBuf, err := downloadFile(ctx, archiveURL)

@@ -1,5 +1,6 @@
// Code generated by vfsgen; DO NOT EDIT.

//go:build !dev
// +build !dev

package data

@@ -60,12 +60,14 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
        case "":
            continue
        case "remote", "fs":
            p, err := fspath.Parse(str)
            if err != nil || p.Name == ":" {
                return fmt.Errorf("cannot parse path %q: %w", str, err)
            if str != "" {
                p, err := fspath.Parse(str)
                if err != nil || p.Name == ":" {
                    return fmt.Errorf("cannot parse path %q: %w", str, err)
                }
                fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
                vol.Fs = str
            }
            fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
            vol.Fs = str
        case "type":
            fsType = str
            vol.Type = str

@@ -3,6 +3,7 @@ package http

import (
    "context"
    "errors"
    "fmt"
    "io"
    "log"
@@ -11,14 +12,14 @@ import (
    "path"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/go-chi/chi/v5/middleware"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/cmd/serve/proxy"
    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/lib/atexit"
    libhttp "github.com/rclone/rclone/lib/http"
    "github.com/rclone/rclone/lib/http/serve"
    "github.com/rclone/rclone/vfs"
@@ -49,6 +50,7 @@ func init() {
    libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
    libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
    vfsflags.AddFlags(flagSet)
    proxyflags.AddFlags(flagSet)
}

// Command definition for cobra
@@ -66,58 +68,85 @@ The server will log errors. Use ` + "`-v`" + ` to see access logs.

` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to
control the stats printing.
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help,
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
        var f fs.Fs
        if proxyflags.Opt.AuthProxy == "" {
            cmd.CheckArgs(1, 1, command, args)
            f = cmd.NewFsSrc(args)
        } else {
            cmd.CheckArgs(0, 0, command, args)
        }

        cmd.Run(false, true, command, func() error {
            ctx := context.Background()

            s, err := run(ctx, f, Opt)
            s, err := run(context.Background(), f, Opt)
            if err != nil {
                log.Fatal(err)
            }

            var finaliseOnce sync.Once
            finalise := func() {
                finaliseOnce.Do(func() {
                    if err := s.server.Shutdown(); err != nil {
                        log.Printf("error shutting down server: %v", err)
                    }
                })
            }
            fnHandle := atexit.Register(finalise)
            defer atexit.Unregister(fnHandle)

            s.server.Wait()
            return nil
        })
    },
}

// server contains everything to run the server
type serveCmd struct {
// HTTP contains everything to run the server
type HTTP struct {
    f      fs.Fs
    vfs    *vfs.VFS
    _vfs   *vfs.VFS // don't use directly, use getVFS
    server *libhttp.Server
    opt    Options
    proxy  *proxy.Proxy
    ctx    context.Context // for global config
}

func run(ctx context.Context, f fs.Fs, opt Options) (*serveCmd, error) {
    var err error
// Gets the VFS in use for this request
func (s *HTTP) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
    if s._vfs != nil {
        return s._vfs, nil
    }
    value := libhttp.CtxGetAuth(ctx)
    if value == nil {
        return nil, errors.New("no VFS found in context")
    }
    VFS, ok := value.(*vfs.VFS)
    if !ok {
        return nil, fmt.Errorf("context value is not VFS: %#v", value)
    }
    return VFS, nil
}

    s := &serveCmd{
// auth does proxy authorization
func (s *HTTP) auth(user, pass string) (value interface{}, err error) {
    VFS, _, err := s.proxy.Call(user, pass, false)
    if err != nil {
        return nil, err
    }
    return VFS, err
}

func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
    s = &HTTP{
        f:   f,
        vfs: vfs.New(f, &vfsflags.Opt),
        ctx: ctx,
        opt: opt,
    }

    if proxyflags.Opt.AuthProxy != "" {
        s.proxy = proxy.New(ctx, &proxyflags.Opt)
        // override auth
        s.opt.Auth.CustomAuthFn = s.auth
    } else {
        s._vfs = vfs.New(f, &vfsflags.Opt)
    }

    s.server, err = libhttp.NewServer(ctx,
        libhttp.WithConfig(opt.HTTP),
        libhttp.WithAuth(opt.Auth),
        libhttp.WithTemplate(opt.Template),
        libhttp.WithConfig(s.opt.HTTP),
        libhttp.WithAuth(s.opt.Auth),
        libhttp.WithTemplate(s.opt.Template),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to init server: %w", err)
@@ -137,7 +166,7 @@ func run(ctx context.Context, f fs.Fs, opt Options) (*serveCmd, error) {
}

// handler reads incoming requests and dispatches them
func (s *serveCmd) handler(w http.ResponseWriter, r *http.Request) {
func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
    isDir := strings.HasSuffix(r.URL.Path, "/")
    remote := strings.Trim(r.URL.Path, "/")
    if isDir {
@@ -148,9 +177,15 @@ func (s *serveCmd) handler(w http.ResponseWriter, r *http.Request) {
}

// serveDir serves a directory index at dirRemote
func (s *serveCmd) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
    VFS, err := s.getVFS(r.Context())
    if err != nil {
        http.Error(w, "Root directory not found", http.StatusNotFound)
        fs.Errorf(nil, "Failed to serve directory: %v", err)
        return
    }
    // List the directory
    node, err := s.vfs.Stat(dirRemote)
    node, err := VFS.Stat(dirRemote)
    if err == vfs.ENOENT {
        http.Error(w, "Directory not found", http.StatusNotFound)
        return
@@ -190,8 +225,15 @@ func (s *serveCmd) serveDir(w http.ResponseWriter, r *http.Request, dirRemote st
}

// serveFile serves a file object at remote
func (s *serveCmd) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
    node, err := s.vfs.Stat(remote)
func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
    VFS, err := s.getVFS(r.Context())
    if err != nil {
        http.Error(w, "File not found", http.StatusNotFound)
        fs.Errorf(nil, "Failed to serve file: %v", err)
        return
    }

    node, err := VFS.Stat(remote)
    if err == vfs.ENOENT {
        fs.Infof(remote, "%s: File not found", r.RemoteAddr)
        http.Error(w, "File not found", http.StatusNotFound)

@@ -6,13 +6,14 @@ import (
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configfile"
    "github.com/rclone/rclone/fs/filter"
    libhttp "github.com/rclone/rclone/lib/http"
    "github.com/stretchr/testify/assert"
@@ -21,18 +22,16 @@ import (

var (
    updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
    sc           *serveCmd
    testURL      string
)

const (
    testBindAddress = "localhost:0"
    testUser        = "user"
    testPass        = "pass"
    testTemplate    = "testdata/golden/testindex.html"
)

func start(t *testing.T, f fs.Fs) {
    ctx := context.Background()

func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string) {
    opts := Options{
        HTTP: libhttp.DefaultCfg(),
        Template: libhttp.TemplateConfig{
@@ -40,10 +39,13 @@ func start(t *testing.T, f fs.Fs) {
        },
    }
    opts.HTTP.ListenAddr = []string{testBindAddress}
    if proxyflags.Opt.AuthProxy == "" {
        opts.Auth.BasicUser = testUser
        opts.Auth.BasicPass = testPass
    }

    s, err := run(ctx, f, opts)
    require.NoError(t, err, "failed to start server")
    sc = s

    urls := s.server.URLs()
    require.Len(t, urls, 1, "expected one URL")
@@ -63,6 +65,8 @@ func start(t *testing.T, f fs.Fs) {
        pause *= 2
    }
    t.Fatal("couldn't connect to server")

    return s, testURL
}

var (
@@ -70,31 +74,6 @@ var (
    expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
)

func TestInit(t *testing.T) {
    ctx := context.Background()
    // Configure the remote
    configfile.Install()
    // fs.Config.LogLevel = fs.LogLevelDebug
    // fs.Config.DumpHeaders = true
    // fs.Config.DumpBodies = true

    // exclude files called hidden.txt and directories called hidden
    fi := filter.GetConfig(ctx)
    require.NoError(t, fi.AddRule("- hidden.txt"))
    require.NoError(t, fi.AddRule("- hidden/**"))

    // Create a test Fs
    f, err := fs.NewFs(context.Background(), "testdata/files")
    require.NoError(t, err)

    // set date of datedObject to expectedTime
    obj, err := f.NewObject(context.Background(), datedObject)
    require.NoError(t, err)
    require.NoError(t, obj.SetModTime(context.Background(), expectedTime))

    start(t, f)
}

// check body against the file, or re-write body if -updategolden is
// set.
func checkGolden(t *testing.T, fileName string, got []byte) {
@@ -111,7 +90,49 @@ func checkGolden(t *testing.T, fileName string, got []byte) {
    }
}

func TestGET(t *testing.T) {
func testGET(t *testing.T, useProxy bool) {
    ctx := context.Background()
    // ci := fs.GetConfig(ctx)
    // ci.LogLevel = fs.LogLevelDebug

    // exclude files called hidden.txt and directories called hidden
    fi := filter.GetConfig(ctx)
    require.NoError(t, fi.AddRule("- hidden.txt"))
    require.NoError(t, fi.AddRule("- hidden/**"))

    var f fs.Fs
    if useProxy {
        // the backend config will be made by the proxy
        prog, err := filepath.Abs("../servetest/proxy_code.go")
        require.NoError(t, err)
        files, err := filepath.Abs("testdata/files")
        require.NoError(t, err)
        cmd := "go run " + prog + " " + files

        // FIXME this is untidy setting a global variable!
        proxyflags.Opt.AuthProxy = cmd
        defer func() {
            proxyflags.Opt.AuthProxy = ""
        }()

        f = nil
    } else {
        // Create a test Fs
        var err error
        f, err = fs.NewFs(context.Background(), "testdata/files")
        require.NoError(t, err)

        // set date of datedObject to expectedTime
        obj, err := f.NewObject(context.Background(), datedObject)
        require.NoError(t, err)
        require.NoError(t, obj.SetModTime(context.Background(), expectedTime))
    }

    s, testURL := start(ctx, t, f)
    defer func() {
        assert.NoError(t, s.server.Shutdown())
    }()

    for _, test := range []struct {
        URL    string
        Status int
@@ -216,6 +237,7 @@ func TestGET(t *testing.T) {
        if test.Range != "" {
            req.Header.Add("Range", test.Range)
        }
        req.SetBasicAuth(testUser, testPass)
        resp, err := http.DefaultClient.Do(req)
        require.NoError(t, err)
        assert.Equal(t, test.Status, resp.StatusCode, test.Golden)
@@ -237,3 +259,11 @@ func TestGET(t *testing.T) {
        checkGolden(t, test.Golden, body)
    }
}

func TestGET(t *testing.T) {
    testGET(t, false)
}

func TestAuthProxy(t *testing.T) {
    testGET(t, true)
}

@@ -155,7 +155,6 @@ with a path of ` + "`/<username>/`" + `.
            if err != nil {
                return err
            }
            fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
            if s.opt.Stdio {
                if terminal.IsTerminal(int(os.Stdout.Fd())) {
                    return errors.New("refusing to run HTTP2 server directly on a terminal, please let restic start rclone")
@@ -173,6 +172,8 @@ with a path of ` + "`/<username>/`" + `.
                httpSrv.ServeConn(conn, opts)
                return nil
            }
            fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
            s.Wait()
            return nil
        })
    },
@@ -241,6 +242,10 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error
        cache: newCache(opt.CacheObjects),
        opt:   *opt,
    }
    // Don't bind any HTTP listeners if running with --stdio
    if opt.Stdio {
        opt.HTTP.ListenAddr = nil
    }
    s.Server, err = libhttp.NewServer(ctx,
        libhttp.WithConfig(opt.HTTP),
        libhttp.WithAuth(opt.Auth),

@@ -79,6 +79,30 @@ supported hash on the backend or you can use a named hash such as
"MD5" or "SHA-1". Use the [hashsum](/commands/rclone_hashsum/) command
to see the full list.

### Access WebDAV on Windows
A WebDAV shared folder can be mapped as a drive on Windows; however, the default settings prevent it.
Windows will fail to connect to a server using insecure Basic authentication.
It will not even display any login dialog, since Windows requires an SSL/HTTPS connection to be used with Basic authentication.
If you try to connect via the Add Network Location Wizard you will get the following error:
"The folder you entered does not appear to be valid. Please choose another".
However, you can still connect if you set the following registry key on the client machine:
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters\BasicAuthLevel to 2.
The BasicAuthLevel can be set to the following values:
    0 - Basic authentication disabled
    1 - Basic authentication enabled for SSL connections only
    2 - Basic authentication enabled for SSL connections and for non-SSL connections
If required, increase the FileSizeLimitInBytes to a higher value.
Navigate to the Services interface, then restart the WebClient service.
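
For example, a hedged one-liner using the standard Windows reg tool from an
elevated command prompt (the key path and value 2 are the ones described above):

    reg add HKLM\SYSTEM\CurrentControlSet\Services\WebClient\Parameters /v BasicAuthLevel /t REG_DWORD /d 2 /f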

### Access Office applications on WebDAV
Navigate to the following registry key: HKEY_CURRENT_USER\Software\Microsoft\Office\[14.0/15.0/16.0]\Common\Internet
Create a new DWORD BasicAuthLevel with value 2.
    0 - Basic authentication disabled
    1 - Basic authentication enabled for SSL connections only
    2 - Basic authentication enabled for SSL and for non-SSL connections

https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint
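
For example, for Office 16.0 (a sketch; substitute your own Office version
in the key path):

    reg add HKCU\Software\Microsoft\Office\16.0\Common\Internet /v BasicAuthLevel /t REG_DWORD /d 2 /f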

` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
@@ -199,8 +223,6 @@ func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error
        router.Method(method, "/*", w)
    }

    w.Server.Serve()

    return w, nil
}

@@ -237,6 +259,9 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
        w.serveDir(rw, r, remote)
        return
    }
    // Add URL Prefix back to path since webdavhandler needs to
    // return absolute references.
    r.URL.Path = w.opt.HTTP.BaseURL + r.URL.Path
    w.webdavhandler.ServeHTTP(rw, r)
}


@@ -51,6 +51,7 @@ func TestWebDav(t *testing.T) {
    start := func(f fs.Fs) (configmap.Simple, func()) {
        opt := DefaultOpt
        opt.HTTP.ListenAddr = []string{testBindAddress}
        opt.HTTP.BaseURL = "/prefix"
        opt.Auth.BasicUser = testUser
        opt.Auth.BasicPass = testPass
        opt.Template.Path = testTemplate

@@ -36,7 +36,7 @@ want to delete files from destination, use the
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

    rclone sync -i SOURCE remote:DESTINATION
    rclone sync --interactive SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on

@@ -52,7 +52,7 @@ unless ` + "`--no-create`" + ` or ` + "`--recursive`" + ` is provided.

If ` + "`--recursive`" + ` is used then recursively sets the modification
time on all existing files that are found under the path. Filters are supported,
and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`" + ` flag.
and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`/`-i`" + ` flag.

If ` + "`--timestamp`" + ` is used then sets the modification time to that
time instead of the current time. Times may be specified as one of:

@@ -18,6 +18,8 @@ import (
    "github.com/rclone/rclone/fs/dirtree"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/terminal"
    "github.com/spf13/cobra"
)

@@ -26,6 +28,7 @@ var (
    outFileName string
    noReport    bool
    sort        string
    enc         = encoder.OS
)

func init() {
@@ -100,22 +103,26 @@ For a more interactive navigation of the remote see the
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        outFile := os.Stdout
        ci := fs.GetConfig(context.Background())
        var outFile io.Writer
        if outFileName != "" {
            var err error
            outFile, err = os.Create(outFileName)
            if err != nil {
                return fmt.Errorf("failed to create output file: %w", err)
            }
            opts.Colorize = false
        } else {
            terminal.Start()
            outFile = terminal.Out
            opts.Colorize = true
        }
        opts.VerSort = opts.VerSort || sort == "version"
        opts.ModSort = opts.ModSort || sort == "mtime"
        opts.CTimeSort = opts.CTimeSort || sort == "ctime"
        opts.NameSort = sort == "name"
        opts.SizeSort = sort == "size"
        ci := fs.GetConfig(context.Background())
        opts.UnitSize = ci.HumanReadable
        opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
        if opts.DeepLevel == 0 {
            opts.DeepLevel = ci.MaxDepth
        }
@@ -158,7 +165,7 @@ type FileInfo struct {

// Name is base name of the file
func (to *FileInfo) Name() string {
    return path.Base(to.entry.Remote())
    return enc.FromStandardName(path.Base(to.entry.Remote()))
}

// Size in bytes for regular files; system-dependent for others
@@ -192,7 +199,7 @@ func (to *FileInfo) Sys() interface{} {

// String returns the full path
func (to *FileInfo) String() string {
    return to.entry.Remote()
    return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote()))
}

// Fs maps an fs.Fs into a tree.Fs
@@ -207,6 +214,7 @@ func NewFs(dirs dirtree.DirTree) Fs {
func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
    defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
    filePath = filepath.ToSlash(filePath)
    filePath = enc.ToStandardPath(filePath)
    filePath = strings.TrimLeft(filePath, "/")
    if filePath == "" {
        return &FileInfo{fs.NewDir("", time.Now())}, nil
@@ -222,13 +230,14 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
func (dirs Fs) ReadDir(dir string) (names []string, err error) {
    defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
    dir = filepath.ToSlash(dir)
    dir = enc.ToStandardPath(dir)
    dir = strings.TrimLeft(dir, "/")
    entries, ok := dirs[dir]
    if !ok {
        return nil, fmt.Errorf("couldn't find directory %q", dir)
    }
    for _, entry := range entries {
        names = append(names, path.Base(entry.Remote()))
        names = append(names, enc.FromStandardName(path.Base(entry.Remote())))
    }
    return
}

@@ -673,3 +673,23 @@ put them back in again.` >}}
* vanplus <60313789+vanplus@users.noreply.github.com>
* Jack <16779171+jkpe@users.noreply.github.com>
* Abdullah Saglam <abdullah.saglam@stonebranch.com>
* Marks Polakovs <github@markspolakovs.me>
* piyushgarg <piyushgarg80@gmail.com>
* Kaloyan Raev <kaloyan-raev@users.noreply.github.com>
* IMTheNachoMan <imthenachoman@gmail.com>
* alankrit <alankrit@google.com>
* Bryan Kaplan <#@bryankaplan.com>
* LXY <767763591@qq.com>
* Simmon Li (he/him) <li.simmon@gmail.com>
* happyxhw <44490504+happyxhw@users.noreply.github.com>
* Simmon Li (he/him) <hello@crespire.dev>
* Matthias Baur <baurmatt@users.noreply.github.com>
* Hunter Wittenborn <hunter@hunterwittenborn.com>
* logopk <peter@kreuser.name>
* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
* ToBeFree <github@tfrei.de>
* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>
* Peter Brunner <peter@lugoues.net>
* Ninh Pham <dongian.rapclubkhtn@gmail.com>
* Ryan Caezar Itang <sitiom@proton.me>
* Peter Brunner <peter@psykhe.com>

@@ -67,7 +67,7 @@ List the contents of a container
Sync `/home/local/directory` to the remote container, deleting any excess
files in the container.

    rclone sync -i /home/local/directory remote:container
    rclone sync --interactive /home/local/directory remote:container

### --fast-list


@@ -72,7 +72,7 @@ List the contents of a bucket
Sync `/home/local/directory` to the remote bucket, deleting any
excess files in the bucket.

    rclone sync -i /home/local/directory remote:bucket
    rclone sync --interactive /home/local/directory remote:bucket

### Application Keys


@@ -1,7 +1,7 @@
---
title: "Bisync"
description: "Bidirectional cloud sync solution in rclone"
versionIntroduced: "v1.58.0"
versionIntroduced: "v1.58"
---

## Getting started {#getting-started}
@@ -16,7 +16,7 @@ versionIntroduced: "v1.58.0"
- For successive sync runs, leave off the `--resync` flag.
- Consider using a [filters file](#filtering) for excluding
  unnecessary files and directories from the sync.
- Consider setting up the [--check-access](#check-access-option) feature
- Consider setting up the [--check-access](#check-access) feature
  for safety.
- On Linux, consider setting up a [crontab entry](#cron). bisync can
  safely run in concurrent cron jobs thanks to lock files it maintains.
@@ -146,9 +146,9 @@ The base directories on the both Path1 and Path2 filesystems must exist
or bisync will fail. This is required for safety - that bisync can verify
that both paths are valid.

When using `--resync` a newer version of a file on the Path2 filesystem
will be overwritten by the Path1 filesystem version.
Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
When using `--resync`, a newer version of a file on either the Path1 or Path2
filesystem will overwrite the file on the other path (only the last version
will be kept). Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).

For a resync run, one of the paths may be empty (no files in the path tree).
The resync run should result in files on both paths, else a normal non-resync
@@ -164,14 +164,27 @@ deleting **everything** in the other path.
Access check files are an additional safety measure against data loss.
bisync will ensure it can find matching `RCLONE_TEST` files in the same places
in the Path1 and Path2 filesystems.
`RCLONE_TEST` files are not generated automatically.
For `--check-access` to succeed, you must first either:
**A)** Place one or more `RCLONE_TEST` files in the Path1 or Path2 filesystem
and then do either a run without `--check-access` or a [--resync](#resync) to
set matching files on both filesystems, or
**B)** Set `--check-filename` to a filename already in use in various locations
throughout your sync'd fileset.
Time stamps and file contents are not important, just the names and locations.
Place one or more `RCLONE_TEST` files in the Path1 or Path2 filesystem and
then do either a run without `--check-access` or a `--resync` to set
matching files on both filesystems.
If you have symbolic links in your sync tree it is recommended to place
`RCLONE_TEST` files in the linked-to directory tree to protect against
bisync assuming a bunch of deleted files if the linked-to tree should not be
accessible. Also see the `--check-filename` flag.
accessible.
See also the [--check-filename](#check-filename) flag.

#### --check-filename

Name of the file(s) used in access health validation.
The default `--check-filename` is `RCLONE_TEST`.
One or more files having this filename must exist, synchronized between your
source and destination filesets, in order for `--check-access` to succeed.
See [--check-access](#check-access) for additional details.
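
For example, a hedged invocation (`path1` and `remote:path2` are
placeholders for your own paths):

    rclone bisync path1 remote:path2 --check-access --check-filename RCLONE_TEST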

#### --max-delete

@@ -5,6 +5,27 @@ description: "Rclone Changelog"

# Changelog

## v1.61.1 - 2022-12-23

[See commits](https://github.com/rclone/rclone/compare/v1.61.0...v1.61.1)

* Bug Fixes
    * docs:
        * Show only significant parts of version number in version introduced label (albertony)
        * Fix unescaped HTML (Nick Craig-Wood)
    * lib/http: Shutdown all servers on exit to remove unix socket (Nick Craig-Wood)
    * rc: Fix `--rc-addr` flag (which is an alternate for `--url`) (Anagh Kumar Baranwal)
    * serve restic
        * Don't serve via http if serving via `--stdio` (Nick Craig-Wood)
        * Fix immediate exit when not using stdio (Nick Craig-Wood)
    * serve webdav
        * Fix `--baseurl` handling after `lib/http` refactor (Nick Craig-Wood)
        * Fix running duplicate Serve call (Nick Craig-Wood)
* Azure Blob
    * Fix "409 Public access is not permitted on this storage account" (Nick Craig-Wood)
* S3
    * storj: Update endpoints (Kaloyan Raev)

## v1.61.0 - 2022-12-20

[See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.61.0)

@@ -1,7 +1,7 @@
---
title: "Chunker"
description: "Split-chunking overlay remote"
versionIntroduced: "v1.50.0"
versionIntroduced: "v1.50"
status: Beta
---

@@ -257,7 +257,7 @@ style or chunk naming scheme is to:
- Create another directory (most probably on the same cloud storage)
  and configure a new remote with desired metadata format,
  hash type, chunk naming etc.
- Now run `rclone sync -i oldchunks: newchunks:` and all your data
- Now run `rclone sync --interactive oldchunks: newchunks:` and all your data
  will be transparently converted in transfer.
  This may take some time, yet chunker will try server-side
  copy if possible.

@@ -1,7 +1,7 @@
---
title: "Combine"
description: "Combine several remotes into one"
versionIntroduced: "v1.59.0"
versionIntroduced: "v1.59"
---

# {{< icon "fa fa-folder-plus" >}} Combine

@@ -17,9 +17,13 @@ Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

Use the --auth-no-open-browser to prevent rclone to open auth
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go
template. If a blank string is provided as an argument to
this flag, the default template is used.

```
rclone authorize [flags]
```
@@ -29,6 +33,7 @@ rclone authorize [flags]

```
      --auth-no-open-browser   Do not automatically open auth link in default browser
  -h, --help                   help for authorize
      --template string        Use a custom Go template for generating HTML responses
```

See the [global flags page](/flags/) for global options not listed here.
@@ -437,6 +437,87 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.

**PLEASE NOTE:** `--auth-proxy` and `--authorized-keys` cannot be used
together; if `--auth-proxy` is set, the authorized keys option will be
ignored.

There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.

The program's job is to take a `user` and `pass` on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.

The config generated must have this extra parameter
- `_root` - root to use for the backend

And it may have this parameter
- `_obscure` - comma separated strings for parameters to obscure

If password authentication was used by the client, input to the proxy
process (on STDIN) would look similar to this:

```
{
    "user": "me",
    "pass": "mypassword"
}
```

If public-key authentication was used by the client, input to the
proxy process (on STDIN) would look similar to this:

```
{
    "user": "me",
    "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
}
```

And, as an example, the program might return this on STDOUT:

```
{
    "type": "sftp",
    "_root": "",
    "_obscure": "pass",
    "user": "me",
    "pass": "mypassword",
    "host": "sftp.example.com"
}
```

This would mean that an SFTP backend would be created on the fly for
the `user` and `pass`/`public_key` returned in the output to the host given. Note
that since `_obscure` is set to `pass`, rclone will obscure the `pass`
parameter before creating the backend (which is required for sftp
backends).

The program can manipulate the supplied `user` in any way, for example
to proxy to many different sftp backends, you could make the
`user` be `user@example.com` and then set the `host` to `example.com`
in the output and the user to `user`. For security you'd probably want
to restrict the `host` to a limited list.

Note that an internal cache is keyed on `user` so only use that for
configuration, don't use `pass` or `public_key`. This also means that if a user's
password or public-key is changed the cache will need to expire (which takes 5 mins)
before it takes effect.

This can be used to build general purpose proxies to any kind of
backend that rclone supports.
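
As an illustration, here is a minimal sketch of such a proxy program in Go
(this is not the bundled test_proxy.py; the fixed sftp.example.com host and
the error handling are assumptions made for the example):

```go
// minimal_auth_proxy.go - a sketch of an --auth-proxy helper program.
// It reads the JSON request from STDIN and writes a backend config to STDOUT.
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	var in struct {
		User string `json:"user"`
		Pass string `json:"pass"`
	}
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		log.Fatalf("bad request: %v", err)
	}
	out := map[string]string{
		"type":     "sftp",
		"_root":    "",                 // root to use for the backend
		"_obscure": "pass",             // rclone will obscure "pass" before use
		"user":     in.User,
		"pass":     in.Pass,
		"host":     "sftp.example.com", // fixed host, for illustration only
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatalf("bad response: %v", err)
	}
}
```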


```
rclone serve http remote:path [flags]
@@ -446,6 +527,7 @@ rclone serve http remote:path [flags]

```
      --addr stringArray    IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
      --auth-proxy string   A program to use to create the backend from the auth
      --baseurl string      Prefix for URLs - leave blank for root
      --cert string         TLS PEM key (concatenation of certificate and CA certificate)
      --client-ca string    Client certificate authority to verify clients with

@@ -23,7 +23,7 @@ want to delete files from destination, use the
**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

    rclone sync -i SOURCE remote:DESTINATION
    rclone sync --interactive SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on

@@ -21,7 +21,7 @@ unless `--no-create` or `--recursive` is provided.

If `--recursive` is used then recursively sets the modification
time on all existing files that are found under the path. Filters are supported,
and you can test with the `--dry-run` or the `--interactive` flag.
and you can test with the `--dry-run` or the `--interactive`/`-i` flag.

If `--timestamp` is used then sets the modification time to that
time instead of the current time. Times may be specified as one of:

@@ -1,7 +1,7 @@
---
title: "Compress"
description: "Compression Remote"
versionIntroduced: "v1.54.0"
versionIntroduced: "v1.54"
status: Experimental
---


@@ -58,7 +58,7 @@ custom salt is effectively a second password that must be memorized.
based on XSalsa20 cipher and Poly1305 for integrity.
[Names](#name-encryption) (file- and directory names) are also encrypted
by default, but this has some implications and is therefore
possible to turned off.
possible to turn off.

## Configuration

@@ -455,6 +455,7 @@ Properties:
    - "off"
        - Don't encrypt the file names.
        - Adds a ".bin" extension only.
        - May cause problems on [case insensitive](/overview/#case-insensitive) [storage systems](/overview/#features) like OneDrive, Dropbox, Windows, OSX and SMB.

#### --crypt-directory-name-encryption

@@ -473,6 +474,7 @@ Properties:
        - Encrypt directory names.
    - "false"
        - Don't encrypt directory names, leave them intact.
        - May cause problems on [case insensitive](/overview/#case-insensitive) [storage systems](/overview/#features) like OneDrive, Dropbox, Windows, OSX and SMB.

#### --crypt-password

@@ -660,7 +662,7 @@ as `eremote:`.

To sync the two remotes you would do

    rclone sync -i remote:crypt remote2:crypt
    rclone sync --interactive remote:crypt remote2:crypt

And to check the integrity you would do


@@ -1,7 +1,7 @@
---
title: "Docker Volume Plugin"
description: "Docker Volume Plugin"
versionIntroduced: "v1.56.0"
versionIntroduced: "v1.56"
---

# Docker Volume Plugin

@@ -94,7 +94,7 @@ storage system in the config file then the sub path, e.g.

You can define as many storage paths as you like in the config file.

Please use the [`-i` / `--interactive`](#interactive) flag while
Please use the [`--interactive`/`-i`](#interactive) flag while
learning rclone to avoid accidental data loss.

Subcommands
@@ -104,7 +104,7 @@ rclone uses a system of subcommands. For example

    rclone ls remote:path # lists a remote
    rclone copy /local/path remote:path # copies /local/path to the remote
    rclone sync -i /local/path remote:path # syncs /local/path to the remote
    rclone sync --interactive /local/path remote:path # syncs /local/path to the remote

The main rclone commands with most used first

@@ -338,7 +338,7 @@ Will get their own names
### Valid remote names

Remote names are case sensitive, and must adhere to the following rules:
- May contain number, letter, `_`, `-`, `.` and space.
- May contain number, letter, `_`, `-`, `.`, `+`, `@` and space.
- May not start with `-` or space.
- May not end with space.

@@ -396,11 +396,11 @@ file or directory like this then use the full path starting with a

So to sync a directory called `sync:me` to a remote called `remote:` use

    rclone sync -i ./sync:me remote:path
    rclone sync --interactive ./sync:me remote:path

or

    rclone sync -i /full/path/to/sync:me remote:path
    rclone sync --interactive /full/path/to/sync:me remote:path

Server Side Copy
----------------
@@ -433,8 +433,8 @@ same.

This can be used when scripting to make aged backups efficiently, e.g.

    rclone sync -i remote:current-backup remote:previous-backup
    rclone sync -i /path/to/files remote:current-backup
    rclone sync --interactive remote:current-backup remote:previous-backup
    rclone sync --interactive /path/to/files remote:current-backup

## Metadata support {#metadata}

@@ -621,7 +621,7 @@ excluded by a filter rule.

For example

    rclone sync -i /path/to/local remote:current --backup-dir remote:old
    rclone sync --interactive /path/to/local remote:current --backup-dir remote:old

will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted will be stored in
@@ -1086,7 +1086,7 @@ Add an HTTP header for all download transactions. The flag can be repeated to
add multiple headers.

```
rclone sync -i s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
rclone sync --interactive s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
```

See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for
@@ -1098,7 +1098,7 @@ Add an HTTP header for all upload transactions. The flag can be repeated to add
multiple headers.

```
rclone sync -i ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
rclone sync --interactive ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
```

See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for
@@ -1208,7 +1208,7 @@ This can be useful as an additional layer of protection for immutable
or append-only data sets (notably backup archives), where modification
implies corruption and should not be propagated.

### -i / --interactive {#interactive}
### -i, --interactive {#interactive}

This flag can be used to tell rclone that you wish a manual
confirmation before destructive operations.
@@ -1219,7 +1219,7 @@ especially with `rclone sync`.
For example

```
$ rclone delete -i /tmp/dir
$ rclone delete --interactive /tmp/dir
rclone: delete "important-file.txt"?
y) Yes, this is OK (default)
n) No, skip this
@@ -1372,7 +1372,7 @@ When the limit is reached all transfers will stop immediately.

Rclone will exit with exit code 8 if the transfer limit is reached.

## --metadata / -M
## -M, --metadata

Setting this flag enables rclone to copy the metadata from the source
to the destination. For local backends this is ownership, permissions,
@@ -1791,7 +1791,7 @@ or with `--backup-dir`. See `--backup-dir` for more info.

For example

    rclone copy -i /path/to/local/file remote:current --suffix .bak
    rclone copy --interactive /path/to/local/file remote:current --suffix .bak

will copy `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted have .bak added.
@@ -1800,7 +1800,7 @@ If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.

    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
    rclone sync --interactive /path/to/local/file remote:current --suffix .bak --exclude "*.bak"

### --suffix-keep-extension ###

@@ -2099,9 +2099,9 @@ these options. For example this can be very useful with the HTTP or
 WebDAV backends. Rclone HTTP servers have their own set of
 configuration for SSL/TLS which you can find in their documentation.
 
-### --ca-cert string
+### --ca-cert stringArray
 
-This loads the PEM encoded certificate authority certificate and uses
+This loads the PEM encoded certificate authority certificates and uses
 it to verify the certificates of the servers rclone connects to.
 
 If you have generated certificates signed with a local CA then you
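
Since `--ca-cert` is now a `stringArray`, the flag can be repeated to trust several certificate authorities in one invocation; a sketch, with hypothetical certificate paths:

```
rclone ls remote: --ca-cert /etc/ssl/internal-ca.pem --ca-cert /etc/ssl/legacy-ca.pem
```
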
@@ -16,7 +16,8 @@ See the [install](https://rclone.org/install/) documentation for more details.
 |:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
 | Intel/AMD - 64 Bit | {{< download windows amd64 >}} | {{< download osx amd64 >}} | {{< download linux amd64 >}} | {{< download linux amd64 deb >}} | {{< download linux amd64 rpm >}} | {{< download freebsd amd64 >}} | {{< download netbsd amd64 >}} | {{< download openbsd amd64 >}} | {{< download plan9 amd64 >}} | {{< download solaris amd64 >}} |
 | Intel/AMD - 32 Bit | {{< download windows 386 >}} | - | {{< download linux 386 >}} | {{< download linux 386 deb >}} | {{< download linux 386 rpm >}} | {{< download freebsd 386 >}} | {{< download netbsd 386 >}} | {{< download openbsd 386 >}} | {{< download plan9 386 >}} | - |
-| ARMv6 - 32 Bit | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
+| ARMv5 - 32 Bit NOHF | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
+| ARMv6 - 32 Bit | - | - | {{< download linux arm-v6 >}} | {{< download linux arm-v6 deb >}} | {{< download linux arm-v6 rpm >}} | {{< download freebsd arm-v6 >}} | {{< download netbsd arm-v6 >}} | - | - | - |
 | ARMv7 - 32 Bit | - | - | {{< download linux arm-v7 >}} | {{< download linux arm-v7 deb >}} | {{< download linux arm-v7 rpm >}} | {{< download freebsd arm-v7 >}} | {{< download netbsd arm-v7 >}} | - | - | - |
 | ARM - 64 Bit | {{< download windows arm64 >}} | {{< download osx arm64 >}} | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
 | MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
@@ -24,6 +25,10 @@ See the [install](https://rclone.org/install/) documentation for more details.
 
 You can also find a [mirror of the downloads on GitHub](https://github.com/rclone/rclone/releases/tag/{{< version >}}).
 
+See also [Android builds](https://beta.rclone.org/{{% version %}}/testbuilds/).
+These are built as part of the official release, but haven't been
+adopted as first class builds yet.
+
 ## Script download and install ##
 
 To install rclone on Linux/macOS/BSD systems, run:
@@ -83,7 +88,8 @@ script) from a URL which doesn't change then you can use these links.
 |:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
 | Intel/AMD - 64 Bit | {{< cdownload windows amd64 >}} | {{< cdownload osx amd64 >}} | {{< cdownload linux amd64 >}} | {{< cdownload linux amd64 deb >}} | {{< cdownload linux amd64 rpm >}} | {{< cdownload freebsd amd64 >}} | {{< cdownload netbsd amd64 >}} | {{< cdownload openbsd amd64 >}} | {{< cdownload plan9 amd64 >}} | {{< cdownload solaris amd64 >}} |
 | Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
-| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
+| ARMv5 - 32 Bit NOHF | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
+| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm-v6 >}} | {{< cdownload linux arm-v6 deb >}} | {{< cdownload linux arm-v6 rpm >}} | {{< cdownload freebsd arm-v6 >}} | {{< cdownload netbsd arm-v6 >}} | - | - | - |
 | ARMv7 - 32 Bit | - | - | {{< cdownload linux arm-v7 >}} | {{< cdownload linux arm-v7 deb >}} | {{< cdownload linux arm-v7 rpm >}} | {{< cdownload freebsd arm-v7 >}} | {{< cdownload netbsd arm-v7 >}} | - | - | - |
 | ARM - 64 Bit | {{< cdownload windows arm64 >}} | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
 | MIPS - Big Endian | - | - | {{< cdownload linux mips >}} | {{< cdownload linux mips deb >}} | {{< cdownload linux mips rpm >}} | - | - | - | - | - |
@@ -1508,9 +1508,15 @@ to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
 (near the top right corner of the right panel), then select "External"
 and click on "CREATE"; on the next screen, enter an "Application name"
 ("rclone" is OK); enter "User Support Email" (your own email is OK);
-enter "Developer Contact Email" (your own email is OK); then click on "Save" (all other data is optional).
-Click again on "Credentials" on the left panel to go back to the
-"Credentials" screen.
+enter "Developer Contact Email" (your own email is OK); then click on
+"Save" (all other data is optional). You will also have to add some scopes,
+including `.../auth/docs` and `.../auth/drive` in order to be able to edit,
+create and delete files with RClone. You may also want to include the
+`../auth/drive.metadata.readonly` scope. After adding scopes, click
+"Save and continue" to add test users. Be sure to add your own account to
+the test users. Once you've added yourself as a test user and saved the
+changes, click again on "Credentials" on the left panel to go back to
+the "Credentials" screen.
 
 (PS: if you are a GSuite user, you could also select "Internal" instead
 of "External" above, but this will restrict API use to Google Workspace
@@ -1523,16 +1529,14 @@ then select "OAuth client ID".
 
 8. It will show you a client ID and client secret. Make a note of these.
 
-(If you selected "External" at Step 5 continue to "Publish App" in the Steps 9 and 10.
+(If you selected "External" at Step 5 continue to Step 9.
 If you chose "Internal" you don't need to publish and can skip straight to
-Step 11.)
+Step 10 but your destination drive must be part of the same Google Workspace.)
 
-9. Go to "Oauth consent screen" and press "Publish App"
+9. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
+You will also want to add yourself as a test user.
 
-10. Click "OAuth consent screen", then click "PUBLISH APP" button and
-confirm, or add your account under "Test users".
-
-11. Provide the noted client ID and client secret to rclone.
+10. Provide the noted client ID and client secret to rclone.
 
 Be aware that, due to the "enhanced security" recently introduced by
 Google, you are theoretically expected to "submit your app for verification"
@@ -1540,7 +1544,11 @@ and then wait a few weeks(!) for their response; in practice, you can go right
 ahead and use the client ID and client secret with rclone, the only issue will
 be a very scary confirmation screen shown when you connect via your browser
 for rclone to be able to get its token-id (but as this only happens during
-the remote configuration, it's not such a big deal).
+the remote configuration, it's not such a big deal). Keeping the application in
+"Testing" will work as well, but the limitation is that any grants will expire
+after a week, which can be annoying to refresh constantly. If, for whatever
+reason, a short grant time is not a problem, then keeping the application in
+testing mode would also be sufficient.
 
 (Thanks to @balazer on github for these instructions.)
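
Once you have the client ID and secret, one way to attach them to an existing Drive remote without re-running the whole wizard is `rclone config update` followed by a re-authentication; a sketch, assuming a remote named `mydrive` and placeholder credentials:

```
rclone config update mydrive client_id YOUR_CLIENT_ID client_secret YOUR_CLIENT_SECRET
rclone config reconnect mydrive:
```
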
@@ -33,7 +33,7 @@ The syncs would be incremental (on a file by file basis).
 
 e.g.
 
-    rclone sync -i drive:Folder s3:bucket
+    rclone sync --interactive drive:Folder s3:bucket
 
 
 ### Using rclone from multiple locations at the same time ###
@@ -42,8 +42,8 @@ You can use rclone from multiple places at the same time if you choose
 different subdirectory for the output, e.g.
 
 ```
-Server A> rclone sync -i /tmp/whatever remote:ServerA
-Server B> rclone sync -i /tmp/whatever remote:ServerB
+Server A> rclone sync --interactive /tmp/whatever remote:ServerA
+Server B> rclone sync --interactive /tmp/whatever remote:ServerB
 ```
 
 If you sync to the same directory then you should use rclone copy
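
For that same-directory case, the `rclone copy` form avoids the two servers deleting each other's uploads; a sketch, with a hypothetical shared destination:

```
Server A> rclone copy /tmp/whatever remote:Shared
Server B> rclone copy /tmp/whatever remote:Shared
```
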
@@ -1,7 +1,7 @@
 ---
 title: "1Fichier"
 description: "Rclone docs for 1Fichier"
-versionIntroduced: "v1.49.0"
+versionIntroduced: "v1.49"
 ---
 
 # {{< icon "fa fa-archive" >}} 1Fichier
@@ -1,7 +1,7 @@
 ---
 title: "Enterprise File Fabric"
 description: "Rclone docs for the Enterprise File Fabric backend"
-versionIntroduced: "v1.54.0"
+versionIntroduced: "v1.54"
 ---
 
 # {{< icon "fa fa-cloud" >}} Enterprise File Fabric
@@ -723,7 +723,7 @@ and `-v` first.
 In conjunction with `rclone sync`, `--delete-excluded` deletes any files
 on the destination which are excluded from the command.
 
-E.g. the scope of `rclone sync -i A: B:` can be restricted:
+E.g. the scope of `rclone sync --interactive A: B:` can be restricted:
 
     rclone --min-size 50k --delete-excluded sync A: B:
 
@@ -652,7 +652,7 @@ and may be set in the config file.
       --storj-api-key string                         API key
       --storj-passphrase string                      Encryption passphrase
       --storj-provider string                        Choose an authentication method (default "existing")
-      --storj-satellite-address string               Satellite address (default "us-central-1.storj.io")
+      --storj-satellite-address string               Satellite address (default "us1.storj.io")
       --sugarsync-access-key-id string               Sugarsync Access Key ID
       --sugarsync-app-id string                      Sugarsync App ID
       --sugarsync-authorization string               Sugarsync authorization
@@ -99,7 +99,7 @@ List the contents of a directory
 Sync `/home/local/directory` to the remote directory, deleting any
 excess files in the directory.
 
-    rclone sync -i /home/local/directory remote:directory
+    rclone sync --interactive /home/local/directory remote:directory
 
 ### Anonymous FTP
 
@@ -172,7 +172,7 @@ List the contents of a bucket
 Sync `/home/local/directory` to the remote bucket, deleting any excess
 files in the bucket.
 
-    rclone sync -i /home/local/directory remote:bucket
+    rclone sync --interactive /home/local/directory remote:bucket
 
 ### Service Account support
 
@@ -1,7 +1,7 @@
 ---
 title: "Google Photos"
 description: "Rclone docs for Google Photos"
-versionIntroduced: "v1.49.0"
+versionIntroduced: "v1.49"
 ---
 
 # {{< icon "fa fa-images" >}} Google Photos
@@ -117,11 +117,11 @@ List the contents of an album
 Sync `/home/local/images` to the Google Photos, removing any excess
 files in the album.
 
-    rclone sync -i /home/local/image remote:album/newAlbum
+    rclone sync --interactive /home/local/image remote:album/newAlbum
 
 ### Layout
 
-As Google Photos is not a general purpose cloud storage system the
+As Google Photos is not a general purpose cloud storage system, the
 backend is laid out to help you navigate it.
 
 The directories under `media` show different ways of categorizing the
@@ -471,4 +471,4 @@ Rclone cannot delete files anywhere except under `album`.
 
 ### Deleting albums
 
-The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733).
+The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733).
@@ -1,7 +1,7 @@
 ---
 title: "GUI"
 description: "Web based Graphical User Interface"
-versionIntroduced: "v1.49.0"
+versionIntroduced: "v1.49"
 ---
 
 # GUI (Experimental)
@@ -1,7 +1,7 @@
 ---
 title: "Hasher"
 description: "Better checksums for other remotes"
-versionIntroduced: "v1.57.0"
+versionIntroduced: "v1.57"
 status: Experimental
 ---
@@ -1,7 +1,7 @@
 ---
 title: "HDFS Remote"
 description: "Remote for Hadoop Distributed Filesystem"
-versionIntroduced: "v1.54.0"
+versionIntroduced: "v1.54"
 ---
 
 # {{< icon "fa fa-globe" >}} HDFS
@@ -91,7 +91,7 @@ List the contents of a directory
 
 Sync the remote `directory` to `/home/local/directory`, deleting any excess files.
 
-    rclone sync -i remote:directory /home/local/directory
+    rclone sync --interactive remote:directory /home/local/directory
 
 ### Setting up your own HDFS instance for testing
@@ -1,7 +1,7 @@
 ---
 title: "HiDrive"
 description: "Rclone docs for HiDrive"
-versionIntroduced: "v1.59.0"
+versionIntroduced: "v1.59"
 ---
 
 # {{< icon "fa fa-cloud" >}} HiDrive
@@ -99,7 +99,7 @@ List the contents of a directory
 
 Sync the remote `directory` to `/home/local/directory`, deleting any excess files.
 
-    rclone sync -i remote:directory /home/local/directory
+    rclone sync --interactive remote:directory /home/local/directory
 
 ### Read only
@@ -165,6 +165,19 @@ developers so it may be out of date. Its current version is as below.
 
 [](https://repology.org/project/rclone/versions)
 
+### Scoop package manager {#windows-scoop}
+
+Make sure you have [Scoop](https://scoop.sh/) installed
+
+```
+scoop install rclone
+```
+
+Note that this is a third party installer not controlled by the rclone
+developers so it may be out of date. Its current version is as below.
+
+[](https://repology.org/project/rclone/versions)
+
 ## Package manager installation {#package-manager}
 
 Many Linux, Windows, macOS and other OS distributions package and
@@ -109,6 +109,9 @@ case "$OS_type" in
   armv7*)
     OS_type='arm-v7'
     ;;
+  armv6*)
+    OS_type='arm-v6'
+    ;;
   arm*)
     OS_type='arm'
     ;;
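
For context, `$OS_type` in this script presumably starts out as the machine string from `uname -m`, so the new branch routes ARMv6 hosts to the dedicated arm-v6 builds; a minimal standalone sketch of the mapping, under that assumption:

```
#!/bin/sh
# Hypothetical extract of the architecture detection above.
OS_type="$(uname -m)"             # e.g. armv7l, armv6l, armv5tejl
case "$OS_type" in
  armv7*) OS_type='arm-v7' ;;     # ARMv7 builds (hard float)
  armv6*) OS_type='arm-v6' ;;     # new: dedicated ARMv6 builds
  arm*)   OS_type='arm' ;;        # fallback for ARMv5/NOHF
esac
echo "would fetch the linux-${OS_type} build"
```
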
@@ -1,7 +1,7 @@
 ---
 title: "Internet Archive"
 description: "Rclone docs for Internet Archive"
-versionIntroduced: "v1.59.0"
+versionIntroduced: "v1.59"
 ---
 
 # {{< icon "fa fa-archive" >}} Internet Archive
@@ -28,7 +28,7 @@ List the contents of a item
 Sync `/home/local/directory` to the remote item, deleting any excess
 files in the item.
 
-    rclone sync -i /home/local/directory remote:item
+    rclone sync --interactive /home/local/directory remote:item
 
 ## Notes
 Because of Internet Archive's architecture, it enqueues write operations (and extra post-processings) in a per-item queue. You can check item's queue at https://catalogd.archive.org/history/item-name-here . Because of that, all uploads/deletes will not show up immediately and takes some time to be available.
@@ -1,7 +1,7 @@
 ---
 title: "Koofr"
 description: "Rclone docs for Koofr"
-versionIntroduced: "v1.47.0"
+versionIntroduced: "v1.47"
 ---
 
 # {{< icon "fa fa-suitcase" >}} Koofr
Some files were not shown because too many files have changed in this diff.