Mirror of https://github.com/rclone/rclone.git
Synced 2026-02-01 17:23:39 +00:00

Compare commits: 12 commits, v1.62-stab ... v1.61-stab
| Author | SHA1 | Date |
|---|---|---|
|  | 486e713337 |  |
|  | 46e96918dc |  |
|  | 639b61de95 |  |
|  | b03ee4e9e7 |  |
|  | 176af2b217 |  |
|  | 6be0644178 |  |
|  | 0ce5e57c30 |  |
|  | bc214291d5 |  |
|  | d3e09d86e0 |  |
|  | 5a9706ab61 |  |
|  | cce4340d48 |  |
|  | 577693e501 |  |
.github/dependabot.yml (vendored, 10 lines changed)
@@ -1,10 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "daily"
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "daily"
.github/workflows/build.yml (vendored, 36 lines changed)
@@ -15,24 +15,22 @@ on:
   workflow_dispatch:
     inputs:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
         required: true
         default: true

 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +41,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.20'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +57,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.20'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '1.19'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.18
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.18'
+            go: '1.17'
             quicktest: true
             racequicktest: true

-          - job_name: go1.19
+          - job_name: go1.18
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.18'
             quicktest: true
             racequicktest: true

@@ -124,7 +122,7 @@ jobs:
         sudo modprobe fuse
         sudo chmod 666 /dev/fuse
         sudo chown root:$USER /etc/fuse.conf
-        sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+        sudo apt-get install fuse libfuse-dev rpm pkg-config
       if: matrix.os == 'ubuntu-latest'

     - name: Install Libraries on macOS
@@ -220,7 +218,7 @@ jobs:
       if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -239,7 +237,7 @@ jobs:
     - name: Install Go
      uses: actions/setup-go@v3
      with:
-        go-version: '1.20'
+        go-version: 1.19
        check-latest: true

    - name: Install govulncheck
@@ -249,7 +247,7 @@ jobs:
      run: govulncheck ./...

  android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
    name: "android-all"
    runs-on: ubuntu-latest
@@ -264,7 +262,7 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v3
      with:
-        go-version: '1.20'
+        go-version: 1.19

    - name: Go module cache
      uses: actions/cache@v3
.github/workflows/winget.yml (vendored, 14 lines changed)
@@ -1,14 +0,0 @@
-name: Publish to Winget
-on:
-  release:
-    types: [released]
-
-jobs:
-  publish:
-    runs-on: windows-latest # Action can only run on Windows
-    steps:
-      - uses: vedantmgoyal2009/winget-releaser@v2
-        with:
-          identifier: Rclone.Rclone
-          installers-regex: '-windows-\w+\.zip$'
-          token: ${{ secrets.WINGET_TOKEN }}
Dockerfile
@@ -11,7 +11,7 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates fuse3 tzdata && \
+RUN apk --no-cache add ca-certificates fuse tzdata && \
     echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
MANUAL.html (generated, 782 lines changed): file diff suppressed because it is too large
MANUAL.txt (generated, 1114 lines changed): file diff suppressed because it is too large
RELEASE.md
@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
 ## Making a release

 * git checkout master # see below for stable branch
-* git pull # IMPORTANT
+* git pull
 * git status - make sure everything is checked in
 * Check GitHub actions build for master is Green
 * make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"
 * make retag
-* git push origin # without --follow-tags so it doesn't push the tag if it fails
 * git push --follow-tags origin
 * # Wait for the GitHub builds to complete then...
 * make fetch_binaries
@@ -75,7 +74,8 @@ Set vars
 First make the release branch. If this is a second point release then
 this will be done already.

-* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
+* git branch ${BASE_TAG} ${BASE_TAG}-stable
+* git co ${BASE_TAG}-stable
 * make startstable

 Now
backend/azureblob/azureblob.go
@@ -4,6 +4,32 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

+/* FIXME
+
+Note these Azure SDK bugs which are affecting the backend
+
+azblob UploadStream produces panic: send on closed channel if input stream has error #19612
+https://github.com/Azure/azure-sdk-for-go/issues/19612
+- FIXED by re-implementing UploadStream
+
+azblob: when using SharedKey credentials, can't reference some blob names with ? in #19613
+https://github.com/Azure/azure-sdk-for-go/issues/19613
+- FIXED by url encoding getBlobSVC and getBlockBlobSVC
+
+Azure Blob Storage paths are not URL-escaped #19475
+https://github.com/Azure/azure-sdk-for-go/issues/19475
+- FIXED by url encoding getBlobSVC and getBlockBlobSVC
+
+Controlling TransferManager #19579
+https://github.com/Azure/azure-sdk-for-go/issues/19579
+- FIXED by re-implementing UploadStream
+
+azblob: blob.StartCopyFromURL doesn't work with UTF-8 characters in the source blob #19614
+https://github.com/Azure/azure-sdk-for-go/issues/19614
+- FIXED by url encoding getBlobSVC and getBlockBlobSVC
+
+*/
+
 import (
     "bytes"
     "context"
@@ -933,12 +959,18 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // getBlobSVC creates a blob client
 func (f *Fs) getBlobSVC(container, containerPath string) *blob.Client {
-    return f.cntSVC(container).NewBlobClient(containerPath)
+    // FIXME the urlEncode here is a workaround for
+    // https://github.com/Azure/azure-sdk-for-go/issues/19613
+    // https://github.com/Azure/azure-sdk-for-go/issues/19475
+    return f.cntSVC(container).NewBlobClient(urlEncode(containerPath))
 }

 // getBlockBlobSVC creates a block blob client
 func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client {
-    return f.cntSVC(container).NewBlockBlobClient(containerPath)
+    // FIXME the urlEncode here is a workaround for
+    // https://github.com/Azure/azure-sdk-for-go/issues/19613
+    // https://github.com/Azure/azure-sdk-for-go/issues/19475
+    return f.cntSVC(container).NewBlockBlobClient(urlEncode(containerPath))
 }

 // updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -953,7 +985,7 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
 }

 // Returns whether file is a directory marker or not
-func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
+func isDirectoryMarker(size int64, metadata map[string]string, remote string) bool {
     // Directory markers are 0 length
     if size == 0 {
         endsWithSlash := strings.HasSuffix(remote, "/")
@@ -964,7 +996,7 @@ func isDirectoryMarker(size int64, metadata map[string]*string, remote string) b
         // defacto standard for marking blobs as directories.
         // Note also that the metadata hasn't been normalised to lower case yet
         for k, v := range metadata {
-            if v != nil && strings.EqualFold(k, "hdi_isfolder") && *v == "true" {
+            if strings.EqualFold(k, "hdi_isfolder") && v == "true" {
                 return true
             }
         }
@@ -1552,15 +1584,12 @@ func (o *Object) Size() int64 {
     return o.size
 }

 // Set o.metadata from metadata
-func (o *Object) setMetadata(metadata map[string]*string) {
+func (o *Object) setMetadata(metadata map[string]string) {
     if len(metadata) > 0 {
         // Lower case the metadata
         o.meta = make(map[string]string, len(metadata))
         for k, v := range metadata {
-            if v != nil {
-                o.meta[strings.ToLower(k)] = *v
-            }
+            o.meta[strings.ToLower(k)] = v
         }
         // Set o.modTime from metadata if it exists and
         // UseServerModTime isn't in use.
@@ -1576,16 +1605,20 @@ func (o *Object) setMetadata(metadata map[string]*string) {
     }
 }

-// Get metadata from o.meta
-func (o *Object) getMetadata() (metadata map[string]*string) {
-    if len(o.meta) == 0 {
-        return nil
-    }
-    metadata = make(map[string]*string, len(o.meta))
-    for k, v := range o.meta {
-        metadata[k] = &v
-    }
-    return metadata
-}
+// Duplicte of setMetadata but taking pointers to strings
+func (o *Object) setMetadataP(metadata map[string]*string) {
+    if len(metadata) > 0 {
+        // Convert the format of the metadata
+        newMeta := make(map[string]string, len(metadata))
+        for k, v := range metadata {
+            if v != nil {
+                newMeta[k] = *v
+            }
+        }
+        o.setMetadata(newMeta)
+    } else {
+        o.meta = nil
+    }
+}

 // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
@@ -1717,7 +1750,7 @@ func (o *Object) decodeMetaDataFromBlob(info *container.BlobItem) (err error) {
     } else {
         o.accessTier = *info.Properties.AccessTier
     }
-    o.setMetadata(metadata)
+    o.setMetadataP(metadata)

     return nil
 }
@@ -1799,7 +1832,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
     blb := o.getBlobSVC()
     opt := blob.SetMetadataOptions{}
     err := o.fs.pacer.Call(func() (bool, error) {
-        _, err := blb.SetMetadata(ctx, o.getMetadata(), &opt)
+        _, err := blb.SetMetadata(ctx, o.meta, &opt)
         return o.fs.shouldRetry(ctx, err)
     })
     if err != nil {
@@ -1870,6 +1903,41 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     return downloadResponse.Body, nil
 }

+// dontEncode is the characters that do not need percent-encoding
+//
+// The characters that do not need percent-encoding are a subset of
+// the printable ASCII characters: upper-case letters, lower-case
+// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
+// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must
+// be replaced with "%" and the two-digit hex value of the byte.
+const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
+    `ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
+    `0123456789` +
+    `._-/~!$'()*;=:@`)
+
+// noNeedToEncode is a bitmap of characters which don't need % encoding
+var noNeedToEncode [256]bool
+
+func init() {
+    for _, c := range dontEncode {
+        noNeedToEncode[c] = true
+    }
+}
+
+// urlEncode encodes in with % encoding
+func urlEncode(in string) string {
+    var out bytes.Buffer
+    for i := 0; i < len(in); i++ {
+        c := in[i]
+        if noNeedToEncode[c] {
+            _ = out.WriteByte(c)
+        } else {
+            _, _ = out.WriteString(fmt.Sprintf("%%%02X", c))
+        }
+    }
+    return out.String()
+}
+
 // poolWrapper wraps a pool.Pool as an azblob.TransferManager
 type poolWrapper struct {
     pool *pool.Pool
@@ -2071,7 +2139,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
         rs := readSeekCloser{wrappedReader, bufferReader}
         options := blockblob.StageBlockOptions{
             // Specify the transactional md5 for the body, to be validated by the service.
-            TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
+            TransactionalContentMD5: transactionalMD5,
         }
         _, err = blb.StageBlock(ctx, blockID, &rs, &options)
         return o.fs.shouldRetry(ctx, err)
@@ -2095,7 +2163,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
     tier := blob.AccessTier(o.fs.opt.AccessTier)
     options := blockblob.CommitBlockListOptions{
-        Metadata:    o.getMetadata(),
+        Metadata:    o.meta,
         Tier:        &tier,
         HTTPHeaders: httpHeaders,
     }
@@ -2142,7 +2210,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
     tier := blob.AccessTier(o.fs.opt.AccessTier)
     options := blockblob.UploadOptions{
-        Metadata:    o.getMetadata(),
+        Metadata:    o.meta,
         Tier:        &tier,
         HTTPHeaders: httpHeaders,
     }
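The urlEncode helper shown in this hunk is the heart of the backported workaround: it percent-encodes every byte outside the dontEncode set before the path reaches the SDK. Below is a minimal standalone sketch of the same scheme; it mirrors the code in the diff but is illustrative only, and the sample inputs are made up.

package main

import (
	"bytes"
	"fmt"
)

// Characters left unencoded, as in the hunk above.
const dontEncode = `abcdefghijklmnopqrstuvwxyz` +
	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
	`0123456789` +
	`._-/~!$'()*;=:@`

var noNeedToEncode [256]bool

func init() {
	for _, c := range dontEncode {
		noNeedToEncode[c] = true
	}
}

// urlEncode percent-encodes every byte not in dontEncode.
func urlEncode(in string) string {
	var out bytes.Buffer
	for i := 0; i < len(in); i++ {
		if c := in[i]; noNeedToEncode[c] {
			out.WriteByte(c)
		} else {
			fmt.Fprintf(&out, "%%%02X", c)
		}
	}
	return out.String()
}

func main() {
	// '?' and multi-byte UTF-8 runes are the cases the SDK mishandled.
	fmt.Println(urlEncode("dir/file?.txt")) // dir/file%3F.txt
	fmt.Println(urlEncode("héllo"))         // h%C3%A9llo
}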
backend/b2/b2.go
@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
                 fs.Errorf(object.Name, "Can't create object %v", err)
                 continue
             }
-            tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
+            tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
             err = f.deleteByID(ctx, object.ID, object.Name)
             checkErr(err)
             tr.Done(ctx, err)
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
             if err != nil {
                 fs.Errorf(object, "Can't create object %+v", err)
             }
-            tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+            tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
             if oldOnly && last != remote {
                 // Check current version of the file
                 if object.Action == "hide" {
backend/b2/upload.go
@@ -14,7 +14,6 @@ import (
     "io"
     "strings"
     "sync"
-    "time"

     "github.com/rclone/rclone/backend/b2/api"
     "github.com/rclone/rclone/fs"
@@ -22,7 +21,6 @@ import (
     "github.com/rclone/rclone/fs/chunksize"
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/lib/atexit"
-    "github.com/rclone/rclone/lib/pool"
     "github.com/rclone/rclone/lib/rest"
     "golang.org/x/sync/errgroup"
 )
@@ -430,47 +428,18 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
     defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
     fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
     var (
-        g, gCtx    = errgroup.WithContext(ctx)
-        remaining  = up.size
-        uploadPool *pool.Pool
-        ci         = fs.GetConfig(ctx)
+        g, gCtx   = errgroup.WithContext(ctx)
+        remaining = up.size
     )
-    // If using large chunk size then make a temporary pool
-    if up.chunkSize <= int64(up.f.opt.ChunkSize) {
-        uploadPool = up.f.pool
-    } else {
-        uploadPool = pool.New(
-            time.Duration(up.f.opt.MemoryPoolFlushTime),
-            int(up.chunkSize),
-            ci.Transfers,
-            up.f.opt.MemoryPoolUseMmap,
-        )
-        defer uploadPool.Flush()
-    }
-    // Get an upload token and a buffer
-    getBuf := func() (buf []byte) {
-        up.f.getBuf(true)
-        if !up.doCopy {
-            buf = uploadPool.Get()
-        }
-        return buf
-    }
-    // Put an upload token and a buffer
-    putBuf := func(buf []byte) {
-        if !up.doCopy {
-            uploadPool.Put(buf)
-        }
-        up.f.putBuf(nil, true)
-    }
     g.Go(func() error {
         for part := int64(1); part <= up.parts; part++ {
             // Get a block of memory from the pool and token which limits concurrency.
-            buf := getBuf()
+            buf := up.f.getBuf(up.doCopy)

             // Fail fast, in case an errgroup managed function returns an error
             // gCtx is cancelled. There is no point in uploading all the other parts.
             if gCtx.Err() != nil {
-                putBuf(buf)
+                up.f.putBuf(buf, up.doCopy)
                 return nil
             }
@@ -484,14 +453,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
                 buf = buf[:reqSize]
                 _, err = io.ReadFull(up.in, buf)
                 if err != nil {
-                    putBuf(buf)
+                    up.f.putBuf(buf, up.doCopy)
                     return err
                 }
             }

             part := part // for the closure
             g.Go(func() (err error) {
-                defer putBuf(buf)
+                defer up.f.putBuf(buf, up.doCopy)
                 if !up.doCopy {
                     err = up.transferChunk(gCtx, part, buf)
                 } else {
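Both sides of Upload rely on the errgroup's context to stop handing out work once any part fails (the "Fail fast" comment above). A hedged sketch of that pattern in isolation, with a stubbed uploadPart standing in for transferChunk; the names are illustrative, not rclone's:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func uploadPart(ctx context.Context, part int) error {
	if part == 3 {
		return errors.New("part 3 failed") // simulate one failing part
	}
	fmt.Println("uploaded part", part)
	return nil
}

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	for part := 1; part <= 10; part++ {
		// Fail fast: once any goroutine has returned an error, gCtx is
		// cancelled and there is no point queueing the remaining parts.
		if gCtx.Err() != nil {
			break
		}
		part := part // capture for the closure, as in the diff above
		g.Go(func() error { return uploadPart(gCtx, part) })
	}
	// Wait returns the first error any part reported (if any).
	fmt.Println("result:", g.Wait())
}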
backend/cache/cache.go (vendored, 2 lines changed)
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         }
         fs.Debugf(dir, "list: remove entry: %v", entryRemote)
     }
-    entries = nil //nolint:ineffassign
+    entries = nil

     // and then iterate over the ones from source (temp Objects will override source ones)
     var batchDirectories []*Directory
backend/crypt/crypt.go
@@ -235,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
     // the features here are ones we could support, and they are
     // ANDed with the ones from wrappedFs
     f.features = (&fs.Features{
-        CaseInsensitive:         !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
+        CaseInsensitive:         cipher.NameEncryptionMode() == NameEncryptionOff,
         DuplicateFiles:          true,
         ReadMimeType:            false, // MimeTypes not supported with crypt
         WriteMimeType:           false,
@@ -396,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-    ci := fs.GetConfig(ctx)
-
     if f.opt.NoDataEncryption {
         o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
         if err == nil && o != nil {
@@ -415,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
     // Find a hash the destination supports to compute a hash of
     // the encrypted data
     ht := f.Fs.Hashes().GetOne()
-    if ci.IgnoreChecksum {
-        ht = hash.None
-    }
     var hasher *hash.MultiHasher
     if ht != hash.None {
         hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
backend/drive/drive.go
@@ -451,11 +451,7 @@ If downloading a file returns the error "This file has been identified
 as malware or spam and cannot be downloaded" with the error code
 "cannotDownloadAbusiveFile" then supply this flag to rclone to
 indicate you acknowledge the risks of downloading the file and rclone
-will download it anyway.
-
-Note that if you are using service account it will need Manager
-permission (not Content Manager) to for this flag to work. If the SA
-does not have the right permission, Google will just ignore the flag.`,
+will download it anyway.`,
             Advanced: true,
         }, {
             Name: "keep_revision_forever",
@@ -761,7 +757,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
     } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
         fs.Errorf(f, "Received download limit error: %v", err)
         return false, fserrors.FatalError(err)
-    } else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
+    } else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
         fs.Errorf(f, "Received upload limit error: %v", err)
         return false, fserrors.FatalError(err)
     } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -3326,9 +3322,9 @@ This takes an optional directory to trash which make this easier to
 use via the API.

     rclone backend untrash drive:directory
-    rclone backend --interactive untrash drive:directory subdir
+    rclone backend -i untrash drive:directory subdir

-Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
+Use the -i flag to see what would be restored before restoring it.

 Result:
@@ -3358,7 +3354,7 @@ component will be used as the file name.
 If the destination is a drive backend then server-side copying will be
 attempted if possible.

-Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
+Use the -i flag to see what would be copied before copying.
 `,
 }, {
     Name: "exportformats",
backend/drive/drive_internal_test.go
@@ -243,15 +243,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
     quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
     assert.False(t, quotaExceededRetry)
     assert.Equal(t, quotaExceededError, expectedQuotaError)
-
-    sqEItem := googleapi.ErrorItem{
-        Reason: "storageQuotaExceeded",
-    }
-    generic403.Errors[0] = sqEItem
-    expectedStorageQuotaError := fserrors.FatalError(&generic403)
-    storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
-    assert.False(t, storageQuotaExceededRetry)
-    assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
 }

 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
backend/ftp/ftp.go
@@ -15,7 +15,7 @@ import (
     "sync"
     "time"

-    "github.com/jlaffaye/ftp"
+    "github.com/rclone/ftp"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/config"
@@ -315,33 +315,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
     return len(p), nil
 }

-// Return a *textproto.Error if err contains one or nil otherwise
-func textprotoError(err error) (errX *textproto.Error) {
-    if errors.As(err, &errX) {
-        return errX
-    }
-    return nil
-}
-
-// returns true if this FTP error should be retried
-func isRetriableFtpError(err error) bool {
-    if errX := textprotoError(err); errX != nil {
-        switch errX.Code {
-        case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
-            return true
-        }
-    }
-    return false
-}
-
 // shouldRetry returns a boolean as to whether this err deserve to be
 // retried. It returns the err as a convenience
 func shouldRetry(ctx context.Context, err error) (bool, error) {
     if fserrors.ContextError(ctx, &err) {
         return false, err
     }
-    if isRetriableFtpError(err) {
-        return true, err
+    switch errX := err.(type) {
+    case *textproto.Error:
+        switch errX.Code {
+        case ftp.StatusNotAvailable:
+            return true, err
+        }
     }
     return fserrors.ShouldRetry(err), err
 }
@@ -478,7 +463,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
     *pc = nil
     if err != nil {
         // If not a regular FTP error code then check the connection
-        if tpErr := textprotoError(err); tpErr != nil {
+        var tpErr *textproto.Error
+        if !errors.As(err, &tpErr) {
             nopErr := c.NoOp()
             if nopErr != nil {
                 fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -627,7 +613,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 // translateErrorFile turns FTP errors into rclone errors if possible for a file
 func translateErrorFile(err error) error {
-    if errX := textprotoError(err); errX != nil {
+    switch errX := err.(type) {
+    case *textproto.Error:
         switch errX.Code {
         case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
             err = fs.ErrorObjectNotFound
@@ -638,7 +625,8 @@ func translateErrorFile(err error) error {
 // translateErrorDir turns FTP errors into rclone errors if possible for a directory
 func translateErrorDir(err error) error {
-    if errX := textprotoError(err); errX != nil {
+    switch errX := err.(type) {
+    case *textproto.Error:
         switch errX.Code {
         case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
             err = fs.ErrorDirNotFound
@@ -929,7 +917,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
     }
     err = c.MakeDir(f.dirFromStandardPath(abspath))
     f.putFtpConnection(&c, err)
-    if errX := textprotoError(err); errX != nil {
+    switch errX := err.(type) {
+    case *textproto.Error:
         switch errX.Code {
         case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
             err = nil
@@ -1170,7 +1159,8 @@ func (f *ftpReadCloser) Close() error {
     // mask the error if it was caused by a premature close
     // NB StatusAboutToSend is to work around a bug in pureftpd
     // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
-    if errX := textprotoError(err); errX != nil {
+    switch errX := err.(type) {
+    case *textproto.Error:
         switch errX.Code {
         case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
             err = nil
@@ -1196,26 +1186,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
         }
     }

-    var (
-        fd *ftp.Response
-        c  *ftp.ServerConn
-    )
-    err = o.fs.pacer.Call(func() (bool, error) {
-        c, err = o.fs.getFtpConnection(ctx)
-        if err != nil {
-            return false, err // getFtpConnection has retries already
-        }
-        fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
-        if err != nil {
-            o.fs.putFtpConnection(&c, err)
-        }
-        return shouldRetry(ctx, err)
-    })
+    c, err := o.fs.getFtpConnection(ctx)
     if err != nil {
         return nil, fmt.Errorf("open: %w", err)
     }
+
+    fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
+    if err != nil {
+        o.fs.putFtpConnection(&c, err)
+        return nil, fmt.Errorf("open: %w", err)
+    }
     rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
     return rc, nil
 }
@@ -1248,10 +1227,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     }
     err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
-    // Ignore error 250 here - send by some servers
-    if errX := textprotoError(err); errX != nil {
-        switch errX.Code {
-        case ftp.StatusRequestedFileActionOK:
-            err = nil
+    if err != nil {
+        switch errX := err.(type) {
+        case *textproto.Error:
+            switch errX.Code {
+            case ftp.StatusRequestedFileActionOK:
+                err = nil
+            }
         }
     }
     if err != nil {
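The 1.62 side of this file funnels the repeated *textproto.Error type switches through one errors.As based helper, which unlike a bare type assertion also matches errors wrapped with fmt.Errorf and %w. A small self-contained sketch of that pattern; the 450 code stands in for the FTP "not available" status used above:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

// textprotoError returns a *textproto.Error if err wraps one, else nil.
// errors.As walks the whole wrap chain, so wrapped errors still match.
func textprotoError(err error) (errX *textproto.Error) {
	if errors.As(err, &errX) {
		return errX
	}
	return nil
}

func main() {
	base := &textproto.Error{Code: 450, Msg: "requested file action not taken"}
	wrapped := fmt.Errorf("open: %w", base)

	// A plain type switch on wrapped would miss this; errors.As does not.
	if errX := textprotoError(wrapped); errX != nil {
		fmt.Println("FTP status code:", errX.Code) // 450
	}
}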
backend/googlecloudstorage/googlecloudstorage.go
@@ -82,8 +82,7 @@ func init() {
             saFile, _ := m.Get("service_account_file")
             saCreds, _ := m.Get("service_account_credentials")
             anonymous, _ := m.Get("anonymous")
-            envAuth, _ := m.Get("env_auth")
-            if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
+            if saFile != "" || saCreds != "" || anonymous == "true" {
                 return nil, nil
             }
             return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -331,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
             Default: (encoder.Base |
                 encoder.EncodeCrLf |
                 encoder.EncodeInvalidUtf8),
-        }, {
-            Name:    "env_auth",
-            Help:    "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
-            Default: false,
-            Examples: []fs.OptionExample{{
-                Value: "false",
-                Help:  "Enter credentials in the next step.",
-            }, {
-                Value: "true",
-                Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
-            }},
         }}...),
     })
 }
@@ -361,7 +349,6 @@ type Options struct {
     Decompress bool                 `config:"decompress"`
     Endpoint   string               `config:"endpoint"`
     Enc        encoder.MultiEncoder `config:"encoding"`
-    EnvAuth    bool                 `config:"env_auth"`
 }

 // Fs represents a remote storage server
@@ -513,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         if err != nil {
             return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
         }
-    } else if opt.EnvAuth {
-        oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
-        if err != nil {
-            return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
-        }
     } else {
         oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
         if err != nil {
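The env_auth path removed on the 1.61 side is a thin wrapper over Google's Application Default Credentials. A hedged sketch of what that single call resolves, assuming the standard ADC environment is set up; the scope constant is the one used in the hunk above:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// DefaultClient checks GOOGLE_APPLICATION_CREDENTIALS, then gcloud's
	// well-known credentials file, then the instance metadata server.
	client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatalf("failed to configure Google Cloud Storage: %v", err)
	}
	_ = client // hand the authenticated *http.Client to the storage API
}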
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
             if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
                 fs.Errorf(nil, "%s: failed to import: %v", remote, err)
             }
-            accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
+            accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
             doneCount++
         }
     })
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
|
||||
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
|
||||
fs.Errorf(dir, "%v", fierr)
|
||||
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
||||
continue
|
||||
@@ -524,10 +524,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
localPath := filepath.Join(fsDirPath, name)
|
||||
fi, err = os.Stat(localPath)
|
||||
// Quietly skip errors on excluded files and directories
|
||||
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
|
||||
continue
|
||||
}
|
||||
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
||||
// Skip bad symlinks and circular symlinks
|
||||
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
||||
|
||||
backend/local/local_internal_test.go
@@ -14,7 +14,6 @@ import (
     "time"

     "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/fs/hash"
@@ -396,73 +395,3 @@ func TestFilter(t *testing.T) {
     sort.Sort(entries)
     require.Equal(t, "[included]", fmt.Sprint(entries))
 }
-
-func TestFilterSymlink(t *testing.T) {
-    ctx := context.Background()
-    r := fstest.NewRun(t)
-    defer r.Finalise()
-    when := time.Now()
-    f := r.Flocal.(*Fs)
-
-    // Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
-    r.WriteFile("included.file", "included file", when)
-    r.WriteFile("included.dir/included.sub.file", "included sub file", when)
-    require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
-    require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
-    require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
-
-    // Set fs into "-L" mode
-    f.opt.FollowSymlinks = true
-    f.opt.TranslateSymlinks = false
-    f.lstat = os.Stat
-
-    // Set fs into "-l" mode
-    // f.opt.FollowSymlinks = false
-    // f.opt.TranslateSymlinks = true
-    // f.lstat = os.Lstat
-
-    // Check set up for filtering
-    assert.True(t, f.Features().FilterAware)
-
-    // Reset global error count
-    accounting.Stats(ctx).ResetErrors()
-    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
-
-    // Add a filter
-    ctx, fi := filter.AddConfig(ctx)
-    require.NoError(t, fi.AddRule("+ included.file"))
-    require.NoError(t, fi.AddRule("+ included.file.link"))
-    require.NoError(t, fi.AddRule("+ included.dir/**"))
-    require.NoError(t, fi.AddRule("+ included.dir.link/**"))
-    require.NoError(t, fi.AddRule("- *"))
-
-    // Check listing without use filter flag
-    entries, err := f.List(ctx, "")
-    require.NoError(t, err)
-
-    // Check 1 global errors one for each dangling symlink
-    assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
-    accounting.Stats(ctx).ResetErrors()
-
-    sort.Sort(entries)
-    require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
-
-    // Add user filter flag
-    ctx = filter.SetUseFilter(ctx, true)
-
-    // Check listing with use filter flag
-    entries, err = f.List(ctx, "")
-    require.NoError(t, err)
-    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
-
-    sort.Sort(entries)
-    require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
-
-    // Check listing through a symlink still works
-    entries, err = f.List(ctx, "included.dir")
-    require.NoError(t, err)
-    assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
-
-    sort.Sort(entries)
-    require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
-}
backend/mega/mega.go
@@ -83,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
 permanently delete objects instead.`,
             Default:  false,
             Advanced: true,
-        }, {
-            Name: "use_https",
-            Help: `Use HTTPS for transfers.
-
-MEGA uses plain text HTTP connections by default.
-Some ISPs throttle HTTP connections, this causes transfers to become very slow.
-Enabling this will force MEGA to use HTTPS for all transfers.
-HTTPS is normally not necesary since all data is already encrypted anyway.
-Enabling it will increase CPU usage and add network overhead.`,
-            Default:  false,
-            Advanced: true,
         }, {
             Name:     config.ConfigEncoding,
             Help:     config.ConfigEncodingHelp,
@@ -111,7 +100,6 @@ type Options struct {
     Pass       string               `config:"pass"`
     Debug      bool                 `config:"debug"`
     HardDelete bool                 `config:"hard_delete"`
-    UseHTTPS   bool                 `config:"use_https"`
     Enc        encoder.MultiEncoder `config:"encoding"`
 }
@@ -216,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     if srv == nil {
         srv = mega.New().SetClient(fshttp.NewClient(ctx))
         srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
-        srv.SetHTTPS(opt.UseHTTPS)
         srv.SetLogger(func(format string, v ...interface{}) {
             fs.Infof("*go-mega*", format, v...)
         })
backend/onedrive/api/types.go
@@ -126,7 +126,6 @@ type HashesType struct {
     Sha1Hash     string `json:"sha1Hash"`     // hex encoded SHA1 hash for the contents of the file (if available)
     Crc32Hash    string `json:"crc32Hash"`    // hex encoded CRC32 value of the file (if available)
     QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
-    Sha256Hash   string `json:"sha256Hash"`   // hex encoded SHA256 value of the file (if available)
 }

 // FileFacet groups file-related data on OneDrive into a single structure.
backend/onedrive/onedrive.go
@@ -259,48 +259,6 @@ this flag there.
 At the time of writing this only works with OneDrive personal paid accounts.
 `,
             Advanced: true,
-        }, {
-            Name:    "hash_type",
-            Default: "auto",
-            Help: `Specify the hash in use for the backend.
-
-This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
-
-Before rclone 1.62 an SHA1 hash was used by default for Onedrive
-Personal. For 1.62 and later the default is to use a QuickXorHash for
-all onedrive types. If an SHA1 hash is desired then set this option
-accordingly.
-
-From July 2023 QuickXorHash will be the only available hash for
-both OneDrive for Business and OneDriver Personal.
-
-This can be set to "none" to not use any hashes.
-
-If the hash requested does not exist on the object, it will be
-returned as an empty string which is treated as a missing hash by
-rclone.
-`,
-            Examples: []fs.OptionExample{{
-                Value: "auto",
-                Help:  "Rclone chooses the best hash",
-            }, {
-                Value: "quickxor",
-                Help:  "QuickXor",
-            }, {
-                Value: "sha1",
-                Help:  "SHA1",
-            }, {
-                Value: "sha256",
-                Help:  "SHA256",
-            }, {
-                Value: "crc32",
-                Help:  "CRC32",
-            }, {
-                Value: "none",
-                Help:  "None - don't use any hashes",
-            }},
-            Advanced: true,
         }, {
             Name:     config.ConfigEncoding,
             Help:     config.ConfigEncodingHelp,
@@ -553,7 +511,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
 `)
     case "url_end":
         siteURL := config.Result
-        re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
+        re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
         match := re.FindStringSubmatch(siteURL)
         if len(match) == 2 {
             return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -639,7 +597,6 @@ type Options struct {
     LinkScope    string               `config:"link_scope"`
     LinkType     string               `config:"link_type"`
     LinkPassword string               `config:"link_password"`
-    HashType     string               `config:"hash_type"`
     Enc          encoder.MultiEncoder `config:"encoding"`
 }
@@ -656,7 +613,6 @@ type Fs struct {
     tokenRenewer *oauthutil.Renew // renew the token on expiry
     driveID      string           // ID to use for querying Microsoft Graph
     driveType    string           // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
-    hashType     hash.Type        // type of the hash we are using
 }

 // Object describes a OneDrive object
@@ -670,7 +626,8 @@ type Object struct {
     size          int64     // size of the object
     modTime       time.Time // modification time of the object
     id            string    // ID of the object
-    hash          string    // Hash of the content, usually QuickXorHash but set as hash_type
+    sha1          string    // SHA-1 of the object content
+    quickxorhash  string    // QuickXorHash of the object content
     mimeType      string    // Content-Type of object from server (may not be as uploaded)
 }
@@ -925,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         driveType: opt.DriveType,
         srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
         pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-        hashType:  QuickXorHashType,
     }
     f.features = (&fs.Features{
         CaseInsensitive: true,
@@ -935,15 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }).Fill(ctx, f)
     f.srv.SetErrorHandler(errorHandler)

-    // Set the user defined hash
-    if opt.HashType == "auto" || opt.HashType == "" {
-        opt.HashType = QuickXorHashType.String()
-    }
-    err = f.hashType.Set(opt.HashType)
-    if err != nil {
-        return nil, err
-    }
-
     // Disable change polling in China region
     // See: https://github.com/rclone/rclone/issues/6444
     if f.opt.Region == regionCN {
@@ -1609,7 +1556,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-    return hash.Set(f.hashType)
+    if f.driveType == driveTypePersonal {
+        return hash.Set(hash.SHA1)
+    }
+    return hash.Set(QuickXorHashType)
 }

 // PublicLink returns a link for downloading without account.
@@ -1818,8 +1768,14 @@ func (o *Object) rootPath() string {
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-    if t == o.fs.hashType {
-        return o.hash, nil
+    if o.fs.driveType == driveTypePersonal {
+        if t == hash.SHA1 {
+            return o.sha1, nil
+        }
+    } else {
+        if t == QuickXorHashType {
+            return o.quickxorhash, nil
+        }
     }
     return "", hash.ErrUnsupported
 }
@@ -1850,23 +1806,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
     file := info.GetFile()
     if file != nil {
         o.mimeType = file.MimeType
-        o.hash = ""
-        switch o.fs.hashType {
-        case QuickXorHashType:
-            if file.Hashes.QuickXorHash != "" {
-                h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
-                if err != nil {
-                    fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
-                } else {
-                    o.hash = hex.EncodeToString(h)
-                }
+        if file.Hashes.Sha1Hash != "" {
+            o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
+        }
+        if file.Hashes.QuickXorHash != "" {
+            h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
+            if err != nil {
+                fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
+            } else {
+                o.quickxorhash = hex.EncodeToString(h)
             }
-        case hash.SHA1:
-            o.hash = strings.ToLower(file.Hashes.Sha1Hash)
-        case hash.SHA256:
-            o.hash = strings.ToLower(file.Hashes.Sha256Hash)
-        case hash.CRC32:
-            o.hash = strings.ToLower(file.Hashes.Crc32Hash)
         }
     }
     fileSystemInfo := info.GetFileSystemInfo()
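In both variants of setMetaData, the Graph API's base64 QuickXorHash is decoded and re-encoded as lowercase hex, which is the form rclone reports. A standalone sketch of just that conversion; the sample value is made up:

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// quickXorHash as returned by the OneDrive API (illustrative value).
	apiValue := "ZmFrZSBoYXNoIGJ5dGVzISEh"

	raw, err := base64.StdEncoding.DecodeString(apiValue)
	if err != nil {
		log.Fatalf("failed to decode QuickXorHash %q: %v", apiValue, err)
	}
	fmt.Println(hex.EncodeToString(raw)) // hex form, as rclone prints it
}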
backend/onedrive/quickxorhash/quickxorhash.go
@@ -7,40 +7,51 @@
 // See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
 package quickxorhash

-// This code was ported from a fast C-implementation from
-// https://github.com/namazso/QuickXorHash
-// which has licenced as BSD Zero Clause License
-//
-// BSD Zero Clause License
-//
-// Copyright (c) 2022 namazso <admin@namazso.eu>
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
-// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
+// This code was ported from the code snippet linked from
+// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
+// Which has the copyright

-import "hash"
+// ------------------------------------------------------------------------------
+// Copyright (c) 2016 Microsoft Corporation
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+// ------------------------------------------------------------------------------
+
+import (
+    "hash"
+)

 const (
     // BlockSize is the preferred size for hashing
     BlockSize = 64
     // Size of the output checksum
-    Size        = 20
-    shift       = 11
-    widthInBits = 8 * Size
-    dataSize    = shift * widthInBits
+    Size           = 20
+    bitsInLastCell = 32
+    shift          = 11
+    widthInBits    = 8 * Size
+    dataSize       = (widthInBits-1)/64 + 1
 )

 type quickXorHash struct {
-    data [dataSize]byte
-    size uint64
+    data        [dataSize]uint64
+    lengthSoFar uint64
+    shiftSoFar  int
 }

 // New returns a new hash.Hash computing the quickXorHash checksum.
@@ -59,37 +70,94 @@ func New() hash.Hash {
 //
 // Implementations must not retain p.
 func (q *quickXorHash) Write(p []byte) (n int, err error) {
-    var i int
-    // fill last remain
-    lastRemain := int(q.size) % dataSize
-    if lastRemain != 0 {
-        i += xorBytes(q.data[lastRemain:], p)
-    }
-    if i != len(p) {
-        for len(p)-i >= dataSize {
-            i += xorBytes(q.data[:], p[i:])
-        }
-        xorBytes(q.data[:], p[i:])
-    }
-    q.size += uint64(len(p))
+    currentshift := q.shiftSoFar
+
+    // The bitvector where we'll start xoring
+    vectorArrayIndex := currentshift / 64
+
+    // The position within the bit vector at which we begin xoring
+    vectorOffset := currentshift % 64
+    iterations := len(p)
+    if iterations > widthInBits {
+        iterations = widthInBits
+    }
+
+    for i := 0; i < iterations; i++ {
+        isLastCell := vectorArrayIndex == len(q.data)-1
+        var bitsInVectorCell int
+        if isLastCell {
+            bitsInVectorCell = bitsInLastCell
+        } else {
+            bitsInVectorCell = 64
+        }
+
+        // There's at least 2 bitvectors before we reach the end of the array
+        if vectorOffset <= bitsInVectorCell-8 {
+            for j := i; j < len(p); j += widthInBits {
+                q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
+            }
+        } else {
+            index1 := vectorArrayIndex
+            var index2 int
+            if isLastCell {
+                index2 = 0
+            } else {
+                index2 = vectorArrayIndex + 1
+            }
+            low := byte(bitsInVectorCell - vectorOffset)
+
+            xoredByte := byte(0)
+            for j := i; j < len(p); j += widthInBits {
+                xoredByte ^= p[j]
+            }
+            q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
+            q.data[index2] ^= uint64(xoredByte) >> low
+        }
+        vectorOffset += shift
+        for vectorOffset >= bitsInVectorCell {
+            if isLastCell {
+                vectorArrayIndex = 0
+            } else {
+                vectorArrayIndex = vectorArrayIndex + 1
+            }
+            vectorOffset -= bitsInVectorCell
+        }
+    }
+
+    // Update the starting position in a circular shift pattern
+    q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
+
+    q.lengthSoFar += uint64(len(p))

     return len(p), nil
 }

 // Calculate the current checksum
-func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
-    for i := 0; i < dataSize; i++ {
-        shift := (i * 11) % 160
-        shiftBytes := shift / 8
-        shiftBits := shift % 8
-        shifted := int(q.data[i]) << shiftBits
-        h[shiftBytes] ^= byte(shifted)
-        h[shiftBytes+1] ^= byte(shifted >> 8)
-    }
-    h[0] ^= h[20]
+func (q *quickXorHash) checkSum() (h [Size]byte) {
+    // Output the data as little endian bytes
+    ph := 0
+    for i := 0; i < len(q.data)-1; i++ {
+        d := q.data[i]
+        _ = h[ph+7] // bounds check
+        h[ph+0] = byte(d >> (8 * 0))
+        h[ph+1] = byte(d >> (8 * 1))
+        h[ph+2] = byte(d >> (8 * 2))
+        h[ph+3] = byte(d >> (8 * 3))
+        h[ph+4] = byte(d >> (8 * 4))
+        h[ph+5] = byte(d >> (8 * 5))
+        h[ph+6] = byte(d >> (8 * 6))
+        h[ph+7] = byte(d >> (8 * 7))
+        ph += 8
+    }
+    // remaining 32 bits
+    d := q.data[len(q.data)-1]
+    h[Size-4] = byte(d >> (8 * 0))
+    h[Size-3] = byte(d >> (8 * 1))
+    h[Size-2] = byte(d >> (8 * 2))
+    h[Size-1] = byte(d >> (8 * 3))

     // XOR the file length with the least significant bits in little endian format
-    d := q.size
+    d = q.lengthSoFar
     h[Size-8] ^= byte(d >> (8 * 0))
     h[Size-7] ^= byte(d >> (8 * 1))
     h[Size-6] ^= byte(d >> (8 * 2))
@@ -106,7 +174,7 @@ func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
 // It does not change the underlying hash state.
 func (q *quickXorHash) Sum(b []byte) []byte {
     hash := q.checkSum()
-    return append(b, hash[:Size]...)
+    return append(b, hash[:]...)
 }

 // Reset resets the Hash to its initial state.
@@ -128,10 +196,8 @@ func (q *quickXorHash) BlockSize() int {
 }

 // Sum returns the quickXorHash checksum of the data.
-func Sum(data []byte) (h [Size]byte) {
+func Sum(data []byte) [Size]byte {
     var d quickXorHash
     _, _ = d.Write(data)
-    s := d.checkSum()
-    copy(h[:], s[:])
-    return h
+    return d.checkSum()
 }
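Either implementation satisfies hash.Hash, so callers such as the onedrive backend are unaffected by the rewrite. A minimal usage sketch against the package as it lives in the rclone module:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
)

func main() {
	h := quickxorhash.New()
	h.Write([]byte("hello, world"))
	// OneDrive transmits QuickXorHash values base64-encoded.
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))

	// One-shot helper over the same state machine.
	sum := quickxorhash.Sum([]byte("hello, world"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}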
backend/onedrive/quickxorhash/quickxorhash_test.go
@@ -4,7 +4,6 @@ import (
     "encoding/base64"
     "fmt"
    "hash"
-    "math/rand"
     "testing"

     "github.com/stretchr/testify/assert"
@@ -167,16 +166,3 @@ func TestReset(t *testing.T) {
 // check interface
 var _ hash.Hash = (*quickXorHash)(nil)
-
-func BenchmarkQuickXorHash(b *testing.B) {
-    b.SetBytes(1 << 20)
-    buf := make([]byte, 1<<20)
-    rand.Read(buf)
-    h := New()
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
-        h.Reset()
-        h.Write(buf)
-        h.Sum(nil)
-    }
-}
@@ -1,20 +0,0 @@
-//go:build !go1.20
-
-package quickxorhash
-
-func xorBytes(dst, src []byte) int {
-    n := len(dst)
-    if len(src) < n {
-        n = len(src)
-    }
-    if n == 0 {
-        return 0
-    }
-    dst = dst[:n]
-    //src = src[:n]
-    src = src[:len(dst)] // remove bounds check in loop
-    for i := range dst {
-        dst[i] ^= src[i]
-    }
-    return n
-}
@@ -1,9 +0,0 @@
-//go:build go1.20
-
-package quickxorhash
-
-import "crypto/subtle"
-
-func xorBytes(dst, src []byte) int {
-    return subtle.XORBytes(dst, src, dst)
-}
@@ -1,145 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
)
|
||||
|
||||
const (
|
||||
sseDefaultAlgorithm = "AES256"
|
||||
)
|
||||
|
||||
func getSha256(p []byte) []byte {
|
||||
h := sha256.New()
|
||||
h.Write(p)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
func validateSSECustomerKeyOptions(opt *Options) error {
|
||||
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
|
||||
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
|
||||
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
|
||||
}
|
||||
if opt.SSEKMSKeyID != "" {
|
||||
return nil
|
||||
}
|
||||
err := populateSSECustomerKeys(opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateSSECustomerKeys(opt *Options) error {
|
||||
if opt.SSECustomerKeyFile != "" {
|
||||
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
|
||||
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
|
||||
}
|
||||
opt.SSECustomerKey = strings.TrimSpace(string(data))
|
||||
}
|
||||
if opt.SSECustomerKey != "" {
|
||||
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
|
||||
}
|
||||
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
|
||||
if opt.SSECustomerKeySha256 == "" {
|
||||
opt.SSECustomerKeySha256 = sha256Checksum
|
||||
} else {
|
||||
if opt.SSECustomerKeySha256 != sha256Checksum {
|
||||
return fmt.Errorf("the computed SHA256 checksum "+
|
||||
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
|
||||
sha256Checksum, opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
if opt.SSECustomerAlgorithm == "" {
|
||||
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
|
||||
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
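populateSSECustomerKeys above derives the sse_customer_key_sha256 value by base64-decoding the key and base64-encoding its SHA-256 digest. A minimal sketch of the same derivation, handy for pre-computing the config entry; the 32-byte key here is a made-up example:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical base64-encoded 256-bit key: 32 repeated 'k' bytes.
	key := base64.StdEncoding.EncodeToString(bytes.Repeat([]byte("k"), 32))

	decoded, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(decoded)
	// This is the value populateSSECustomerKeys fills in for
	// sse_customer_key_sha256 when the config leaves it blank.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```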
@@ -9,8 +9,6 @@ import (
    "errors"
    "net/http"
    "os"
    "path"
    "strings"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/common/auth"
@@ -20,33 +18,15 @@ import (
    "github.com/rclone/rclone/fs/fshttp"
)

func expandPath(filepath string) (expandedPath string) {
    if filepath == "" {
        return filepath
    }
    cleanedPath := path.Clean(filepath)
    expandedPath = cleanedPath
    if strings.HasPrefix(cleanedPath, "~") {
        rest := cleanedPath[2:]
        home, err := os.UserHomeDir()
        if err != nil {
            return expandedPath
        }
        expandedPath = path.Join(home, rest)
    }
    return
}
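expandPath only rewrites a leading "~" (the cleanedPath[2:] slice assumes the "~/..." form) and falls back to the cleaned path if the home directory cannot be resolved. A quick standalone illustration of the same behaviour, with hypothetical paths:

```go
package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// expand mirrors expandPath above: clean, then substitute a leading
// "~/" (like the original, it assumes the "~/..." form).
func expand(p string) string {
	cleaned := path.Clean(p)
	if strings.HasPrefix(cleaned, "~") {
		home, err := os.UserHomeDir()
		if err != nil {
			return cleaned
		}
		return path.Join(home, cleaned[2:])
	}
	return cleaned
}

func main() {
	fmt.Println(expand("~/.oci/config"))    // e.g. /home/user/.oci/config
	fmt.Println(expand("/etc//oci/config")) // cleaned to /etc/oci/config
}
```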

func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
    switch opt.Provider {
    case instancePrincipal:
        return auth.InstancePrincipalConfigurationProvider()
    case userPrincipal:
        expandConfigFilePath := expandPath(opt.ConfigFile)
        if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
        if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
        }
        return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
        return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
    case resourcePrincipal:
        return auth.ResourcePrincipalConfigurationProvider()
    case noAuth:

@@ -65,8 +65,8 @@ a bucket or with a bucket and path.
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup oos:bucket/path/to/object
    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

@@ -74,7 +74,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err erro
        BucketName:        common.String(srcBucket),
        CopyObjectDetails: copyObjectDetails,
    }
    useBYOKCopyObject(f, &req)
    var resp objectstorage.CopyObjectResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CopyObject(ctx, req)

@@ -87,7 +87,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
        BucketName: common.String(bucketName),
        ObjectName: common.String(objectPath),
    }
    useBYOKHeadObject(o.fs, &req)
    var response objectstorage.HeadObjectResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        var err error
@@ -100,7 +99,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
                return nil, fs.ErrorObjectNotFound
            }
        }
        fs.Errorf(o, "Failed to head object: %v", err)
        return nil, err
    }
    o.fs.cache.MarkOK(bucketName)
@@ -333,7 +331,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
        ObjectName: common.String(bucketPath),
    }
    o.applyGetObjectOptions(&req, options...)
    useBYOKGetObject(o.fs, &req)

    var resp objectstorage.GetObjectResponse
    err := o.fs.pacer.Call(func() (bool, error) {
        var err error
@@ -435,7 +433,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        uploadRequest.StorageTier = storageTier
    }
    o.applyMultiPutOptions(&uploadRequest, options...)
    useBYOKUpload(o.fs, &uploadRequest)
    uploadStreamRequest := transfer.UploadStreamRequest{
        UploadRequest: uploadRequest,
        StreamReader:  in,
@@ -509,10 +506,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        req.StorageTier = storageTier
    }
    o.applyPutOptions(&req, options...)
    useBYOKPutObject(o.fs, &req)
    var resp objectstorage.PutObjectResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.PutObject(ctx, req)
        resp, err := o.fs.srv.PutObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {

@@ -17,7 +17,9 @@ const (
    defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
    defaultUploadConcurrency   = 10
    maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
    minSleep                   = 10 * time.Millisecond
    minSleep                   = 100 * time.Millisecond
    maxSleep                   = 5 * time.Minute
    decayConstant              = 1 // bigger for slower decay, exponential
    defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)

@@ -45,28 +47,23 @@ https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfromins
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
SSECustomerKey string `config:"sse_customer_key"`
|
||||
SSECustomerKeyFile string `config:"sse_customer_key_file"`
|
||||
SSECustomerKeySha256 string `config:"sse_customer_key_sha256"`
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
}
|
||||
|
func newOptions() []fs.Option {
@@ -126,22 +123,6 @@ func newOptions() []fs.Option {
            Value: "Default",
            Help:  "Use the default profile",
        }},
    }, {
        // Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
        Name:     "storage_tier",
        Help:     "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
        Default:  "Standard",
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "Standard",
            Help:  "Standard storage tier, this is the default tier",
        }, {
            Value: "InfrequentAccess",
            Help:  "InfrequentAccess storage tier",
        }, {
            Value: "Archive",
            Help:  "Archive storage tier",
        }},
    }, {
        Name: "upload_cutoff",
        Help: `Cutoff for switching to chunked upload.
@@ -257,59 +238,5 @@ creation permissions.
`,
        Default:  false,
        Advanced: true,
    }, {
        Name: "sse_customer_key_file",
        Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "None",
        }},
    }, {
        Name: "sse_customer_key",
        Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
needed. For more information, see Using Your Own Keys for Server-Side Encryption
(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "None",
        }},
    }, {
        Name: "sse_customer_key_sha256",
        Help: `If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
key. This value is used to check the integrity of the encryption key. See Using Your Own Keys for
Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "None",
        }},
    }, {
        Name: "sse_kms_key_id",
        Help: `If using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "None",
        }},
    }, {
        Name: "sse_customer_algorithm",
        Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
Object Storage supports "AES256" as the encryption algorithm. For more information, see
Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "None",
        }, {
            Value: sseDefaultAlgorithm,
            Help:  sseDefaultAlgorithm,
        }},
    }}
}

@@ -59,27 +59,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if err != nil {
        return nil, err
    }
    err = validateSSECustomerKeyOptions(opt)
    if err != nil {
        return nil, err
    }
    ci := fs.GetConfig(ctx)
    objectStorageClient, err := newObjectStorageClient(ctx, opt)
    if err != nil {
        return nil, err
    }
    pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
    // Set pacer retries to 2 (1 try and 1 retry) because we are
    // relying on SDK retry mechanism, but we allow 2 attempts to
    // retry directory listings after XMLSyntaxError
    pc.SetRetries(2)
    p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
    f := &Fs{
        name:  name,
        opt:   *opt,
        ci:    ci,
        srv:   objectStorageClient,
        cache: bucket.NewCache(),
        pacer: pc,
        pacer: fs.NewPacer(ctx, p),
    }
    f.setRoot(root)
    f.features = (&fs.Features{

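The hunk above swaps a default token-bucket pacer for the S3-style pacer with a retry cap of 2. A sketch contrasting the two constructions, using the constants from the earlier hunk; it assumes only the rclone fs and pacer packages this file already imports:

```go
package main

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	ctx := context.Background()

	// Newer shape: S3-style pacer, retries capped at 2 (1 try + 1 retry)
	// because the OCI SDK performs its own retrying.
	pcNew := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(10*time.Millisecond)))
	pcNew.SetRetries(2)

	// Older shape: default exponential pacer built from the constants
	// in the const hunk above.
	pcOld := fs.NewPacer(ctx, pacer.NewDefault(
		pacer.MinSleep(100*time.Millisecond),
		pacer.MaxSleep(5*time.Minute),
		pacer.DecayConstant(1), // bigger means slower decay
	))
	_ = pcOld
}
```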
177 backend/s3/s3.go
@@ -2266,11 +2266,6 @@ rclone's choice here.
    Help:     `Suppress setting and reading of system metadata`,
    Advanced: true,
    Default:  false,
}, {
    Name:     "sts_endpoint",
    Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
    Provider: "AWS",
    Advanced: true,
},
}})
}
@@ -2357,7 +2352,6 @@ type Options struct {
    SecretAccessKey    string `config:"secret_access_key"`
    Region             string `config:"region"`
    Endpoint           string `config:"endpoint"`
    STSEndpoint        string `config:"sts_endpoint"`
    LocationConstraint string `config:"location_constraint"`
    ACL                string `config:"acl"`
    BucketACL          string `config:"bucket_acl"`
@@ -2534,7 +2528,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
    bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
    bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
    return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -2566,38 +2560,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
    }
}

// Default name resolver
var defaultResolver = endpoints.DefaultResolver()

// resolve (service, region) to endpoint
//
// Used to set endpoint for s3 services and not for other services
type resolver map[string]string

// Add a service to the resolver, ignoring empty urls
func (r resolver) addService(service, url string) {
    if url == "" {
        return
    }
    if !strings.HasPrefix(url, "http") {
        url = "https://" + url
    }
    r[service] = url
}

// EndpointFor return the endpoint for s3 if set or the default if not
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
    fs.Debugf(nil, "Resolving service %q region %q", service, region)
    url, ok := r[service]
    if ok {
        return endpoints.ResolvedEndpoint{
            URL:           url,
            SigningRegion: region,
        }, nil
    }
    return defaultResolver.EndpointFor(service, region, opts...)
}

// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
    ci := fs.GetConfig(ctx)
@@ -2676,12 +2638,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
    if opt.Region != "" {
        awsConfig.WithRegion(opt.Region)
    }
    if opt.Endpoint != "" || opt.STSEndpoint != "" {
        // If endpoints are set, override the relevant services only
        r := make(resolver)
        r.addService("s3", opt.Endpoint)
        r.addService("sts", opt.STSEndpoint)
        awsConfig.WithEndpointResolver(r)
    if opt.Endpoint != "" {
        awsConfig.WithEndpoint(opt.Endpoint)
    }

    // awsConfig.WithLogLevel(aws.LogDebugWithSigning)

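The resolver type shown above plugs into the AWS SDK's endpoint resolution so that only the s3 and sts services are overridden, whereas the plain WithEndpoint call redirects every service. A self-contained sketch of that pattern against aws-sdk-go v1; the bucket-less endpoint URL is a placeholder:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
)

// resolver maps a service ID to an endpoint URL, as in the code above.
type resolver map[string]string

func (r resolver) addService(service, url string) {
	if url == "" {
		return
	}
	if !strings.HasPrefix(url, "http") {
		url = "https://" + url
	}
	r[service] = url
}

// EndpointFor satisfies endpoints.Resolver: overridden services resolve
// to the configured URL, everything else falls through to the default.
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
	if url, ok := r[service]; ok {
		return endpoints.ResolvedEndpoint{URL: url, SigningRegion: region}, nil
	}
	return endpoints.DefaultResolver().EndpointFor(service, region, opts...)
}

func main() {
	r := make(resolver)
	r.addService("s3", "minio.example.com:9000") // placeholder endpoint
	cfg := aws.NewConfig().WithEndpointResolver(r)
	ep, _ := cfg.EndpointResolver.EndpointFor("s3", "us-east-1")
	fmt.Println(ep.URL) // https://minio.example.com:9000
}
```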
@@ -3031,15 +2989,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        GetTier:     true,
        SlowModTime: true,
    }).Fill(ctx, f)
    if opt.Provider == "Storj" {
        f.features.SetTier = false
        f.features.GetTier = false
    }
    if opt.Provider == "IDrive" {
        f.features.SetTier = false
    }
    // f.listMultipartUploads()

    if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
        // Check to see if the (bucket,directory) is actually an existing file
        oldRoot := f.root
@@ -3054,6 +3003,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }
    if opt.Provider == "Storj" {
        f.features.SetTier = false
        f.features.GetTier = false
    }
    if opt.Provider == "IDrive" {
        f.features.SetTier = false
    }
    // f.listMultipartUploads()
    return f, nil
}

@@ -3468,16 +3425,15 @@ var errEndList = errors.New("end list")

// list options
type listOpt struct {
    bucket        string  // bucket to list
    directory     string  // directory with bucket
    prefix        string  // prefix to remove from listing
    addBucket     bool    // if set, the bucket is added to the start of the remote
    recurse       bool    // if set, recurse to read sub directories
    withVersions  bool    // if set, versions are produced
    hidden        bool    // if set, return delete markers as objects with size == isDeleteMarker
    findFile      bool    // if set, it will look for files called (bucket, directory)
    versionAt     fs.Time // if set only show versions <= this time
    noSkipMarkers bool    // if set return dir marker objects
    bucket       string  // bucket to list
    directory    string  // directory with bucket
    prefix       string  // prefix to remove from listing
    addBucket    bool    // if set, the bucket is added to the start of the remote
    recurse      bool    // if set, recurse to read sub directories
    withVersions bool    // if set, versions are produced
    hidden       bool    // if set, return delete markers as objects with size == isDeleteMarker
    findFile     bool    // if set, it will look for files called (bucket, directory)
    versionAt    fs.Time // if set only show versions <= this time
}

// list lists the objects into the function supplied with the opt
@@ -3590,7 +3546,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
        }
        remote = remote[len(opt.prefix):]
        if opt.addBucket {
            remote = bucket.Join(opt.bucket, remote)
            remote = path.Join(opt.bucket, remote)
        }
        remote = strings.TrimSuffix(remote, "/")
        err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3619,10 +3575,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
        remote = remote[len(opt.prefix):]
        isDirectory := remote == "" || strings.HasSuffix(remote, "/")
        if opt.addBucket {
            remote = bucket.Join(opt.bucket, remote)
            remote = path.Join(opt.bucket, remote)
        }
        // is this a directory marker?
        if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
        if isDirectory && object.Size != nil && *object.Size == 0 {
            continue // skip directory marker
        }
        if versionIDs != nil {
@@ -3912,7 +3868,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
    req.Bucket = &dstBucket
    req.ACL = stringPointerOrNil(f.opt.ACL)
    req.Key = &dstPath
    source := pathEscape(bucket.Join(srcBucket, srcPath))
    source := pathEscape(path.Join(srcBucket, srcPath))
    if src.versionID != nil {
        source += fmt.Sprintf("?versionId=%s", *src.versionID)
    }
@@ -4147,9 +4103,9 @@ Usage Examples:
    rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
    rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
This flag also obeys the filters. Test first with -i/--interactive or --dry-run flags

    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
    rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard

All the objects shown will be marked for restore, then

@@ -4217,8 +4173,8 @@ a bucket or with a bucket and path.
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup s3:bucket/path/to/object
    rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
@@ -4234,8 +4190,8 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
Long: `This command removes any old hidden versions of files
on a versions enabled bucket.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup-hidden s3:bucket/path/to/dir
`,
@@ -4569,14 +4525,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
        delErr <- operations.DeleteFiles(ctx, delChan)
    }()
    checkErr(f.list(ctx, listOpt{
        bucket:        bucket,
        directory:     directory,
        prefix:        f.rootDirectory,
        addBucket:     f.rootBucket == "",
        recurse:       true,
        withVersions:  versioned,
        hidden:        true,
        noSkipMarkers: true,
        bucket:       bucket,
        directory:    directory,
        prefix:       f.rootDirectory,
        addBucket:    f.rootBucket == "",
        recurse:      true,
        withVersions: versioned,
        hidden:       true,
    }, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
        if isDirectory {
            return nil
@@ -4586,7 +4541,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
            fs.Errorf(object, "Can't create object %+v", err)
            return nil
        }
        tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
        tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
        // Work out whether the file is the current version or not
        isCurrentVersion := !versioned || !version.Match(remote)
        fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
@@ -5030,7 +4985,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

var warnStreamUpload sync.Once

func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (wantETag, gotETag string, versionID *string, err error) {
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, versionID *string, err error) {
    f := o.fs

    // make concurrency machinery
@@ -5074,7 +5029,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
        return f.shouldRetry(ctx, err)
    })
    if err != nil {
        return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
        return etag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
    }
    uid := cout.UploadId

@@ -5147,7 +5102,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
        finished = true
    } else if err != nil {
        free()
        return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
        return etag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
    }
    buf = buf[:n]

@@ -5202,7 +5157,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
    }
    err = g.Wait()
    if err != nil {
        return wantETag, gotETag, nil, err
        return etag, nil, err
    }

    // sort the completed parts by part number
@@ -5224,17 +5179,14 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
        return f.shouldRetry(ctx, err)
    })
    if err != nil {
        return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
        return etag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
    }
    hashOfHashes := md5.Sum(md5s)
    wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
    etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
    if resp != nil {
        if resp.ETag != nil {
            gotETag = *resp.ETag
        }
        versionID = resp.VersionId
    }
    return wantETag, gotETag, versionID, nil
    return etag, versionID, nil
}

// unWrapAwsError unwraps AWS errors, looking for a non AWS error
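The finalise hunk computes the expected multipart ETag: the MD5 of the concatenated binary MD5s of each part, hex-encoded, with "-<part count>" appended. A standalone sketch of that arithmetic for two made-up parts:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	parts := [][]byte{[]byte("part one data"), []byte("part two data")}

	// Concatenate the raw (binary) MD5 of every part, as the upload
	// loop accumulates into md5s above.
	var md5s []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...)
	}

	// MD5-of-MD5s, hex encoded, suffixed with the part count: this is
	// the wantETag the code compares against the ETag S3 returns.
	hashOfHashes := md5.Sum(md5s)
	etag := fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
	fmt.Println(etag)
}
```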
@@ -5534,16 +5486,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    }

    var wantETag string        // Multipart upload Etag to check
    var gotETag string         // Etag we got from the upload
    var gotEtag string         // Etag we got from the upload
    var lastModified time.Time // Time we got from the upload
    var versionID *string      // versionID we got from the upload
    if multipart {
        wantETag, gotETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
        wantETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
    } else {
        if o.fs.opt.UsePresignedRequest {
            gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
            gotEtag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
        } else {
            gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
            gotEtag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
        }
    }
    if err != nil {

@@ -5559,33 +5511,32 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // User requested we don't HEAD the object after uploading it
    // so make up the object as best we can assuming it got
    // uploaded properly. If size < 0 then we need to do the HEAD.
    var head *s3.HeadObjectOutput
    if o.fs.opt.NoHead && size >= 0 {
        head = new(s3.HeadObjectOutput)
        //structs.SetFrom(head, &req)
        setFrom_s3HeadObjectOutput_s3PutObjectInput(head, &req)
        var head s3.HeadObjectOutput
        //structs.SetFrom(&head, &req)
        setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
        head.ETag = &md5sumHex // doesn't matter quotes are missing
        head.ContentLength = &size
        // We get etag back from single and multipart upload so fill it in here
        if gotETag != "" {
            head.ETag = &gotETag
        // If we have done a single part PUT request then we can read these
        if gotEtag != "" {
            head.ETag = &gotEtag
        }
        if lastModified.IsZero() {
            lastModified = time.Now()
        }
        head.LastModified = &lastModified
        head.VersionId = versionID
    } else {
        // Read the metadata from the newly created object
        o.meta = nil // wipe old metadata
        head, err = o.headObject(ctx)
        if err != nil {
            return err
        }
        o.setMetaData(&head)
        return nil
    }

    // Read the metadata from the newly created object
    o.meta = nil // wipe old metadata
    head, err := o.headObject(ctx)
    if err != nil {
        return err
    }
    o.setMetaData(head)

    // Check multipart upload ETag if required
    if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
        gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
        if wantETag != gotETag {

@@ -1,54 +0,0 @@
package seafile

import (
    "sync"
    "time"

    "github.com/rclone/rclone/fs"
)

// Renew allows tokens to be renewed on expiry.
type Renew struct {
    ts       *time.Ticker     // timer indicating when it's time to renew the token
    run      func() error     // the callback to do the renewal
    done     chan interface{} // channel to end the go routine
    shutdown *sync.Once
}

// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to do the renewal.
func NewRenew(every time.Duration, run func() error) *Renew {
    r := &Renew{
        ts:       time.NewTicker(every),
        run:      run,
        done:     make(chan interface{}),
        shutdown: &sync.Once{},
    }
    go r.renewOnExpiry()
    return r
}

func (r *Renew) renewOnExpiry() {
    for {
        select {
        case <-r.ts.C:
            err := r.run()
            if err != nil {
                fs.Errorf(nil, "error while refreshing decryption token: %s", err)
            }

        case <-r.done:
            return
        }
    }
}

// Shutdown stops the ticker and no more renewal will take place.
func (r *Renew) Shutdown() {
    // closing a channel can only be done once
    r.shutdown.Do(func() {
        r.ts.Stop()
        close(r.done)
    })
}
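The deleted renew.go implements a small ticker-plus-once pattern: a goroutine re-runs the callback on every tick, and the sync.Once makes Shutdown safe to call repeatedly. A usage sketch, assuming the Renew type above is in scope (package seafile); the callback is a stand-in for f.authorizeLibrary:

```go
// Usage sketch inside package seafile. The 45-minute interval mirrors
// how seafile.go wires this up further below.
renew := NewRenew(45*time.Minute, func() error {
	fs.Debugf(nil, "refreshing encrypted library token") // stand-in work
	return nil
})
// ... later, e.g. from (*Fs).Shutdown:
renew.Shutdown()
renew.Shutdown() // safe: the sync.Once runs the close exactly once
```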
@@ -1,34 +0,0 @@
package seafile

import (
    "sync/atomic"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestShouldAllowShutdownTwice(t *testing.T) {
    renew := NewRenew(time.Hour, func() error {
        return nil
    })
    renew.Shutdown()
    renew.Shutdown()
}

func TestRenewalInTimeLimit(t *testing.T) {
    var count int64

    renew := NewRenew(100*time.Millisecond, func() error {
        atomic.AddInt64(&count, 1)
        return nil
    })
    time.Sleep(time.Second)
    renew.Shutdown()

    // there's no guarantee the CI agent can handle a simple goroutine
    renewCount := atomic.LoadInt64(&count)
    t.Logf("renew count = %d", renewCount)
    assert.Greater(t, renewCount, int64(0))
    assert.Less(t, renewCount, int64(11))
}
@@ -143,7 +143,6 @@ type Fs struct {
    createDirMutex      sync.Mutex // Protect creation of directories
    useOldDirectoryAPI  bool       // Use the old API v2 if seafile < 7
    moveDirNotAvailable bool       // Version < 7.0 don't have an API to move a directory
    renew               *Renew     // Renew an encrypted library token
}

// ------------------------------------------------------------
@@ -269,11 +268,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
            }
            // And remove the public link feature
            f.features.PublicLink = nil

            // renew the library password every 45 minutes
            f.renew = NewRenew(45*time.Minute, func() error {
                return f.authorizeLibrary(context.Background(), libraryID)
            })
        }
    } else {
        // Deactivate the cleaner feature since there's no library selected
@@ -389,15 +383,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
    return nil, fmt.Errorf("unknown state %q", config.State)
}

// Shutdown the Fs
func (f *Fs) Shutdown(ctx context.Context) error {
    if f.renew == nil {
        return nil
    }
    f.renew.Shutdown()
    return nil
}

// sets the AuthorizationToken up
func (f *Fs) setAuthorizationToken(token string) {
    f.srv.SetHeader("Authorization", "Token "+token)
@@ -1346,7 +1331,6 @@ var (
    _ fs.PutStreamer  = &Fs{}
    _ fs.PublicLinker = &Fs{}
    _ fs.UserInfoer   = &Fs{}
    _ fs.Shutdowner   = &Fs{}
    _ fs.Object       = &Object{}
    _ fs.IDer         = &Object{}
)

@@ -34,10 +34,9 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {

    d := &smb2.Dialer{
        Initiator: &smb2.NTLMInitiator{
            User:      f.opt.User,
            Password:  pass,
            Domain:    f.opt.Domain,
            TargetSPN: f.opt.SPN,
            User:     f.opt.User,
            Password: pass,
            Domain:   f.opt.Domain,
        },
    }

@@ -82,7 +81,7 @@ func (c *conn) closed() bool {
        // list the shares
        _, nopErr = c.smbSession.ListSharenames()
    }
    return nopErr != nil
    return nopErr == nil
}

// Show that we are using an SMB session

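The dial hunk's only substantive difference is the TargetSPN field on the NTLM initiator. A minimal sketch of establishing a session with those fields; host, credentials, and SPN are placeholders, and the library import path is assumed to be the go-smb2 package the smb backend depends on (github.com/hirochachacha/go-smb2):

```go
package main

import (
	"net"

	smb2 "github.com/hirochachacha/go-smb2"
)

func main() {
	tconn, err := net.Dial("tcp", "server.example.com:445") // placeholder host
	if err != nil {
		panic(err)
	}
	d := &smb2.Dialer{
		Initiator: &smb2.NTLMInitiator{
			User:      "user",                 // placeholder
			Password:  "pass",                 // placeholder
			Domain:    "WORKGROUP",            // the backend's default
			TargetSPN: "cifs/remotehost:1020", // the field this hunk adds
		},
	}
	session, err := d.Dial(tconn)
	if err != nil {
		panic(err)
	}
	defer session.Logoff()

	// The closed() health check above issues exactly this no-op call.
	shares, err := session.ListSharenames()
	if err != nil {
		panic(err)
	}
	for _, s := range shares {
		println(s)
	}
}
```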
@@ -60,17 +60,6 @@ func init() {
        Name:    "domain",
        Help:    "Domain name for NTLM authentication.",
        Default: "WORKGROUP",
    }, {
        Name: "spn",
        Help: `Service principal name.

Rclone presents this name to the server. Some servers use this as further
authentication, and it often needs to be set for clusters. For example:

    cifs/remotehost:1020

Leave blank if not sure.
`,
    }, {
        Name:    "idle_timeout",
        Default: fs.Duration(60 * time.Second),
@@ -120,7 +109,6 @@ type Options struct {
    User            string      `config:"user"`
    Pass            string      `config:"pass"`
    Domain          string      `config:"domain"`
    SPN             string      `config:"spn"`
    HideSpecial     bool        `config:"hide_special_share"`
    CaseInsensitive bool        `config:"case_insensitive"`
    IdleTimeout     fs.Duration `config:"idle_timeout"`

@@ -23,7 +23,6 @@ import (
    "golang.org/x/text/unicode/norm"

    "storj.io/uplink"
    "storj.io/uplink/edge"
)

const (
@@ -32,9 +31,9 @@ const (
)

var satMap = map[string]string{
    "us1.storj.io":           "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777",
    "eu1.storj.io":           "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777",
    "ap1.storj.io":           "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777",
    "us-central-1.storj.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
    "europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
    "asia-east-1.storj.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
@@ -106,16 +105,16 @@ func init() {
    Name:     "satellite_address",
    Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
    Provider: newProvider,
    Default:  "us1.storj.io",
    Default:  "us-central-1.storj.io",
    Examples: []fs.OptionExample{{
        Value: "us1.storj.io",
        Help:  "US1",
        Value: "us-central-1.storj.io",
        Help:  "US Central 1",
    }, {
        Value: "eu1.storj.io",
        Help:  "EU1",
        Value: "europe-west-1.storj.io",
        Help:  "Europe West 1",
    }, {
        Value: "ap1.storj.io",
        Help:  "AP1",
        Value: "asia-east-1.storj.io",
        Help:  "Asia East 1",
    },
    },
    },
@@ -157,13 +156,11 @@ type Fs struct {

// Check the interfaces are satisfied.
var (
    _ fs.Fs           = &Fs{}
    _ fs.ListRer      = &Fs{}
    _ fs.PutStreamer  = &Fs{}
    _ fs.Mover        = &Fs{}
    _ fs.Copier       = &Fs{}
    _ fs.Purger       = &Fs{}
    _ fs.PublicLinker = &Fs{}
    _ fs.Fs          = &Fs{}
    _ fs.ListRer     = &Fs{}
    _ fs.PutStreamer = &Fs{}
    _ fs.Mover       = &Fs{}
    _ fs.Copier      = &Fs{}
)

// NewFs creates a filesystem backed by Storj.
@@ -548,7 +545,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
    defer func() {
        if err != nil {
            aerr := upload.Abort()
            if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
            if aerr != nil {
                fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
            }
        }
@@ -563,16 +560,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

    _, err = io.Copy(upload, in)
    if err != nil {
        if errors.Is(err, uplink.ErrBucketNotFound) {
            // Rclone assumes the backend will create the bucket if not existing yet.
            // Here we create the bucket and return a retry error for rclone to retry the upload.
            _, err = f.project.EnsureBucket(ctx, bucketName)
            if err != nil {
                return nil, err
            }
            return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
        }

        err = fserrors.RetryError(err)
        fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

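The deleted block implements rclone's convention that a backend creates a missing bucket itself: on uplink.ErrBucketNotFound it calls EnsureBucket and returns a retryable error so the whole upload is re-attempted. A condensed sketch of that flow; project (*uplink.Project), ctx, in, upload and bucketName are assumed to be in scope as in Put above:

```go
// Sketch of the create-and-retry convention from the deleted hunk.
_, err = io.Copy(upload, in)
if err != nil {
	if errors.Is(err, uplink.ErrBucketNotFound) {
		// Create the bucket, then signal rclone to retry the upload.
		if _, berr := project.EnsureBucket(ctx, bucketName); berr != nil {
			return nil, berr
		}
		return nil, fserrors.RetryError(errors.New("bucket created, retry the upload"))
	}
	// Other copy failures are also surfaced as retryable.
	return nil, fserrors.RetryError(err)
}
```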
@@ -774,103 +761,3 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    // Return the new object
    return newObjectFromUplink(f, remote, newObject), nil
}

// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
    bucket, directory := f.absolute(dir)
    if bucket == "" {
        return errors.New("can't purge from root")
    }

    if directory == "" {
        _, err := f.project.DeleteBucketWithObjects(ctx, bucket)
        if errors.Is(err, uplink.ErrBucketNotFound) {
            return fs.ErrorDirNotFound
        }
        return err
    }

    fs.Infof(directory, "Quick delete is available only for entire bucket. Falling back to list and delete.")
    objects := f.project.ListObjects(ctx, bucket,
        &uplink.ListObjectsOptions{
            Prefix:    directory + "/",
            Recursive: true,
        },
    )
    if err := objects.Err(); err != nil {
        return err
    }

    empty := true
    for objects.Next() {
        empty = false
        _, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key)
        if err != nil {
            return err
        }
        fs.Infof(objects.Item().Key, "Deleted")
    }

    if empty {
        return fs.ErrorDirNotFound
    }

    return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    bucket, key := f.absolute(remote)
    if bucket == "" {
        return "", errors.New("path must be specified")
    }

    // Rclone requires that a link is only generated if the remote path exists
    if key == "" {
        _, err := f.project.StatBucket(ctx, bucket)
        if err != nil {
            return "", err
        }
    } else {
        _, err := f.project.StatObject(ctx, bucket, key)
        if err != nil {
            if !errors.Is(err, uplink.ErrObjectNotFound) {
                return "", err
            }
            // No object found, check if there is such a prefix
            iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"})
            if iter.Err() != nil {
                return "", iter.Err()
            }
            if !iter.Next() {
                return "", err
            }
        }
    }

    sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key}

    permission := uplink.ReadOnlyPermission()
    if expire.IsSet() {
        permission.NotAfter = time.Now().Add(time.Duration(expire))
    }

    sharedAccess, err := f.access.Share(permission, sharedPrefix)
    if err != nil {
        return "", fmt.Errorf("sharing access to object failed: %w", err)
    }

    creds, err := (&edge.Config{
        AuthServiceAddress: "auth.storjshare.io:7777",
    }).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true})
    if err != nil {
        return "", fmt.Errorf("creating public link failed: %w", err)
    }

    return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil)
}

@@ -214,7 +214,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe

    client := fshttp.NewClient(ctx)
    f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
    f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
    f.IDRegexp = regexp.MustCompile("https://uptobox.com/([a-zA-Z0-9]+)")

    _, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
    if err != nil {

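The one-line change above escapes the dot in the ID regexp; unescaped, "." matches any character, so the older pattern also matches look-alike hosts. A quick demonstration:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	escaped := regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
	unescaped := regexp.MustCompile("https://uptobox.com/([a-zA-Z0-9]+)")

	lookalike := "https://uptoboxXcom/abc123" // hypothetical spoofed host
	fmt.Println(escaped.MatchString(lookalike))   // false
	fmt.Println(unescaped.MatchString(lookalike)) // true: "." matched "X"
}
```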
@@ -712,7 +712,6 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
continue
|
||||
}
|
||||
subPath := u.Path[len(baseURL.Path):]
|
||||
subPath = strings.TrimPrefix(subPath, "/") // ignore leading / here for davrods
|
||||
if f.opt.Enc != encoder.EncodeZero {
|
||||
subPath = f.opt.Enc.ToStandardPath(subPath)
|
||||
}
|
||||
|
@@ -24,23 +24,15 @@ var (
// prepareServer the test server and return a function to tidy it up afterwards
// with each request the headers option tests are executed
func prepareServer(t *testing.T) (configmap.Simple, func()) {
    // test the headers are there then send a dummy response to About
    // file server
    fileServer := http.FileServer(http.Dir(""))

    // test the headers are there then pass on to fileServer
    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
        assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
        assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
        fmt.Fprintf(w, `<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
 <d:response>
  <d:href>/remote.php/webdav/</d:href>
  <d:propstat>
   <d:prop>
    <d:quota-available-bytes>-3</d:quota-available-bytes>
    <d:quota-used-bytes>376461895</d:quota-used-bytes>
   </d:prop>
   <d:status>HTTP/1.1 200 OK</d:status>
  </d:propstat>
 </d:response>
</d:multistatus>`)
        fileServer.ServeHTTP(w, r)
    })

    // Make the test server
@@ -76,7 +68,7 @@ func TestHeaders(t *testing.T) {
    f, tidy := prepare(t)
    defer tidy()

    // send an About response since that is all the dummy server can return
    // any request will do
    _, err := f.Features().About(context.Background())
    require.NoError(t, err)
}

@@ -57,7 +57,6 @@ var osarches = []string{
    "linux/386",
    "linux/amd64",
    "linux/arm",
    "linux/arm-v6",
    "linux/arm-v7",
    "linux/arm64",
    "linux/mips",
@@ -65,12 +64,10 @@ var osarches = []string{
    "freebsd/386",
    "freebsd/amd64",
    "freebsd/arm",
    "freebsd/arm-v6",
    "freebsd/arm-v7",
    "netbsd/386",
    "netbsd/amd64",
    "netbsd/arm",
    "netbsd/arm-v6",
    "netbsd/arm-v7",
    "openbsd/386",
    "openbsd/amd64",
@@ -85,16 +82,13 @@ var archFlags = map[string][]string{
    "386":    {"GO386=softfloat"},
    "mips":   {"GOMIPS=softfloat"},
    "mipsle": {"GOMIPS=softfloat"},
    "arm":    {"GOARM=5"},
    "arm-v6": {"GOARM=6"},
    "arm-v7": {"GOARM=7"},
}

// Map Go architectures to NFPM architectures
// Any missing are passed straight through
var goarchToNfpm = map[string]string{
    "arm":    "arm5",
    "arm-v6": "arm6",
    "arm":    "arm6",
    "arm-v7": "arm7",
}

@@ -26,8 +26,7 @@ echo "Making release ${VERSION} anchor ${ANCHOR} to repo ${REPO}"
gh release create "${VERSION}" \
    --repo ${REPO} \
    --title "rclone ${VERSION}" \
    --notes-file "/tmp/${VERSION}-release-notes" \
    --draft=true
    --notes-file "/tmp/${VERSION}-release-notes"

for build in build/*; do
    case $build in
@@ -41,10 +40,6 @@ for build in build/*; do
        "${build}"
done

gh release edit "${VERSION}" \
    --repo ${REPO} \
    --draft=false

gh release view "${VERSION}" \
    --repo ${REPO}

@@ -12,14 +12,12 @@ import (

var (
    noAutoBrowser bool
    template      string
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
    flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
}

var commandDefinition = &cobra.Command{
@@ -30,15 +28,13 @@ Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Use the --auth-no-open-browser flag to prevent rclone from opening the auth
link in the default browser automatically.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.27",
    },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 3, command, args)
        return config.Authorize(context.Background(), args, noAutoBrowser, template)
        return config.Authorize(context.Background(), args, noAutoBrowser)
    },
}

12 cmd/cmd.go
@@ -73,13 +73,11 @@ func ShowVersion() {

    linking, tagString := buildinfo.GetLinkingAndTags()

    arch := buildinfo.GetArch()

    fmt.Printf("rclone %s\n", fs.Version)
    fmt.Printf("- os/version: %s\n", osVersion)
    fmt.Printf("- os/kernel: %s\n", osKernel)
    fmt.Printf("- os/type: %s\n", runtime.GOOS)
    fmt.Printf("- os/arch: %s\n", arch)
    fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
    fmt.Printf("- go/version: %s\n", runtime.Version())
    fmt.Printf("- go/linking: %s\n", linking)
    fmt.Printf("- go/tags: %s\n", tagString)
@@ -401,15 +399,9 @@ func initConfig() {
    // Start accounting
    accounting.Start(ctx)

    // Configure console
    // Hide console window
    if ci.NoConsole {
        // Hide the console window
        terminal.HideConsole()
    } else {
        // Enable color support on stdout if possible.
        // This enables virtual terminal processing on Windows 10,
        // adding native support for ANSI/VT100 escape sequences.
        terminal.EnableColorsStdout()
    }

    // Load filters

@@ -8,7 +8,6 @@ import (
    "io"
    "os"
    "path"
    "strings"
    "sync"
    "sync/atomic"
    "time"
@@ -568,21 +567,6 @@ func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
    return -fuse.ENOSYS
}

// Getpath allows a case-insensitive file system to report the correct case of
// a file path.
func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string) {
    defer log.Trace(path, "Getpath fh=%d", fh)("errc=%d, normalisedPath=%q", &errc, &normalisedPath)
    node, _, errc := fsys.getNode(path, fh)
    if errc != 0 {
        return errc, ""
    }
    normalisedPath = node.Path()
    if !strings.HasPrefix(normalisedPath, "/") {
        normalisedPath = "/" + normalisedPath
    }
    return 0, normalisedPath
}

// Translate errors from mountlib
func translateError(err error) (errc int) {
    if err == nil {
@@ -647,7 +631,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
var (
    _ fuse.FileSystemInterface = (*FS)(nil)
    _ fuse.FileSystemOpenEx    = (*FS)(nil)
    _ fuse.FileSystemGetpath   = (*FS)(nil)
    //_ fuse.FileSystemChflags    = (*FS)(nil)
    //_ fuse.FileSystemSetcrtime  = (*FS)(nil)
    //_ fuse.FileSystemSetchgtime = (*FS)(nil)

@@ -84,6 +84,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
    if opt.DaemonTimeout != 0 {
        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
    }
    if opt.AllowNonEmpty {
        options = append(options, "-o", "nonempty")
    }
    if opt.AllowOther {
        options = append(options, "-o", "allow_other")
    }
@@ -149,14 +152,14 @@ func waitFor(fn func() bool) (ok bool) {
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error, func() error, error) {
    // Get mountpoint using OS specific logic
    f := VFS.Fs()
    mountpoint, err := getMountpoint(f, mountPath, opt)
    mountpoint, err := getMountpoint(mountPath, opt)
    if err != nil {
        return nil, nil, err
    }
    fs.Debugf(nil, "Mounting on %q (%q)", mountpoint, opt.VolumeName)

    // Create underlying FS
    f := VFS.Fs()
    fsys := NewFS(VFS)
    host := fuse.NewFileSystemHost(fsys)
    host.SetCapReaddirPlus(true) // only works on Windows

@@ -9,10 +9,9 @@ import (
    "os"

    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
)

func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, error) {
func getMountpoint(mountPath string, opt *mountlib.Options) (string, error) {
    fi, err := os.Stat(mountPath)
    if err != nil {
        return "", fmt.Errorf("failed to retrieve mount path information: %w", err)
@@ -20,11 +19,5 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
    if !fi.IsDir() {
        return "", errors.New("mount path is not a directory")
    }
    if err = mountlib.CheckOverlap(f, mountPath); err != nil {
        return "", err
    }
    if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
        return "", err
    }
    return mountPath, nil
}

@@ -18,13 +18,13 @@ import (
var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)

// isNetworkSharePath returns true if the given string is a valid network share path,
// in the basic UNC format "\\Server\Share\Path", where the first two path components
// are required ("\\Server\Share", which represents the volume).
// Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
// not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
// not supported by cgofuse/winfsp.
// Note: There is a UNCPath function in lib/file, but it refers to any extended-length
// paths using prefix "\\?\", and not necessarily network resource UNC paths.
func isNetworkSharePath(l string) bool {
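The regexp change adds `?` to the characters excluded after the leading `\\`, so extended-length `\\?\UNC\...` paths no longer count as network shares (which cgofuse/winfsp cannot mount). A standalone check of both patterns against the two path styles:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	newRe := regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`) // rejects the "\\?\" prefix
	oldRe := regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)   // accepts it

	unc := `\\server\share\path`
	ext := `\\?\UNC\server\share\path`

	fmt.Println(newRe.MatchString(unc), newRe.MatchString(ext)) // true false
	fmt.Println(oldRe.MatchString(unc), oldRe.MatchString(ext)) // true true
}
```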
@@ -94,7 +94,7 @@ func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (strin
}

// handleLocalMountpath handles the case where mount path is a local file system path.
func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (string, error) {
func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, error) {
    // Assuming path is drive letter or directory path, not network share (UNC) path.
    // If drive letter: Must be given as a single character followed by ":" and nothing else.
    // Else, assume directory path: Directory must not exist, but its parent must.
@@ -125,9 +125,6 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
        }
        return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err)
    }
    if err = mountlib.CheckOverlap(f, mountpath); err != nil {
        return "", err
    }
    }
    return mountpath, nil
}
@@ -161,19 +158,9 @@ func handleVolumeName(opt *mountlib.Options, volumeName string) {

// getMountpoint handles mounting details on Windows,
// where disk and network based file systems are treated different.
func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
    // Inform about some options not relevant in this mode
    if opt.AllowNonEmpty {
        fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
    }
    if opt.AllowRoot {
        fs.Logf(nil, "--allow-root flag does nothing on Windows")
    }
    if opt.AllowOther {
        fs.Logf(nil, "--allow-other flag does nothing on Windows")
    }
func getMountpoint(mountpath string, opt *mountlib.Options) (mountpoint string, err error) {

    // Handle mountpath
    // First handle mountpath
    var volumeName string
    if isDefaultPath(mountpath) {
        // Mount path indicates defaults, which will automatically pick an unused drive letter.
@@ -185,10 +172,10 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
        volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
    } else {
        // Mount path is drive letter or directory path.
        mountpoint, err = handleLocalMountpath(f, mountpath, opt)
        mountpoint, err = handleLocalMountpath(mountpath, opt)
    }

    // Handle volume name
    // Second handle volume name
    handleVolumeName(opt, volumeName)

    // Done, return mountpoint to be used, together with updated mount options.

||||
@@ -6,7 +6,6 @@ import (
    "fmt"

    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
    "github.com/spf13/cobra"
)
@@ -28,12 +27,12 @@ it will always be removed.
    },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        f, fileName := cmd.NewFsFile(args[0])
        fs, fileName := cmd.NewFsFile(args[0])
        cmd.Run(true, false, command, func() error {
            if fileName == "" {
                return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
                return fmt.Errorf("%s is a directory or doesn't exist", args[0])
            }
            fileObj, err := f.NewObject(context.Background(), fileName)
            fileObj, err := fs.NewObject(context.Background(), fileName)
            if err != nil {
                return err
            }

@@ -34,6 +34,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
    if opt.AsyncRead {
        options = append(options, fuse.AsyncRead())
    }
    if opt.AllowNonEmpty {
        options = append(options, fuse.AllowNonEmptyMount())
    }
    if opt.AllowOther {
        options = append(options, fuse.AllowOther())
    }
@@ -69,17 +72,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
    f := VFS.Fs()
    if runtime.GOOS == "darwin" {
        fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
    }
    if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
        return nil, nil, err
    }
    if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
        return nil, nil, err
    }
    fs.Debugf(f, "Mounting on %q", mountpoint)

    if opt.DebugFUSE {
        fuse.Debug = func(msg interface{}) {
@@ -87,6 +82,8 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
        }
    }

    f := VFS.Fs()
    fs.Debugf(f, "Mounting on %q", mountpoint)
    c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
    if err != nil {
        return nil, nil, err

@@ -95,6 +95,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
    }
    var opts []string
    // FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
    if fsys.opt.AllowNonEmpty {
        opts = append(opts, "nonempty")
    }
    if fsys.opt.AllowOther {
        opts = append(opts, "allow_other")
    }
@@ -145,16 +148,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
    f := VFS.Fs()
    if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
        return nil, nil, err
    }
    if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
        return nil, nil, err
    }
    fs.Debugf(f, "Mounting on %q", mountpoint)

    fsys := NewFS(VFS, opt)

    // nodeFsOpts := &fusefs.PathNodeFsOptions{
    //     ClientInodes: false,
    //     Debug:        mountlib.DebugFUSE,

@@ -33,20 +33,12 @@ func CheckMountEmpty(mountpoint string) error {
    if err != nil {
        return fmt.Errorf("cannot read %s: %w", mtabPath, err)
    }
    foundAutofs := false
    for _, entry := range entries {
        if entry.Dir == mountpointAbs {
            if entry.Type != "autofs" {
                return fmt.Errorf(msg, mountpointAbs)
            }
            foundAutofs = true
        if entry.Dir == mountpointAbs && entry.Type != "autofs" {
            return fmt.Errorf(msg, mountpointAbs)
        }
    }
    // It isn't safe to list an autofs in the middle of mounting
    if foundAutofs {
        return nil
    }
    return checkMountEmpty(mountpoint)
    return nil
}

// CheckMountReady checks whether mountpoint is mounted by rclone.

@@ -4,13 +4,33 @@
package mountlib

import (
    "fmt"
    "io"
    "os"
    "time"

    "github.com/rclone/rclone/fs"
)

// CheckMountEmpty checks if mountpoint folder is empty.
// On non-Linux unixes we list directory to ensure that.
func CheckMountEmpty(mountpoint string) error {
    return checkMountEmpty(mountpoint)
    fp, err := os.Open(mountpoint)
    if err != nil {
        return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
    }
    defer fs.CheckClose(fp, &err)

    _, err = fp.Readdirnames(1)
    if err == io.EOF {
        return nil
    }

    const msg = "directory is not empty, use --allow-non-empty to mount anyway: %s"
    if err == nil {
        return fmt.Errorf(msg, mountpoint)
    }
    return fmt.Errorf(msg+": %w", mountpoint, err)
}

// CheckMountReady should check if mountpoint is mounted by rclone.

@@ -159,59 +159,38 @@ group "Everyone" will be used to represent others. The user/group can be customi
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to [options](#options)
|--dir-perms| and |--file-perms|, which take a value in traditional Unix
|--dir-perms| and |--file-perms|, which take a value in traditional
[numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation).

The default permissions correspond to |--file-perms 0666 --dir-perms 0777|,
i.e. read and write permissions to everyone. This means you will not be able
to start any programs from the mount. To be able to do that you must add
execute permissions, e.g. |--file-perms 0777 --dir-perms 0777| to add it
to everyone. If the program needs to write files, chances are you will
have to enable [VFS File Caching](#vfs-file-caching) as well (see also
[limitations](#limitations)). Note that the default write permission has
some restrictions for accounts other than the owner, specifically it lacks
the "write extended attributes", as explained next.
to everyone. If the program needs to write files, chances are you will have
to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).
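
For illustration (a sketch only: the remote name |remote:| and the drive letter |X:| are placeholders, not taken from the text above), a mount that also grants execute permissions to everyone could be started as:

    rclone mount remote: X: --file-perms 0777 --dir-perms 0777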

The mapping of permissions is not always trivial, and the result you see in
Windows Explorer may not be exactly like you expected. For example, when setting
a value that includes write access for the group or others scope, this will be
mapped to individual permissions "write attributes", "write data" and
"append data", but not "write extended attributes". Windows will then show this
as basic permission "Special" instead of "Write", because "Write" also covers
the "write extended attributes" permission. When setting digit 0 for group or
others, to indicate no permissions, they will still get individual permissions
"read attributes", "read extended attributes" and "read permissions". This is
done for compatibility reasons, e.g. to allow users without additional
permissions to be able to read basic metadata about files like in Unix.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interpret this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you get detailed control of the resulting permissions, compared
to use of the POSIX permissions described above, and no additional permissions
will be added automatically for compatibility with Unix. Some example use
cases follow.

If you set POSIX permissions for only allowing access to the owner,
using |--file-perms 0600 --dir-perms 0700|, the user group and the built-in
"Everyone" group will still be given some special permissions, as described
above. Some programs may then (incorrectly) interpret this as the file being
accessible by everyone, for example an SSH client may warn about "unprotected
private key file". You can work around this by specifying
|-o FileSecurity="D:P(A;;FA;;;OW)"|, which sets file all access (FA) to the
owner (OW), and nothing else.

When setting write permissions then, except for the owner, this does not
include the "write extended attributes" permission, as mentioned above.
This may prevent applications from writing to files, giving permission denied
error instead. To set working write permissions for the built-in "Everyone"
group, similar to what it gets by default but with the addition of the
"write extended attributes", you can specify
|-o FileSecurity="D:P(A;;FRFW;;;WD)"|, which sets file read (FR) and file
write (FW) to everyone (WD). If file execute (FX) is also needed, then change
to |-o FileSecurity="D:P(A;;FRFWFX;;;WD)"|, or set file all access (FA) to
get full access permissions, including delete, with
|-o FileSecurity="D:P(A;;FA;;;WD)"|.
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
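
As a combined illustration of the above (a hypothetical sketch, not an example from the official docs: |remote:| and |X:| are placeholders), a mount giving the owner full access and everyone else read and write access, including "write extended attributes", might look like:

    rclone mount remote: X: -o FileSecurity="D:P(A;;FA;;;OW)(A;;FRFW;;;WD)"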

#### Windows caveats

@@ -240,58 +219,14 @@ processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture)).
Read more in the [install documentation](https://rclone.org/install/).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Note also that it is now the SYSTEM account that will have the owner
permissions, and other accounts will have permissions according to the
group or others scopes. As mentioned above, these will then not get the
"write extended attributes" permission, and this may prevent writing to
files. You can work around this with the FileSecurity option, see
example above.
Read more in the [install documentation](https://rclone.org/install/).

Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.

### Mounting on macOS

Mounting on macOS can be done either via [macFUSE](https://osxfuse.github.io/)
(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.

#### FUSE-T Limitations, Caveats, and Notes

There are some limitations, caveats, and notes about how it works. These are current as
of FUSE-T version 1.0.14.

##### ModTime update on read

As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):

> File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands

This means that viewing files with various tools, notably macOS Finder, will cause rclone
to update the modification time of the file. This may make rclone upload a full new copy
of the file.

##### Unicode Normalization

Rclone includes flags for unicode normalization with macFUSE that should be updated
for FUSE-T. See [this forum post](https://forum.rclone.org/t/some-unicode-forms-break-mount-on-macos-with-fuse-t/36403)
and [FUSE-T issue #16](https://github.com/macos-fuse-t/fuse-t/issues/16). The following
flag should be added to the |rclone mount| command.

    -o modules=iconv,from_code=UTF-8,to_code=UTF-8

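Putting it together (a sketch; |remote:| and the local path are placeholders), a FUSE-T mount command including this flag might look like:

    rclone mount remote: /path/to/local/mount -o modules=iconv,from_code=UTF-8,to_code=UTF-8
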
##### Read Only mounts

When mounting with |--read-only|, attempts to write to files will fail *silently* as
opposed to with a clear warning as in macFUSE.

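A minimal way to observe this behaviour (a sketch with placeholder remote and path) is to mount read-only and then attempt a write:

    rclone mount remote: /path/to/local/mount --read-only
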
### Limitations

Without the use of |--vfs-cache-mode| this can only write files

@@ -237,8 +237,13 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm

// Mount the remote at mountpoint
func (m *MountPoint) Mount() (daemon *os.Process, err error) {
    if err = m.CheckOverlap(); err != nil {
        return nil, err
    }

    // Ensure sensible defaults
    if err = m.CheckAllowed(); err != nil {
        return nil, err
    }
    m.SetVolumeName(m.MountOpt.VolumeName)
    m.SetDeviceName(m.MountOpt.DeviceName)

@@ -2,8 +2,6 @@ package mountlib

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "strings"
@@ -34,22 +32,22 @@ func ClipBlocks(b *uint64) {
    }
}

// CheckOverlap checks that root doesn't overlap with a mountpoint
func CheckOverlap(f fs.Fs, mountpoint string) error {
    name := f.Name()
// CheckOverlap checks that root doesn't overlap with mountpoint
func (m *MountPoint) CheckOverlap() error {
    name := m.Fs.Name()
    if name != "" && name != "local" {
        return nil
    }
    rootAbs := absPath(f.Root())
    mountpointAbs := absPath(mountpoint)
    rootAbs := absPath(m.Fs.Root())
    mountpointAbs := absPath(m.MountPoint)
    if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
        const msg = "mount point %q (%q) and directory to be mounted %q (%q) mustn't overlap"
        return fmt.Errorf(msg, mountpoint, mountpointAbs, f.Root(), rootAbs)
        const msg = "mount point %q and directory to be mounted %q mustn't overlap"
        return fmt.Errorf(msg, m.MountPoint, m.Fs.Root())
    }
    return nil
}

// absPath is a helper function for CheckOverlap
// absPath is a helper function for MountPoint.CheckOverlap
func absPath(path string) string {
    if abs, err := filepath.EvalSymlinks(path); err == nil {
        path = abs
@@ -58,43 +56,32 @@ func absPath(path string) string {
        path = abs
    }
    path = filepath.ToSlash(path)
    if runtime.GOOS == "windows" {
        // Removes any UNC long path prefix to make sure a simple HasPrefix test
        // in CheckOverlap works when one is UNC (root) and one is not (mountpoint).
        path = strings.TrimPrefix(path, `//?/`)
    }
    if !strings.HasSuffix(path, "/") {
        path += "/"
    }
    return path
}

// CheckAllowNonEmpty checks --allow-non-empty flag, and if not used verifies that mountpoint is empty.
func CheckAllowNonEmpty(mountpoint string, opt *Options) error {
    if !opt.AllowNonEmpty {
        return CheckMountEmpty(mountpoint)
    }
    return nil
}

// checkMountEmpty checks if mountpoint folder is empty by listing it.
func checkMountEmpty(mountpoint string) error {
    fp, err := os.Open(mountpoint)
    if err != nil {
        return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
    }
    defer fs.CheckClose(fp, &err)

    _, err = fp.Readdirnames(1)
    if err == io.EOF {
// CheckAllowed informs about ignored flags on Windows. If not on Windows
// and the --allow-non-empty flag is not used, verify that mountpoint is empty.
func (m *MountPoint) CheckAllowed() error {
    opt := &m.MountOpt
    if runtime.GOOS == "windows" {
        if opt.AllowNonEmpty {
            fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
        }
        if opt.AllowRoot {
            fs.Logf(nil, "--allow-root flag does nothing on Windows")
        }
        if opt.AllowOther {
            fs.Logf(nil, "--allow-other flag does nothing on Windows")
        }
        return nil
    }

    const msg = "%q is not empty, use --allow-non-empty to mount anyway"
    if err == nil {
        return fmt.Errorf(msg, mountpoint)
    if !opt.AllowNonEmpty {
        return CheckMountEmpty(m.MountPoint)
    }
    return fmt.Errorf(msg+": %w", mountpoint, err)
    return nil
}

// SetVolumeName with sensible default

@@ -336,31 +336,11 @@ func makeRandomExeName(baseName, extension string) (string, error) {

func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
    osName := runtime.GOOS
    arch := runtime.GOARCH
    if osName == "darwin" {
        osName = "osx"
    }
    arch := runtime.GOARCH
    if arch == "arm" {
        // Check the ARM compatibility level of the current CPU.
        // We don't know if this matches the rclone binary currently running, it
        // could for example be a ARMv6 variant running on a ARMv7 compatible CPU,
        // so we will simply pick the best possible variant.
        switch buildinfo.GetSupportedGOARM() {
        case 7:
            // This system can run any binaries built with GOARCH=arm, including GOARM=7.
            // Pick the ARMv7 variant of rclone, published with suffix "arm-v7".
            arch = "arm-v7"
        case 6:
            // This system can run binaries built with GOARCH=arm and GOARM=6 or lower.
            // Pick the ARMv6 variant of rclone, published with suffix "arm-v6".
            arch = "arm-v6"
        case 5:
            // This system can only run binaries built with GOARCH=arm and GOARM=5.
            // Pick the ARMv5 variant of rclone, which also works without hardfloat,
            // published with suffix "arm".
            arch = "arm"
        }
    }

    archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)
    archiveURL := fmt.Sprintf("%s/%s/%s", siteURL, version, archiveFilename)
    archiveBuf, err := downloadFile(ctx, archiveURL)

@@ -1,6 +1,5 @@
// Code generated by vfsgen; DO NOT EDIT.

//go:build !dev
// +build !dev

package data

@@ -60,14 +60,12 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
        case "":
            continue
        case "remote", "fs":
            if str != "" {
                p, err := fspath.Parse(str)
                if err != nil || p.Name == ":" {
                    return fmt.Errorf("cannot parse path %q: %w", str, err)
                }
                fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
                vol.Fs = str
            p, err := fspath.Parse(str)
            if err != nil || p.Name == ":" {
                return fmt.Errorf("cannot parse path %q: %w", str, err)
            }
            fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
            vol.Fs = str
        case "type":
            fsType = str
            vol.Type = str

@@ -3,7 +3,6 @@ package http

import (
    "context"
    "errors"
    "fmt"
    "io"
    "log"
@@ -16,8 +15,6 @@ import (

    "github.com/go-chi/chi/v5/middleware"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/cmd/serve/proxy"
    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    libhttp "github.com/rclone/rclone/lib/http"
@@ -50,7 +47,6 @@ func init() {
    libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
    libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
    vfsflags.AddFlags(flagSet)
    proxyflags.AddFlags(flagSet)
}

// Command definition for cobra
@@ -68,21 +64,18 @@ The server will log errors. Use ` + "`-v`" + ` to see access logs.

` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to
control the stats printing.
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
    Run: func(command *cobra.Command, args []string) {
        var f fs.Fs
        if proxyflags.Opt.AuthProxy == "" {
            cmd.CheckArgs(1, 1, command, args)
            f = cmd.NewFsSrc(args)
        } else {
            cmd.CheckArgs(0, 0, command, args)
        }
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)

        cmd.Run(false, true, command, func() error {
            s, err := run(context.Background(), f, Opt)
            ctx := context.Background()

            s, err := run(ctx, f, Opt)
            if err != nil {
                log.Fatal(err)
            }
@@ -93,60 +86,25 @@ control the stats printing.
    },
}

// HTTP contains everything to run the server
type HTTP struct {
// server contains everything to run the server
type serveCmd struct {
    f      fs.Fs
    _vfs   *vfs.VFS // don't use directly, use getVFS
    vfs    *vfs.VFS
    server *libhttp.Server
    opt    Options
    proxy  *proxy.Proxy
    ctx    context.Context // for global config
}

// Gets the VFS in use for this request
func (s *HTTP) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
    if s._vfs != nil {
        return s._vfs, nil
    }
    value := libhttp.CtxGetAuth(ctx)
    if value == nil {
        return nil, errors.New("no VFS found in context")
    }
    VFS, ok := value.(*vfs.VFS)
    if !ok {
        return nil, fmt.Errorf("context value is not VFS: %#v", value)
    }
    return VFS, nil
}
func run(ctx context.Context, f fs.Fs, opt Options) (*serveCmd, error) {
    var err error

// auth does proxy authorization
func (s *HTTP) auth(user, pass string) (value interface{}, err error) {
    VFS, _, err := s.proxy.Call(user, pass, false)
    if err != nil {
        return nil, err
    }
    return VFS, err
}

func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
    s = &HTTP{
    s := &serveCmd{
        f:   f,
        ctx: ctx,
        opt: opt,
    }

    if proxyflags.Opt.AuthProxy != "" {
        s.proxy = proxy.New(ctx, &proxyflags.Opt)
        // override auth
        s.opt.Auth.CustomAuthFn = s.auth
    } else {
        s._vfs = vfs.New(f, &vfsflags.Opt)
        vfs: vfs.New(f, &vfsflags.Opt),
    }

    s.server, err = libhttp.NewServer(ctx,
        libhttp.WithConfig(s.opt.HTTP),
        libhttp.WithAuth(s.opt.Auth),
        libhttp.WithTemplate(s.opt.Template),
        libhttp.WithConfig(opt.HTTP),
        libhttp.WithAuth(opt.Auth),
        libhttp.WithTemplate(opt.Template),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to init server: %w", err)
@@ -166,7 +124,7 @@ func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
    }

// handler reads incoming requests and dispatches them
func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
func (s *serveCmd) handler(w http.ResponseWriter, r *http.Request) {
    isDir := strings.HasSuffix(r.URL.Path, "/")
    remote := strings.Trim(r.URL.Path, "/")
    if isDir {
@@ -177,15 +135,9 @@ func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
}

// serveDir serves a directory index at dirRemote
func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
    VFS, err := s.getVFS(r.Context())
    if err != nil {
        http.Error(w, "Root directory not found", http.StatusNotFound)
        fs.Errorf(nil, "Failed to serve directory: %v", err)
        return
    }
func (s *serveCmd) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
    // List the directory
    node, err := VFS.Stat(dirRemote)
    node, err := s.vfs.Stat(dirRemote)
    if err == vfs.ENOENT {
        http.Error(w, "Directory not found", http.StatusNotFound)
        return
@@ -225,15 +177,8 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
}

// serveFile serves a file object at remote
func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
    VFS, err := s.getVFS(r.Context())
    if err != nil {
        http.Error(w, "File not found", http.StatusNotFound)
        fs.Errorf(nil, "Failed to serve file: %v", err)
        return
    }

    node, err := VFS.Stat(remote)
func (s *serveCmd) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
    node, err := s.vfs.Stat(remote)
    if err == vfs.ENOENT {
        fs.Infof(remote, "%s: File not found", r.RemoteAddr)
        http.Error(w, "File not found", http.StatusNotFound)

@@ -6,14 +6,13 @@ import (
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"
    "testing"
    "time"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configfile"
    "github.com/rclone/rclone/fs/filter"
    libhttp "github.com/rclone/rclone/lib/http"
    "github.com/stretchr/testify/assert"
@@ -22,16 +21,18 @@ import (

var (
    updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
    sc           *serveCmd
    testURL      string
)

const (
    testBindAddress = "localhost:0"
    testUser        = "user"
    testPass        = "pass"
    testTemplate    = "testdata/golden/testindex.html"
)

func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string) {
func start(t *testing.T, f fs.Fs) {
    ctx := context.Background()

    opts := Options{
        HTTP: libhttp.DefaultCfg(),
        Template: libhttp.TemplateConfig{
@@ -39,13 +40,10 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
        },
    }
    opts.HTTP.ListenAddr = []string{testBindAddress}
    if proxyflags.Opt.AuthProxy == "" {
        opts.Auth.BasicUser = testUser
        opts.Auth.BasicPass = testPass
    }

    s, err := run(ctx, f, opts)
    require.NoError(t, err, "failed to start server")
    sc = s

    urls := s.server.URLs()
    require.Len(t, urls, 1, "expected one URL")
@@ -65,8 +63,6 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
        pause *= 2
    }
    t.Fatal("couldn't connect to server")

    return s, testURL
}

var (
@@ -74,6 +70,31 @@ var (
    expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
)

func TestInit(t *testing.T) {
    ctx := context.Background()
    // Configure the remote
    configfile.Install()
    // fs.Config.LogLevel = fs.LogLevelDebug
    // fs.Config.DumpHeaders = true
    // fs.Config.DumpBodies = true

    // exclude files called hidden.txt and directories called hidden
    fi := filter.GetConfig(ctx)
    require.NoError(t, fi.AddRule("- hidden.txt"))
    require.NoError(t, fi.AddRule("- hidden/**"))

    // Create a test Fs
    f, err := fs.NewFs(context.Background(), "testdata/files")
    require.NoError(t, err)

    // set date of datedObject to expectedTime
    obj, err := f.NewObject(context.Background(), datedObject)
    require.NoError(t, err)
    require.NoError(t, obj.SetModTime(context.Background(), expectedTime))

    start(t, f)
}

// check body against the file, or re-write body if -updategolden is
// set.
func checkGolden(t *testing.T, fileName string, got []byte) {
@@ -90,49 +111,7 @@ func checkGolden(t *testing.T, fileName string, got []byte) {
    }
}

func testGET(t *testing.T, useProxy bool) {
    ctx := context.Background()
    // ci := fs.GetConfig(ctx)
    // ci.LogLevel = fs.LogLevelDebug

    // exclude files called hidden.txt and directories called hidden
    fi := filter.GetConfig(ctx)
    require.NoError(t, fi.AddRule("- hidden.txt"))
    require.NoError(t, fi.AddRule("- hidden/**"))

    var f fs.Fs
    if useProxy {
        // the backend config will be made by the proxy
        prog, err := filepath.Abs("../servetest/proxy_code.go")
        require.NoError(t, err)
        files, err := filepath.Abs("testdata/files")
        require.NoError(t, err)
        cmd := "go run " + prog + " " + files

        // FIXME this is untidy setting a global variable!
        proxyflags.Opt.AuthProxy = cmd
        defer func() {
            proxyflags.Opt.AuthProxy = ""
        }()

        f = nil
    } else {
        // Create a test Fs
        var err error
        f, err = fs.NewFs(context.Background(), "testdata/files")
        require.NoError(t, err)

        // set date of datedObject to expectedTime
        obj, err := f.NewObject(context.Background(), datedObject)
        require.NoError(t, err)
        require.NoError(t, obj.SetModTime(context.Background(), expectedTime))
    }

    s, testURL := start(ctx, t, f)
    defer func() {
        assert.NoError(t, s.server.Shutdown())
    }()

func TestGET(t *testing.T) {
    for _, test := range []struct {
        URL    string
        Status int
@@ -237,7 +216,6 @@ func testGET(t *testing.T, useProxy bool) {
        if test.Range != "" {
            req.Header.Add("Range", test.Range)
        }
        req.SetBasicAuth(testUser, testPass)
        resp, err := http.DefaultClient.Do(req)
        require.NoError(t, err)
        assert.Equal(t, test.Status, resp.StatusCode, test.Golden)
@@ -259,11 +237,3 @@ func testGET(t *testing.T, useProxy bool) {
        checkGolden(t, test.Golden, body)
    }
}

func TestGET(t *testing.T) {
    testGET(t, false)
}

func TestAuthProxy(t *testing.T) {
    testGET(t, true)
}

@@ -79,30 +79,6 @@ supported hash on the backend or you can use a named hash such as
"MD5" or "SHA-1". Use the [hashsum](/commands/rclone_hashsum/) command
to see the full list.

### Access WebDAV on Windows
WebDAV shared folders can be mapped as drives on Windows; however, the default settings prevent it.
Windows will fail to connect to a server using insecure Basic authentication.
It will not even display any login dialog, as Windows requires an SSL / HTTPS connection to be used with Basic.
If you try to connect via the Add Network Location Wizard you will get the following error:
"The folder you entered does not appear to be valid. Please choose another".
However, you can still connect if you set the following registry key on the client machine:
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters\BasicAuthLevel to 2.
The BasicAuthLevel can be set to the following values:
0 - Basic authentication disabled
1 - Basic authentication enabled for SSL connections only
2 - Basic authentication enabled for SSL connections and for non-SSL connections
If required, increase the FileSizeLimitInBytes to a higher value.
Navigate to the Services interface, then restart the WebClient service.
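
From an elevated command prompt this could be set with, for example (a sketch; the reg invocation is an illustration, not taken from the text above):

    reg add "HKLM\SYSTEM\CurrentControlSet\Services\WebClient\Parameters" /v BasicAuthLevel /t REG_DWORD /d 2 /f

Remember to restart the WebClient service afterwards for the change to take effect.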

### Access Office applications on WebDAV
Navigate to the following registry key: HKEY_CURRENT_USER\Software\Microsoft\Office\[14.0/15.0/16.0]\Common\Internet
Create a new DWORD BasicAuthLevel with value 2.
0 - Basic authentication disabled
1 - Basic authentication enabled for SSL connections only
2 - Basic authentication enabled for SSL and for non-SSL connections
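
For example, for Office 2016 (a sketch; adjust the version number to your installation):

    reg add "HKCU\Software\Microsoft\Office\16.0\Common\Internet" /v BasicAuthLevel /t REG_DWORD /d 2 /f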

https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint

` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",

@@ -39,7 +39,7 @@ recursion.

Some backends do not always provide file sizes, see for example
[Google Photos](/googlephotos/#size) and
[Google Docs](/drive/#limitations-of-google-docs).
[Google Drive](/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.
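
For instance (a sketch; `remote:path` is a placeholder), such files are counted as empty in the totals reported by:

    rclone size remote:path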

@@ -36,7 +36,7 @@ want to delete files from destination, use the
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

    rclone sync --interactive SOURCE remote:DESTINATION
    rclone sync -i SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on

@@ -52,7 +52,7 @@ unless ` + "`--no-create`" + ` or ` + "`--recursive`" + ` is provided.

If ` + "`--recursive`" + ` is used then recursively sets the modification
time on all existing files that are found under the path. Filters are supported,
and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`/`-i`" + ` flag.
and you can test with the ` + "`--dry-run`" + ` or the ` + "`--interactive`" + ` flag.

If ` + "`--timestamp`" + ` is used then sets the modification time to that
time instead of the current time. Times may be specified as one of:

@@ -18,8 +18,6 @@ import (
    "github.com/rclone/rclone/fs/dirtree"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/terminal"
    "github.com/spf13/cobra"
)

@@ -28,7 +26,6 @@ var (
    outFileName string
    noReport    bool
    sort        string
    enc         = encoder.OS
)

func init() {
@@ -103,26 +100,22 @@ For a more interactive navigation of the remote see the
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)
        ci := fs.GetConfig(context.Background())
        var outFile io.Writer
        outFile := os.Stdout
        if outFileName != "" {
            var err error
            outFile, err = os.Create(outFileName)
            if err != nil {
                return fmt.Errorf("failed to create output file: %w", err)
            }
            opts.Colorize = false
        } else {
            terminal.Start()
            outFile = terminal.Out
            opts.Colorize = true
        }
        opts.VerSort = opts.VerSort || sort == "version"
        opts.ModSort = opts.ModSort || sort == "mtime"
        opts.CTimeSort = opts.CTimeSort || sort == "ctime"
        opts.NameSort = sort == "name"
        opts.SizeSort = sort == "size"
        ci := fs.GetConfig(context.Background())
        opts.UnitSize = ci.HumanReadable
        opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
        if opts.DeepLevel == 0 {
            opts.DeepLevel = ci.MaxDepth
        }
@@ -165,7 +158,7 @@ type FileInfo struct {

// Name is base name of the file
func (to *FileInfo) Name() string {
    return enc.FromStandardName(path.Base(to.entry.Remote()))
    return path.Base(to.entry.Remote())
}

// Size in bytes for regular files; system-dependent for others
@@ -199,7 +192,7 @@ func (to *FileInfo) Sys() interface{} {

// String returns the full path
func (to *FileInfo) String() string {
    return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote()))
    return to.entry.Remote()
}

// Fs maps an fs.Fs into a tree.Fs
@@ -214,7 +207,6 @@ func NewFs(dirs dirtree.DirTree) Fs {
func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
    defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
    filePath = filepath.ToSlash(filePath)
    filePath = enc.ToStandardPath(filePath)
    filePath = strings.TrimLeft(filePath, "/")
    if filePath == "" {
        return &FileInfo{fs.NewDir("", time.Now())}, nil
@@ -230,14 +222,13 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
func (dirs Fs) ReadDir(dir string) (names []string, err error) {
    defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
    dir = filepath.ToSlash(dir)
    dir = enc.ToStandardPath(dir)
    dir = strings.TrimLeft(dir, "/")
    entries, ok := dirs[dir]
    if !ok {
        return nil, fmt.Errorf("couldn't find directory %q", dir)
    }
    for _, entry := range entries {
        names = append(names, enc.FromStandardName(path.Base(entry.Remote())))
        names = append(names, path.Base(entry.Remote()))
    }
    return
}

@@ -8,7 +8,7 @@ FROM alpine:latest
COPY --from=binaries /usr/local/bin/rclone /usr/bin/rclone

RUN mkdir -p /data/config /data/cache /mnt \
    && apk --no-cache add ca-certificates fuse3 tzdata \
    && apk --no-cache add ca-certificates fuse tzdata \
    && echo "user_allow_other" >> /etc/fuse.conf \
    && rclone version

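A quick smoke test of an image built from this Dockerfile might be (a sketch; the image tag is a placeholder):

    docker build -t rclone-local .
    docker run --rm rclone-local version
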
@@ -132,7 +132,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Liara Object Storage" home="https://liara.ir/landing/object-storage" config="/s3/#liara-object-storage" >}}

@@ -673,25 +673,3 @@ put them back in again.` >}}
* vanplus <60313789+vanplus@users.noreply.github.com>
* Jack <16779171+jkpe@users.noreply.github.com>
* Abdullah Saglam <abdullah.saglam@stonebranch.com>
* Marks Polakovs <github@markspolakovs.me>
* piyushgarg <piyushgarg80@gmail.com>
* Kaloyan Raev <kaloyan-raev@users.noreply.github.com>
* IMTheNachoMan <imthenachoman@gmail.com>
* alankrit <alankrit@google.com>
* Bryan Kaplan <#@bryankaplan.com>
* LXY <767763591@qq.com>
* Simmon Li (he/him) <li.simmon@gmail.com>
* happyxhw <44490504+happyxhw@users.noreply.github.com>
* Simmon Li (he/him) <hello@crespire.dev>
* Matthias Baur <baurmatt@users.noreply.github.com>
* Hunter Wittenborn <hunter@hunterwittenborn.com>
* logopk <peter@kreuser.name>
* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
* ToBeFree <github@tfrei.de>
* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>
* Peter Brunner <peter@lugoues.net>
* Ninh Pham <dongian.rapclubkhtn@gmail.com>
* Ryan Caezar Itang <sitiom@proton.me>
* Peter Brunner <peter@psykhe.com>
* Leandro Sacchet <leandro.sacchet@animati.com.br>
* dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

@@ -67,7 +67,7 @@ List the contents of a container
Sync `/home/local/directory` to the remote container, deleting any excess
files in the container.

    rclone sync --interactive /home/local/directory remote:container
    rclone sync -i /home/local/directory remote:container

### --fast-list

@@ -72,7 +72,7 @@ List the contents of a bucket
Sync `/home/local/directory` to the remote bucket, deleting any
excess files in the bucket.

    rclone sync --interactive /home/local/directory remote:bucket
    rclone sync -i /home/local/directory remote:bucket

### Application Keys

@@ -16,7 +16,7 @@ versionIntroduced: "v1.58"
- For successive sync runs, leave off the `--resync` flag.
- Consider using a [filters file](#filtering) for excluding
  unnecessary files and directories from the sync.
- Consider setting up the [--check-access](#check-access) feature
- Consider setting up the [--check-access](#check-access-option) feature
  for safety.
- On Linux, consider setting up a [crontab entry](#cron). bisync can
  safely run in concurrent cron jobs thanks to lock files it maintains.
@@ -146,9 +146,9 @@ The base directories on the both Path1 and Path2 filesystems must exist
or bisync will fail. This is required for safety - that bisync can verify
that both paths are valid.

When using `--resync`, a newer version of a file on either the Path1 or Path2
filesystem will overwrite the file on the other path (only the last version
will be kept). Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
When using `--resync` a newer version of a file on the Path2 filesystem
will be overwritten by the Path1 filesystem version.
Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).

For a resync run, one of the paths may be empty (no files in the path tree).
The resync run should result in files on both paths, else a normal non-resync
@@ -164,27 +164,14 @@ deleting **everything** in the other path.
Access check files are an additional safety measure against data loss.
bisync will ensure it can find matching `RCLONE_TEST` files in the same places
in the Path1 and Path2 filesystems.
`RCLONE_TEST` files are not generated automatically.
For `--check-access` to succeed, you must first either:
**A)** Place one or more `RCLONE_TEST` files in the Path1 or Path2 filesystem
and then do either a run without `--check-access` or a [--resync](#resync) to
set matching files on both filesystems, or
**B)** Set `--check-filename` to a filename already in use in various locations
throughout your sync'd fileset.
Time stamps and file contents are not important, just the names and locations.
Place one or more `RCLONE_TEST` files in the Path1 or Path2 filesystem and
then do either a run without `--check-access` or a `--resync` to set
matching files on both filesystems.
If you have symbolic links in your sync tree it is recommended to place
`RCLONE_TEST` files in the linked-to directory tree to protect against
bisync assuming a bunch of deleted files if the linked-to tree should not be
accessible.
See also the [--check-filename](#check-filename) flag.
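
A typical sequence (a sketch with placeholder paths) would be to seed a check file, propagate it with a resync, and then run with the check enabled:

    touch /path1/RCLONE_TEST
    rclone bisync /path1 remote:path2 --resync
    rclone bisync /path1 remote:path2 --check-access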

#### --check-filename

Name of the file(s) used in access health validation.
The default `--check-filename` is `RCLONE_TEST`.
One or more files having this filename must exist, synchronized between your
source and destination filesets, in order for `--check-access` to succeed.
See [--check-access](#check-access) for additional details.
accessible. Also see the `--check-filename` flag.

#### --max-delete

@@ -5,129 +5,6 @@ description: "Rclone Changelog"

# Changelog

## v1.62.2 - 2023-03-16

[See commits](https://github.com/rclone/rclone/compare/v1.62.1...v1.62.2)

* Bug Fixes
    * docker volume plugin: Add missing fuse3 dependency (Nick Craig-Wood)
    * docs: Fix size documentation (asdffdsazqqq)
* FTP
    * Fix 426 errors on downloads with vsftpd (Lesmiscore)

## v1.62.1 - 2023-03-15

[See commits](https://github.com/rclone/rclone/compare/v1.62.0...v1.62.1)

* Bug Fixes
    * docker: Add missing fuse3 dependency (cycneuramus)
    * build: Update release docs to be more careful with the tag (Nick Craig-Wood)
    * build: Set Github release to draft while uploading binaries (Nick Craig-Wood)

## v1.62.0 - 2023-03-14

[See commits](https://github.com/rclone/rclone/compare/v1.61.0...v1.62.0)

* New Features
    * accounting: Make checkers show what they are doing (Nick Craig-Wood)
    * authorize: Add support for custom templates (Hunter Wittenborn)
    * build
        * Update to go1.20 (Nick Craig-Wood, Anagh Kumar Baranwal)
        * Add winget releaser workflow (Ryan Caezar Itang)
        * Add dependabot (Ryan Caezar Itang)
    * doc updates (albertony, Bryan Kaplan, Gerard Bosch, IMTheNachoMan, Justin Winokur, Manoj Ghosh, Nick Craig-Wood, Ole Frost, Peter Brunner, piyushgarg, Ryan Caezar Itang, Simmon Li, ToBeFree)
    * filter: Emit INFO message when can't work out directory filters (Nick Craig-Wood)
    * fs
        * Added multiple ca certificate support. (alankrit)
        * Add `--max-delete-size` a delete size threshold (Leandro Sacchet)
    * fspath: Allow the symbols `@` and `+` in remote names (albertony)
    * lib/terminal: Enable windows console virtual terminal sequences processing (ANSI/VT100 colors) (albertony)
    * move: If `--check-first` and `--order-by` are set then delete with perfect ordering (Nick Craig-Wood)
    * serve http: Support `--auth-proxy` (Matthias Baur)
* Bug Fixes
    * accounting
        * Avoid negative ETA values for very slow speeds (albertony)
        * Limit length of ETA string (albertony)
        * Show human readable elapsed time when longer than a day (albertony)
    * all: Apply codeql fixes (Aaron Gokaslan)
    * build
        * Fix condition for manual workflow run (albertony)
        * Fix building for ARMv5 and ARMv6 (albertony)
            * selfupdate: Consider ARM version
            * install.sh: fix ARMv6 download
            * version: Report ARM version
    * deletefile: Return error code 4 if file does not exist (Nick Craig-Wood)
    * docker: Fix volume plugin does not remount volume on docker restart (logopk)
    * fs: Fix race conditions in `--max-delete` and `--max-delete-size` (Nick Craig-Wood)
    * lib/oauthutil: Handle fatal errors better (Alex Chen)
    * mount2: Fix `--allow-non-empty` (Nick Craig-Wood)
    * operations: Fix concurrency: use `--checkers` unless transferring files (Nick Craig-Wood)
    * serve ftp: Fix timestamps older than 1 year in listings (Nick Craig-Wood)
    * sync: Fix concurrency: use `--checkers` unless transferring files (Nick Craig-Wood)
    * tree
        * Fix nil pointer exception on stat failure (Nick Craig-Wood)
        * Fix colored output on windows (albertony)
        * Fix display of files with illegal Windows file system names (Nick Craig-Wood)
* Mount
    * Fix creating and renaming files on case insensitive backends (Nick Craig-Wood)
    * Do not treat `\\?\` prefixed paths as network share paths on windows (albertony)
    * Fix check for empty mount point on Linux (Nick Craig-Wood)
    * Fix `--allow-non-empty` (Nick Craig-Wood)
    * Avoid incorrect or premature overlap check on windows (albertony)
    * Update to fuse3 after bazil.org/fuse update (Nick Craig-Wood)
* VFS
    * Make uploaded files retain modtime with non-modtime backends (Nick Craig-Wood)
    * Fix incorrect modtime on fs which don't support setting modtime (Nick Craig-Wood)
    * Fix rename of directory containing files to be uploaded (Nick Craig-Wood)
* Local
    * Fix `%!w(<nil>)` in "failed to read directory" error (Marks Polakovs)
    * Fix exclusion of dangling symlinks with -L/--copy-links (Nick Craig-Wood)
* Crypt
    * Obey `--ignore-checksum` (Nick Craig-Wood)
    * Fix for unencrypted directory names on case insensitive remotes (Ole Frost)
* Azure Blob
    * Remove workarounds for SDK bugs after v0.6.1 update (Nick Craig-Wood)
* B2
    * Fix uploading files bigger than 1TiB (Nick Craig-Wood)
* Drive
    * Note that `--drive-acknowledge-abuse` needs SA Manager permission (Nick Craig-Wood)
    * Make `--drive-stop-on-upload-limit` to respond to storageQuotaExceeded (Ninh Pham)
* FTP
    * Retry 426 errors (Nick Craig-Wood)
    * Retry errors when initiating downloads (Nick Craig-Wood)
    * Revert to upstream `github.com/jlaffaye/ftp` now fix is merged (Nick Craig-Wood)
* Google Cloud Storage
    * Add `--gcs-env-auth` to pick up IAM credentials from env/instance (Peter Brunner)
* Mega
    * Add `--mega-use-https` flag (NodudeWasTaken)
* Onedrive
    * Default onedrive personal to QuickXorHash as Microsoft is removing SHA1 (Nick Craig-Wood)
    * Add `--onedrive-hash-type` to change the hash in use (Nick Craig-Wood)
    * Improve speed of QuickXorHash (LXY)
* Oracle Object Storage
    * Speed up operations by using S3 pacer and setting minsleep to 10ms (Manoj Ghosh)
    * Expose the `storage_tier` option in config (Manoj Ghosh)
    * Bring your own encryption keys (Manoj Ghosh)
* S3
    * Check multipart upload ETag when `--s3-no-head` is in use (Nick Craig-Wood)
    * Add `--s3-sts-endpoint` to specify STS endpoint (Nick Craig-Wood)
    * Fix incorrect tier support for StorJ and IDrive when pointing at a file (Ole Frost)
    * Fix AWS STS failing if `--s3-endpoint` is set (Nick Craig-Wood)
    * Make purge remove directory markers too (Nick Craig-Wood)
* Seafile
    * Renew library password (Fred)
* SFTP
    * Fix uploads being 65% slower than they should be with crypt (Nick Craig-Wood)
* Smb
    * Allow SPN (service principal name) to be configured (Nick Craig-Wood)
    * Check smb connection is closed (happyxhw)
* Storj
    * Implement `rclone link` (Kaloyan Raev)
    * Implement `rclone purge` (Kaloyan Raev)
    * Update satellite urls and labels (Kaloyan Raev)
* WebDAV
    * Fix interop with davrods server (Nick Craig-Wood)

## v1.61.1 - 2022-12-23

[See commits](https://github.com/rclone/rclone/compare/v1.61.0...v1.61.1)

@@ -257,7 +257,7 @@ style or chunk naming scheme is to:
- Create another directory (most probably on the same cloud storage)
  and configure a new remote with desired metadata format,
  hash type, chunk naming etc.
- Now run `rclone sync --interactive oldchunks: newchunks:` and all your data
- Now run `rclone sync -i oldchunks: newchunks:` and all your data
  will be transparently converted in transfer.
  This may take some time, yet chunker will try server-side
  copy if possible.

@@ -17,11 +17,9 @@ Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

Use --auth-no-open-browser to prevent rclone from opening the auth
Use the --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.

```
rclone authorize [flags]
```
@@ -31,7 +29,6 @@ rclone authorize [flags]

```
      --auth-no-open-browser   Do not automatically open auth link in default browser
  -h, --help                   help for authorize
      --template string        The path to a custom Go template for generating HTML responses
```

See the [global flags page](/flags/) for global options not listed here.

@@ -170,59 +170,38 @@ group "Everyone" will be used to represent others. The user/group can be customi
with FUSE options "UserName" and "GroupName",
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
The permissions on each entry will be set according to [options](#options)
`--dir-perms` and `--file-perms`, which takes a value in traditional Unix
`--dir-perms` and `--file-perms`, which takes a value in traditional
[numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation).

The default permissions correspond to `--file-perms 0666 --dir-perms 0777`,
i.e. read and write permissions to everyone. This means you will not be able
to start any programs from the mount. To be able to do that you must add
execute permissions, e.g. `--file-perms 0777 --dir-perms 0777` to add it
to everyone. If the program needs to write files, chances are you will
have to enable [VFS File Caching](#vfs-file-caching) as well (see also
[limitations](#limitations)). Note that the default write permission has
some restrictions for accounts other than the owner, specifically it lacks
the "write extended attributes", as explained next.
to everyone. If the program needs to write files, chances are you will have
to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).
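As a sketch, a Windows mount that allows programs to be started and written back through the VFS cache might look like this (drive letter and remote are illustrative):

```
rclone mount remote:path X: --file-perms 0777 --dir-perms 0777 --vfs-cache-mode writes
```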

The mapping of permissions is not always trivial, and the result you see in
Windows Explorer may not be exactly like you expected. For example, when setting
a value that includes write access for the group or others scope, this will be
mapped to individual permissions "write attributes", "write data" and
"append data", but not "write extended attributes". Windows will then show this
as basic permission "Special" instead of "Write", because "Write" also covers
the "write extended attributes" permission. When setting digit 0 for group or
others, to indicate no permissions, they will still get individual permissions
"read attributes", "read extended attributes" and "read permissions". This is
done for compatibility reasons, e.g. to allow users without additional
permissions to be able to read basic metadata about files like in Unix.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
If you set POSIX permissions for only allowing access to the owner, using
`--file-perms 0600 --dir-perms 0700`, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interpret this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you get detailed control of the resulting permissions, compared
to use of the POSIX permissions described above, and no additional permissions
will be added automatically for compatibility with Unix. Some example use
cases will follow.

If you set POSIX permissions for only allowing access to the owner,
using `--file-perms 0600 --dir-perms 0700`, the user group and the built-in
"Everyone" group will still be given some special permissions, as described
above. Some programs may then (incorrectly) interpret this as the file being
accessible by everyone, for example an SSH client may warn about "unprotected
private key file". You can work around this by specifying
`-o FileSecurity="D:P(A;;FA;;;OW)"`, which sets file all access (FA) to the
owner (OW), and nothing else.

When setting write permissions then, except for the owner, this does not
include the "write extended attributes" permission, as mentioned above.
This may prevent applications from writing to files, giving permission denied
error instead. To set working write permissions for the built-in "Everyone"
group, similar to what it gets by default but with the addition of the
"write extended attributes", you can specify
`-o FileSecurity="D:P(A;;FRFW;;;WD)"`, which sets file read (FR) and file
write (FW) to everyone (WD). If file execute (FX) is also needed, then change
to `-o FileSecurity="D:P(A;;FRFWFX;;;WD)"`, or set file all access (FA) to
get full access permissions, including delete, with
`-o FileSecurity="D:P(A;;FA;;;WD)"`.
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying `-o FileSecurity="D:P(A;;FA;;;OW)"`, for file all access (FA) to the owner (OW).
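Putting the above together, an owner-only mount that avoids the "unprotected private key file" warning could be run as follows (a sketch; drive letter and remote are illustrative):

```
rclone mount remote:path X: --file-perms 0600 --dir-perms 0700 -o FileSecurity="D:P(A;;FA;;;OW)"
```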
### Windows caveats

@@ -251,58 +230,14 @@ processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
Read more in the [install documentation](https://rclone.org/install/).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [`--config`](https://rclone.org/docs/#config-config-file) option.
Note also that it is now the SYSTEM account that will have the owner
permissions, and other accounts will have permissions according to the
group or others scopes. As mentioned above, these will then not get the
"write extended attributes" permission, and this may prevent writing to
files. You can work around this with the FileSecurity option, see
example above.
Read more in the [install documentation](https://rclone.org/install/).
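For example, when the mount runs as the SYSTEM account from a Scheduled Task or service, point rclone at an explicit config file (a sketch; the path is illustrative):

```
rclone mount remote:path X: --config C:\ProgramData\rclone\rclone.conf
```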
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.

## Mounting on macOS

Mounting on macOS can be done either via [macFUSE](https://osxfuse.github.io/)
(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.

### FUSE-T Limitations, Caveats, and Notes

There are some limitations, caveats, and notes about how it works. These are current as
of FUSE-T version 1.0.14.

#### ModTime update on read

As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):

> File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands

This means that viewing files with various tools, notably macOS Finder, will cause rclone
to update the modification time of the file. This may make rclone upload a full new copy
of the file.

#### Unicode Normalization

Rclone includes flags for unicode normalization with macFUSE that should be updated
for FUSE-T. See [this forum post](https://forum.rclone.org/t/some-unicode-forms-break-mount-on-macos-with-fuse-t/36403)
and [FUSE-T issue #16](https://github.com/macos-fuse-t/fuse-t/issues/16). The following
flag should be added to the `rclone mount` command.

    -o modules=iconv,from_code=UTF-8,to_code=UTF-8
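For example, a FUSE-T mount with the normalization workaround applied (remote and mountpoint are illustrative):

```
rclone mount remote:path /Users/me/mnt -o modules=iconv,from_code=UTF-8,to_code=UTF-8
```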
#### Read Only mounts

When mounting with `--read-only`, attempts to write to files will fail *silently* as
opposed to with a clear warning as in macFUSE.

## Limitations

Without the use of `--vfs-cache-mode` this can only write files

@@ -437,87 +437,6 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.

**PLEASE NOTE:** `--auth-proxy` and `--authorized-keys` cannot be used
together, if `--auth-proxy` is set the authorized keys option will be
ignored.

There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.

The program's job is to take a `user` and `pass` on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.

The config generated must have this extra parameter
- `_root` - root to use for the backend

And it may have this parameter
- `_obscure` - comma separated strings for parameters to obscure

If password authentication was used by the client, input to the proxy
process (on STDIN) would look similar to this:

```
{
    "user": "me",
    "pass": "mypassword"
}
```

If public-key authentication was used by the client, input to the
proxy process (on STDIN) would look similar to this:

```
{
    "user": "me",
    "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
}
```

And as an example return this on STDOUT

```
{
    "type": "sftp",
    "_root": "",
    "_obscure": "pass",
    "user": "me",
    "pass": "mypassword",
    "host": "sftp.example.com"
}
```

This would mean that an SFTP backend would be created on the fly for
the `user` and `pass`/`public_key` returned in the output to the host given. Note
that since `_obscure` is set to `pass`, rclone will obscure the `pass`
parameter before creating the backend (which is required for sftp
backends).

The program can manipulate the supplied `user` in any way, for example
to proxy to many different sftp backends, you could make the
`user` be `user@example.com` and then set the `host` to `example.com`
in the output and the user to `user`. For security you'd probably want
to restrict the `host` to a limited list.

Note that an internal cache is keyed on `user` so only use that for
configuration, don't use `pass` or `public_key`. This also means that if a user's
password or public-key is changed the cache will need to expire (which takes 5 mins)
before it takes effect.

This can be used to build general purpose proxies to any kind of
backend that rclone supports.
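As a quick sanity check, a proxy program can be driven by hand before wiring it into rclone (a sketch; `./my_proxy.py` is a hypothetical executable implementing the protocol above):

```
# Feed a sample login on STDIN and inspect the JSON config printed on STDOUT
echo '{"user": "me", "pass": "mypassword"}' | ./my_proxy.py
```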
```
rclone serve http remote:path [flags]
@@ -527,7 +446,6 @@ rclone serve http remote:path [flags]

```
--addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
--auth-proxy string A program to use to create the backend from the auth
--baseurl string Prefix for URLs - leave blank for root
--cert string TLS PEM key (concatenation of certificate and CA certificate)
--client-ca string Client certificate authority to verify clients with

@@ -28,30 +28,6 @@ supported hash on the backend or you can use a named hash such as
"MD5" or "SHA-1". Use the [hashsum](/commands/rclone_hashsum/) command
to see the full list.

## Access WebDAV on Windows

WebDAV shared folder can be mapped as a drive on Windows, however the default settings prevent it.
Windows will fail to connect to the server using insecure Basic authentication.
It will not even display any login dialog. Windows requires SSL / HTTPS connection to be used with Basic.
If you try to connect via Add Network Location Wizard you will get the following error:
"The folder you entered does not appear to be valid. Please choose another".
However, you still can connect if you set the following registry key on a client machine:
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters\BasicAuthLevel to 2.
The BasicAuthLevel can be set to the following values:
0 - Basic authentication disabled
1 - Basic authentication enabled for SSL connections only
2 - Basic authentication enabled for SSL connections and for non-SSL connections
If required, increase the FileSizeLimitInBytes to a higher value.
Navigate to the Services interface, then restart the WebClient service.
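A sketch of applying the registry change and restarting the service from an elevated command prompt:

```
reg add "HKLM\SYSTEM\CurrentControlSet\Services\WebClient\Parameters" /v BasicAuthLevel /t REG_DWORD /d 2 /f
net stop WebClient
net start WebClient
```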
## Access Office applications on WebDAV

Navigate to the following registry key: HKEY_CURRENT_USER\Software\Microsoft\Office\[14.0/15.0/16.0]\Common\Internet
Create a new DWORD BasicAuthLevel with value 2.
0 - Basic authentication disabled
1 - Basic authentication enabled for SSL connections only
2 - Basic authentication enabled for SSL and for non-SSL connections

https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint
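For example, for Office 2016 (a sketch; pick the version key matching your installation):

```
reg add "HKCU\Software\Microsoft\Office\16.0\Common\Internet" /v BasicAuthLevel /t REG_DWORD /d 2 /f
```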

## Server options

@@ -26,7 +26,7 @@ recursion.

Some backends do not always provide file sizes, see for example
[Google Photos](/googlephotos/#size) and
[Google Docs](/drive/#limitations-of-google-docs).
[Google Drive](/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.

@@ -23,7 +23,7 @@ want to delete files from destination, use the
**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

    rclone sync --interactive SOURCE remote:DESTINATION
    rclone sync -i SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on

@@ -21,7 +21,7 @@ unless `--no-create` or `--recursive` is provided.

If `--recursive` is used then recursively sets the modification
time on all existing files that is found under the path. Filters are supported,
and you can test with the `--dry-run` or the `--interactive`/`-i` flag.
and you can test with the `--dry-run` or the `--interactive` flag.

If `--timestamp` is used then sets the modification time to that
time instead of the current time. Times may be specified as one of:

@@ -58,7 +58,7 @@ custom salt is effectively a second password that must be memorized.
based on XSalsa20 cipher and Poly1305 for integrity.
[Names](#name-encryption) (file- and directory names) are also encrypted
by default, but this has some implications and is therefore
possible to be turned off.
possible to turned off.

## Configuration

@@ -660,7 +660,7 @@ as `eremote:`.

To sync the two remotes you would do

    rclone sync --interactive remote:crypt remote2:crypt
    rclone sync -i remote:crypt remote2:crypt

And to check the integrity you would do
@@ -94,7 +94,7 @@ storage system in the config file then the sub path, e.g.

You can define as many storage paths as you like in the config file.

Please use the [`--interactive`/`-i`](#interactive) flag while
Please use the [`-i` / `--interactive`](#interactive) flag while
learning rclone to avoid accidental data loss.

Subcommands
@@ -104,7 +104,7 @@ rclone uses a system of subcommands. For example

    rclone ls remote:path # lists a remote
    rclone copy /local/path remote:path # copies /local/path to the remote
    rclone sync --interactive /local/path remote:path # syncs /local/path to the remote
    rclone sync -i /local/path remote:path # syncs /local/path to the remote

The main rclone commands with most used first

@@ -338,7 +338,7 @@ Will get their own names
### Valid remote names

Remote names are case sensitive, and must adhere to the following rules:
- May contain number, letter, `_`, `-`, `.`, `+`, `@` and space.
- May contain number, letter, `_`, `-`, `.` and space.
- May not start with `-` or space.
- May not end with space.

@@ -396,11 +396,11 @@ file or directory like this then use the full path starting with a

So to sync a directory called `sync:me` to a remote called `remote:` use

    rclone sync --interactive ./sync:me remote:path
    rclone sync -i ./sync:me remote:path

or

    rclone sync --interactive /full/path/to/sync:me remote:path
    rclone sync -i /full/path/to/sync:me remote:path

Server Side Copy
----------------
@@ -433,8 +433,8 @@ same.

This can be used when scripting to make aged backups efficiently, e.g.

    rclone sync --interactive remote:current-backup remote:previous-backup
    rclone sync --interactive /path/to/files remote:current-backup
    rclone sync -i remote:current-backup remote:previous-backup
    rclone sync -i /path/to/files remote:current-backup

## Metadata support {#metadata}

@@ -621,7 +621,7 @@ excluded by a filter rule.

For example

    rclone sync --interactive /path/to/local remote:current --backup-dir remote:old
    rclone sync -i /path/to/local remote:current --backup-dir remote:old

will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted will be stored in
@@ -789,12 +789,6 @@ interfere with checking.
It can also be useful to ensure perfect ordering when using
`--order-by`.

If both `--check-first` and `--order-by` are set when doing `rclone move`
then rclone will use the transfer thread to delete source files which
don't need transferring. This will enable perfect ordering of the
transfers and deletes but will cause the transfer stats to have more
items in than expected.

Using this flag can use more memory as it effectively sets
`--max-backlog` to infinite. This means that all the info on the
objects to transfer is held in memory before the transfers start.
@@ -1092,7 +1086,7 @@ Add an HTTP header for all download transactions. The flag can be repeated to
add multiple headers.

```
rclone sync --interactive s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
rclone sync -i s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
```

See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for

@@ -1104,7 +1098,7 @@ Add an HTTP header for all upload transactions. The flag can be repeated to add
multiple headers.

```
rclone sync --interactive ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
rclone sync -i ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
```

See the GitHub issue [here](https://github.com/rclone/rclone/issues/59) for

@@ -1214,7 +1208,7 @@ This can be useful as an additional layer of protection for immutable
or append-only data sets (notably backup archives), where modification
implies corruption and should not be propagated.

### -i, --interactive {#interactive}
### -i / --interactive {#interactive}

This flag can be used to tell rclone that you wish a manual
confirmation before destructive operations.
@@ -1225,7 +1219,7 @@ especially with `rclone sync`.
For example

```
$ rclone delete --interactive /tmp/dir
$ rclone delete -i /tmp/dir
rclone: delete "important-file.txt"?
y) Yes, this is OK (default)
n) No, skip this
@@ -1340,14 +1334,6 @@ This tells rclone not to delete more than N files. If that limit is
exceeded then a fatal error will be generated and rclone will stop the
operation in progress.

### --max-delete-size=SIZE ###

Rclone will stop deleting files when the total size of deletions has
reached the size specified. It defaults to off.

If that limit is exceeded then a fatal error will be generated and
rclone will stop the operation in progress.
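Both limits can be combined as a safety net when syncing, e.g. (a sketch; paths are illustrative):

```
rclone sync --interactive /path/to/src remote:dst --max-delete 100 --max-delete-size 1G
```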
### --max-depth=N ###

This modifies the recursion depth for all the commands except purge.
@@ -1386,7 +1372,7 @@ When the limit is reached all transfers will stop immediately.

Rclone will exit with exit code 8 if the transfer limit is reached.

## -M, --metadata
## --metadata / -M

Setting this flag enables rclone to copy the metadata from the source
to the destination. For local backends this is ownership, permissions,
@@ -1805,7 +1791,7 @@ or with `--backup-dir`. See `--backup-dir` for more info.

For example

    rclone copy --interactive /path/to/local/file remote:current --suffix .bak
    rclone copy -i /path/to/local/file remote:current --suffix .bak

will copy `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted have .bak added.
@@ -1814,7 +1800,7 @@ If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.

    rclone sync --interactive /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"

### --suffix-keep-extension ###

@@ -2113,9 +2099,9 @@ these options. For example this can be very useful with the HTTP or
WebDAV backends. Rclone HTTP servers have their own set of
configuration for SSL/TLS which you can find in their documentation.

### --ca-cert stringArray
### --ca-cert string

This loads the PEM encoded certificate authority certificates and uses
This loads the PEM encoded certificate authority certificate and uses
it to verify the certificates of the servers rclone connects to.

If you have generated certificates signed with a local CA then you
@@ -16,8 +16,7 @@ See the [install](https://rclone.org/install/) documentation for more details.
|:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
| Intel/AMD - 64 Bit | {{< download windows amd64 >}} | {{< download osx amd64 >}} | {{< download linux amd64 >}} | {{< download linux amd64 deb >}} | {{< download linux amd64 rpm >}} | {{< download freebsd amd64 >}} | {{< download netbsd amd64 >}} | {{< download openbsd amd64 >}} | {{< download plan9 amd64 >}} | {{< download solaris amd64 >}} |
| Intel/AMD - 32 Bit | {{< download windows 386 >}} | - | {{< download linux 386 >}} | {{< download linux 386 deb >}} | {{< download linux 386 rpm >}} | {{< download freebsd 386 >}} | {{< download netbsd 386 >}} | {{< download openbsd 386 >}} | {{< download plan9 386 >}} | - |
| ARMv5 - 32 Bit NOHF | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
| ARMv6 - 32 Bit | - | - | {{< download linux arm-v6 >}} | {{< download linux arm-v6 deb >}} | {{< download linux arm-v6 rpm >}} | {{< download freebsd arm-v6 >}} | {{< download netbsd arm-v6 >}} | - | - | - |
| ARMv6 - 32 Bit | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
| ARMv7 - 32 Bit | - | - | {{< download linux arm-v7 >}} | {{< download linux arm-v7 deb >}} | {{< download linux arm-v7 rpm >}} | {{< download freebsd arm-v7 >}} | {{< download netbsd arm-v7 >}} | - | - | - |
| ARM - 64 Bit | {{< download windows arm64 >}} | {{< download osx arm64 >}} | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
| MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
@@ -25,10 +24,6 @@ See the [install](https://rclone.org/install/) documentation for more details.

You can also find a [mirror of the downloads on GitHub](https://github.com/rclone/rclone/releases/tag/{{< version >}}).

See also [Android builds](https://beta.rclone.org/{{% version %}}/testbuilds/).
These are built as part of the official release, but haven't been
adopted as first class builds yet.

## Script download and install ##

To install rclone on Linux/macOS/BSD systems, run:
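The command itself is cut off by the diff context above; for reference, the install one-liner documented on rclone.org is:

```
curl https://rclone.org/install.sh | sudo bash
```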
@@ -88,8 +83,7 @@ script) from a URL which doesn't change then you can use these links.
|:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
| Intel/AMD - 64 Bit | {{< cdownload windows amd64 >}} | {{< cdownload osx amd64 >}} | {{< cdownload linux amd64 >}} | {{< cdownload linux amd64 deb >}} | {{< cdownload linux amd64 rpm >}} | {{< cdownload freebsd amd64 >}} | {{< cdownload netbsd amd64 >}} | {{< cdownload openbsd amd64 >}} | {{< cdownload plan9 amd64 >}} | {{< cdownload solaris amd64 >}} |
| Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
| ARMv5 - 32 Bit NOHF | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm-v6 >}} | {{< cdownload linux arm-v6 deb >}} | {{< cdownload linux arm-v6 rpm >}} | {{< cdownload freebsd arm-v6 >}} | {{< cdownload netbsd arm-v6 >}} | - | - | - |
| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
| ARMv7 - 32 Bit | - | - | {{< cdownload linux arm-v7 >}} | {{< cdownload linux arm-v7 deb >}} | {{< cdownload linux arm-v7 rpm >}} | {{< cdownload freebsd arm-v7 >}} | {{< cdownload netbsd arm-v7 >}} | - | - | - |
| ARM - 64 Bit | {{< cdownload windows arm64 >}} | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
| MIPS - Big Endian | - | - | {{< cdownload linux mips >}} | {{< cdownload linux mips deb >}} | {{< cdownload linux mips rpm >}} | - | - | - | - | - |

@@ -988,10 +988,6 @@ as malware or spam and cannot be downloaded" with the error code
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.

Note that if you are using a service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.

Properties:

- Config: acknowledge_abuse
@@ -1366,9 +1362,9 @@ This takes an optional directory to trash which makes this easier to
use via the API.

    rclone backend untrash drive:directory
    rclone backend --interactive untrash drive:directory subdir
    rclone backend -i untrash drive:directory subdir

Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Use the -i flag to see what would be restored before restoring it.

Result:

@@ -1402,7 +1398,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
Use the -i flag to see what would be copied before copying.
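This last hunk appears to belong to the drive `copyid` backend command; a hypothetical invocation, assuming the `rclone backend copyid drive: ID path` syntax (the file ID and destination are illustrative):

```
rclone backend copyid drive: 1Ab2Cd3Ef4Gh5Ij6 /path/to/dest/
```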
### exportformats
@@ -1512,15 +1508,9 @@ to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
(near the top right corner of the right panel), then select "External"
and click on "CREATE"; on the next screen, enter an "Application name"
("rclone" is OK); enter "User Support Email" (your own email is OK);
enter "Developer Contact Email" (your own email is OK); then click on
"Save" (all other data is optional). You will also have to add some scopes,
including `.../auth/docs` and `.../auth/drive` in order to be able to edit,
create and delete files with RClone. You may also want to include the
`../auth/drive.metadata.readonly` scope. After adding scopes, click
"Save and continue" to add test users. Be sure to add your own account to
the test users. Once you've added yourself as a test user and saved the
changes, click again on "Credentials" on the left panel to go back to
the "Credentials" screen.
enter "Developer Contact Email" (your own email is OK); then click on "Save" (all other data is optional).
Click again on "Credentials" on the left panel to go back to the
"Credentials" screen.

(PS: if you are a GSuite user, you could also select "Internal" instead
of "External" above, but this will restrict API use to Google Workspace

@@ -1533,14 +1523,16 @@ then select "OAuth client ID".

8. It will show you a client ID and client secret. Make a note of these.

(If you selected "External" at Step 5 continue to Step 9.
(If you selected "External" at Step 5 continue to "Publish App" in the Steps 9 and 10.
If you chose "Internal" you don't need to publish and can skip straight to
Step 10 but your destination drive must be part of the same Google Workspace.)
Step 11.)

9. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
You will also want to add yourself as a test user.
9. Go to "Oauth consent screen" and press "Publish App"

10. Provide the noted client ID and client secret to rclone.
10. Click "OAuth consent screen", then click "PUBLISH APP" button and
confirm, or add your account under "Test users".

11. Provide the noted client ID and client secret to rclone.

Be aware that, due to the "enhanced security" recently introduced by
Google, you are theoretically expected to "submit your app for verification"
@@ -1548,11 +1540,7 @@ and then wait a few weeks(!) for their response; in practice, you can go right
ahead and use the client ID and client secret with rclone, the only issue will
be a very scary confirmation screen shown when you connect via your browser
for rclone to be able to get its token-id (but as this only happens during
the remote configuration, it's not such a big deal). Keeping the application in
"Testing" will work as well, but the limitation is that any grants will expire
after a week, which can be annoying to refresh constantly. If, for whatever
reason, a short grant time is not a problem, then keeping the application in
testing mode would also be sufficient.
the remote configuration, it's not such a big deal).

(Thanks to @balazer on github for these instructions.)
@@ -33,7 +33,7 @@ The syncs would be incremental (on a file by file basis).

e.g.

    rclone sync --interactive drive:Folder s3:bucket
    rclone sync -i drive:Folder s3:bucket

### Using rclone from multiple locations at the same time ###
@@ -42,8 +42,8 @@ You can use rclone from multiple places at the same time if you choose
different subdirectory for the output, e.g.

```
Server A> rclone sync --interactive /tmp/whatever remote:ServerA
Server B> rclone sync --interactive /tmp/whatever remote:ServerB
Server A> rclone sync -i /tmp/whatever remote:ServerA
Server B> rclone sync -i /tmp/whatever remote:ServerB
```

If you sync to the same directory then you should use rclone copy

@@ -723,7 +723,7 @@ and `-v` first.
In conjunction with `rclone sync`, `--delete-excluded` deletes any files
on the destination which are excluded from the command.

E.g. the scope of `rclone sync --interactive A: B:` can be restricted:
E.g. the scope of `rclone sync -i A: B:` can be restricted:

    rclone --min-size 50k --delete-excluded sync A: B:

@@ -20,7 +20,7 @@ These flags are available for every command.
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
--bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--ca-cert stringArray CA certificate used to verify servers
--ca-cert string CA certificate used to verify servers
--cache-dir string Directory rclone will use for caching (default "$HOME/.cache/rclone")
--check-first Do all the checks before starting transfers
--checkers int Number of checkers to run in parallel (default 8)
@@ -82,7 +82,6 @@ These flags are available for every command.
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
--max-depth int If set limits the recursion depth to this (default -1)
--max-duration Duration Maximum duration rclone will transfer data for (default 0s)
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
@@ -171,7 +170,7 @@ These flags are available for every command.
--use-json-log Use json log format
--use-mmap Use mmap allocator (see docs)
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string (default "rclone/v1.62.2")
--user-agent string Set the user-agent to a specified string (default "rclone/v1.61.1")
-v, --verbose count Print lots more stuff (repeat for more)
```
@@ -388,7 +387,6 @@ and may be set in the config file.
--gcs-decompress If set this will decompress gzip encoded objects
--gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-endpoint string Endpoint for the service
--gcs-env-auth Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars)
--gcs-location string Location for the newly created buckets
--gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
--gcs-object-acl string Access Control List for new objects
@@ -477,7 +475,6 @@ and may be set in the config file.
--mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password (obscured)
--mega-use-https Use HTTPS for transfers
--mega-user string User name
--netstorage-account string Set the NetStorage account name
--netstorage-host string Domain+path of NetStorage host to connect to
@@ -493,7 +490,6 @@ and may be set in the config file.
--onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
--onedrive-hash-type string Specify the hash in use for the backend (default "auto")
--onedrive-link-password string Set the password for links created by the link command
--onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command (default "view")
@@ -518,12 +514,6 @@ and may be set in the config file.
--oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
--oos-provider string Choose your Auth Provider (default "env_auth")
--oos-region string Object storage Region
--oos-sse-customer-algorithm string If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm
--oos-sse-customer-key string To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
--oos-sse-customer-key-file string To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
--oos-sse-customer-key-sha256 string If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
--oos-sse-kms-key-id string if using your own master key in vault, this header specifies the
--oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
@@ -592,7 +582,6 @@ and may be set in the config file.
--s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3
--s3-sts-endpoint string Endpoint for STS
--s3-upload-concurrency int Concurrency for multipart uploads (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
@@ -658,13 +647,12 @@ and may be set in the config file.
--smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--smb-pass string SMB password (obscured)
--smb-port int SMB port number (default 445)
--smb-spn string Service principal name
--smb-user string SMB username (default "$USER")
--storj-access-grant string Access grant
--storj-api-key string API key
--storj-passphrase string Encryption passphrase
--storj-provider string Choose an authentication method (default "existing")
--storj-satellite-address string Satellite address (default "us1.storj.io")
--storj-satellite-address string Satellite address (default "us-central-1.storj.io")
--sugarsync-access-key-id string Sugarsync Access Key ID
--sugarsync-app-id string Sugarsync App ID
--sugarsync-authorization string Sugarsync authorization
@@ -99,7 +99,7 @@ List the contents of a directory
Sync `/home/local/directory` to the remote directory, deleting any
excess files in the directory.

    rclone sync --interactive /home/local/directory remote:directory
    rclone sync -i /home/local/directory remote:directory

### Anonymous FTP

@@ -172,7 +172,7 @@ List the contents of a bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.

    rclone sync --interactive /home/local/directory remote:bucket
    rclone sync -i /home/local/directory remote:bucket

### Service Account support

@@ -552,24 +552,6 @@ Properties:
- "DURABLE_REDUCED_AVAILABILITY"
    - Durable reduced availability storage class

#### --gcs-env-auth

Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).

Only applies if service_account_file and service_account_credentials is blank.

Properties:

- Config: env_auth
- Env Var: RCLONE_GCS_ENV_AUTH
- Type: bool
- Default: false
- Examples:
    - "false"
        - Enter credentials in the next step.
    - "true"
        - Get GCP IAM credentials from the environment (env vars or IAM).
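A sketch of using this from an environment that already carries GCP credentials, such as a GCE instance (the remote name is illustrative):

```
# Create a GCS remote that picks up credentials from the runtime environment
rclone config create mygcs "google cloud storage" env_auth=true

# List buckets using those credentials
rclone lsd mygcs:
```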
### Advanced options

Here are the Advanced options specific to google cloud storage (Google Cloud Storage (this is not Google Drive)).

@@ -117,11 +117,11 @@ List the contents of an album
Sync `/home/local/images` to the Google Photos, removing any excess
files in the album.

    rclone sync --interactive /home/local/image remote:album/newAlbum
    rclone sync -i /home/local/image remote:album/newAlbum

### Layout

As Google Photos is not a general purpose cloud storage system, the
As Google Photos is not a general purpose cloud storage system the
backend is laid out to help you navigate it.

The directories under `media` show different ways of categorizing the
@@ -471,4 +471,4 @@ Rclone cannot delete files anywhere except under `album`.

### Deleting albums

The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733).
The Google Photos API does not support deleting albums - see [bug #135714733](https://issuetracker.google.com/issues/135714733).

@@ -91,7 +91,7 @@ List the contents of a directory

Sync the remote `directory` to `/home/local/directory`, deleting any excess files.

    rclone sync --interactive remote:directory /home/local/directory
    rclone sync -i remote:directory /home/local/directory

### Setting up your own HDFS instance for testing

@@ -99,7 +99,7 @@ List the contents of a directory

Sync the remote `directory` to `/home/local/directory`, deleting any excess files.

    rclone sync --interactive remote:directory /home/local/directory
    rclone sync -i remote:directory /home/local/directory

### Read only
@@ -142,14 +142,6 @@ If you are planning to use the [rclone mount](/commands/rclone_mount/)
feature then you will need to install the third party utility
[WinFsp](https://winfsp.dev/) also.

### Windows package manager (Winget) {#windows-chocolatey}

[Winget](https://learn.microsoft.com/en-us/windows/package-manager/) comes pre-installed with the latest versions of Windows. If not, update the [App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1) package from the Microsoft store.

```
winget install Rclone.Rclone
```

### Chocolatey package manager {#windows-chocolatey}

Make sure you have [Choco](https://chocolatey.org/) installed
@@ -173,19 +165,6 @@ developers so it may be out of date. Its current version is as below.

[](https://repology.org/project/rclone/versions)

### Scoop package manager {#windows-scoop}

Make sure you have [Scoop](https://scoop.sh/) installed

```
scoop install rclone
```

Note that this is a third party installer not controlled by the rclone
developers so it may be out of date. Its current version is as below.

[](https://repology.org/project/rclone/versions)

## Package manager installation {#package-manager}

Many Linux, Windows, macOS and other OS distributions package and

@@ -109,9 +109,6 @@ case "$OS_type" in
  armv7*)
    OS_type='arm-v7'
    ;;
  armv6*)
    OS_type='arm-v6'
    ;;
  arm*)
    OS_type='arm'
    ;;