1
0
mirror of https://github.com/rclone/rclone.git synced 2025-12-29 06:33:16 +00:00

Compare commits

..

3 Commits

Author SHA1 Message Date
Nick Craig-Wood
ff8ce1befa serve nfs: try potential fix for nfs locking #7973
See: https://github.com/willscott/go-nfs/issues/54
2024-08-28 08:01:46 +01:00
Nick Craig-Wood
77415bc461 serve nfs: unify the nfs library logging with rclone's logging better
Before this we ignored the logging levels and logged everything as
debug. This will obey the rclone logging flags and log at the correct
level.
2024-08-28 08:01:46 +01:00
Nick Craig-Wood
7d1e57ff1c serve nfs: fix incorrect user id and group id exported to NFS #7973
Before this change all exports were exported as root and the --uid and
--gid flags of the VFS were ignored.

This fixes the issue by exporting the UID and GID correctly which
default to the current user and group unless set explicitly.
2024-08-28 07:50:58 +01:00
233 changed files with 6040 additions and 16891 deletions

View File

@@ -32,27 +32,15 @@ jobs:
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
uses: ilteoood/docker_buildx@1.1.0
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'

View File

@@ -100,45 +100,10 @@ linters-settings:
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"
disabled-checks:
- appendAssign
- captLocal
- commentFormatting
- exitAfterDefer
- ifElseChain
- singleCaseSwitch

4123
MANUAL.html generated

File diff suppressed because it is too large Load Diff

4071
MANUAL.md generated

File diff suppressed because it is too large Load Diff

3978
MANUAL.txt generated

File diff suppressed because it is too large Load Diff

View File

@@ -168,8 +168,6 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

View File

@@ -1 +1 @@
v1.68.1
v1.68.0

View File

@@ -10,6 +10,7 @@ import (
goflag "flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"path"
@@ -92,7 +93,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -407,7 +408,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
fs.Logf(nil, "original size: %v", originalSize)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
@@ -424,7 +425,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
fs.Logf(nil, "updated size: %v", len(data2))
log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
@@ -484,49 +485,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
fs.Logf(nil, "not expected listing /test: %v", li)
log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/second: %v", li)
log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing: %v", li)
log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -576,43 +577,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
fs.Logf(nil, "not found /test")
log.Printf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
fs.Logf(nil, "not found /test/one")
log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
fs.Logf(nil, "not found /test/one/test2")
log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
fs.Logf(nil, "err: %v", err)
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
fs.Logf(nil, "not expected listing /test/one: %v", li)
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
fs.Logf(nil, "not expected name: %v", fi.Name())
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
fs.Logf(nil, "not expected remote: %v", di.Remote())
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
fs.Logf(nil, "unexpected listing: %v", li)
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
fs.Logf(nil, "complete listing /test/one/test2")
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
@@ -770,24 +771,24 @@ func TestInternalBug2117(t *testing.T) {
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
fs.Logf(nil, "len: %v", len(di))
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
@@ -828,7 +829,7 @@ func newRun() *run {
} else {
r.tmpUploadDir = uploadDir
}
fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}

View File

@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return false, err // No such user
case 186:
return false, err // IP blocked?
case 374, 412: // Flood detected seems to be #412 now
case 374:
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:

View File

@@ -441,28 +441,23 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
// Find current directory ID
srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if srcDirectoryID == dstDirectoryID {
// No rename needed
if srcLeaf == dstLeaf {
return src, nil
}
resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't rename file: %w", err)
}
@@ -471,16 +466,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
url = resp.URLs[0].URL
} else {
dstFolderID, err := strconv.Atoi(dstDirectoryID)
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
rename := dstLeaf
// No rename needed
if srcLeaf == dstLeaf {
rename = ""
}
resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, fmt.Errorf("couldn't move file: %w", err)
}

View File

@@ -1105,12 +1105,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find existing object
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil {
@@ -1118,7 +1112,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
info, err := f.moveTo(ctx, srcObj.id, path.Base(srcObj.remote), dstLeaf, srcObj.dirID, dstDirectoryID)
if err != nil {
return nil, err
}
@@ -1469,13 +1463,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.id == "" {
return nil, errors.New("can't download - no id")
}
if o.url == "" {
// On upload an Object is returned with no url, so fetch it here if needed
err = o.readMetaData(ctx)
if err != nil {
return nil, fmt.Errorf("read metadata: %w", err)
}
}
fs.FixRangeOption(options, o.size)
var resp *http.Response
opts := rest.Opts{

View File

@@ -1555,7 +1555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
if err == nil && meta != nil {
if err != nil && meta != nil {
createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
if !createTimeMeta {
createTime = srcObj.createTime

View File

@@ -942,8 +942,7 @@ func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
// Redirects have no body so don't report an error
if err != nil && resp.Header.Get("Location") == "" {
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {

View File

@@ -513,72 +513,6 @@ type RequestDecompress struct {
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------ authorization
// CaptchaToken is a response to requestCaptchaToken api call
type CaptchaToken struct {
CaptchaToken string `json:"captcha_token"`
ExpiresIn int64 `json:"expires_in"` // currently 300s
// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
Expiry time.Time `json:"expiry,omitempty"`
URL string `json:"url,omitempty"` // a link for users to solve captcha
}
// expired reports whether the token is expired.
// t must be non-nil.
func (t *CaptchaToken) expired() bool {
if t.Expiry.IsZero() {
return false
}
expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *CaptchaToken) Valid() bool {
return t != nil && t.CaptchaToken != "" && !t.expired()
}
// CaptchaTokenRequest is to request for captcha token
type CaptchaTokenRequest struct {
Action string `json:"action,omitempty"`
CaptchaToken string `json:"captcha_token,omitempty"`
ClientID string `json:"client_id,omitempty"`
DeviceID string `json:"device_id,omitempty"`
Meta *CaptchaTokenMeta `json:"meta,omitempty"`
}
// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
type CaptchaTokenMeta struct {
CaptchaSign string `json:"captcha_sign,omitempty"`
ClientVersion string `json:"client_version,omitempty"`
PackageName string `json:"package_name,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
UserID string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
UserName string `json:"username,omitempty"`
Email string `json:"email,omitempty"`
PhoneNumber string `json:"phone_number,omitempty"`
}
// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
type Token struct {
TokenType string `json:"token_type"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
Sub string `json:"sub"`
}
// Expiry returns expiry from expires in, so it should be called on retrieval
// e must be non-nil.
func (e *Token) Expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
// ------------------------------------------------------------
// NOT implemented YET

View File

@@ -3,10 +3,8 @@ package pikpak
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@@ -16,13 +14,10 @@ import (
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
@@ -267,20 +262,15 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
if err != nil {
return
}
if src.Size() == 0 {
// If src is zero-length, the API will return
// Error "cid and file_size is required" (400)
// In this case, we can simply return cid == gcid
return cid, nil
}
params := url.Values{}
params.Set("cid", cid)
params.Set("file_size", strconv.FormatInt(src.Size(), 10))
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
Method: "GET",
Path: "/drive/v1/resource/cid",
Parameters: params,
ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
}
info := struct {
@@ -418,8 +408,6 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
return
}
// ------------------------------------------------------------ authorization
// randomly generates device id used for request header 'x-device-id'
//
// original javascript implementation
@@ -440,206 +428,3 @@ func genDeviceID() string {
}
return string(base)
}
var md5Salt = []string{
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
"+r6CQVxjzJV6LCV",
"F",
"pFJRC",
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
"/750aCr4lm/Sly/c",
"RB+DT/gZCrbV",
"",
"CyLsf7hdkIRxRm215hl",
"7xHvLi2tOYP0Y92b",
"ZGTXXxu8E/MIWaEDB+Sm/",
"1UI3",
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
"ihtqpG6FMt65+Xk+tWUH2",
"NhXXU9rg4XXdzo7u5o",
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
func calcCaptchaSign(deviceID string) (timestamp, sign string) {
timestamp = fmt.Sprint(time.Now().UnixMilli())
str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
for _, salt := range md5Salt {
str = md5Sum(str + salt)
}
sign = "1." + str
return
}
func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
req = &api.CaptchaTokenRequest{
Action: action,
CaptchaToken: oldToken, // can be empty initially
ClientID: clientID,
DeviceID: opt.DeviceID,
Meta: new(api.CaptchaTokenMeta),
}
switch action {
case "POST:/v1/auth/signin":
req.Meta.UserName = opt.Username
default:
timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
req.Meta.CaptchaSign = captchaSign
req.Meta.Timestamp = timestamp
req.Meta.ClientVersion = clientVersion
req.Meta.PackageName = packageName
req.Meta.UserID = opt.UserID
}
return
}
// CaptchaTokenSource stores updated captcha tokens in the config file
type CaptchaTokenSource struct {
mu sync.Mutex
m configmap.Mapper
opt *Options
token *api.CaptchaToken
ctx context.Context
rst *pikpakClient
}
// initialize CaptchaTokenSource from rclone.conf if possible
func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
token := new(api.CaptchaToken)
tokenString, ok := m.Get("captcha_token")
if !ok || tokenString == "" {
fs.Debugf(nil, "failed to read captcha token out of config file")
} else {
if err := json.Unmarshal([]byte(tokenString), token); err != nil {
fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
}
}
return &CaptchaTokenSource{
m: m,
opt: opt,
token: token,
ctx: ctx,
rst: newPikpakClient(getClient(ctx, opt), opt),
}
}
// requestToken retrieves captcha token from API
func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
}
var info *api.CaptchaToken
_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
if err == nil && info.ExpiresIn != 0 {
// populate to Expiry
info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
cts.token = info // update with a new one
}
return
}
func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
oldToken := ""
if cts.token != nil {
oldToken = cts.token.CaptchaToken
}
action := "GET:/drive/v1/about"
if opts.RootURL == "" && opts.Path != "" {
action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
} else if u, err := url.Parse(opts.RootURL); err == nil {
action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
}
req := newCaptchaTokenRequest(action, oldToken, cts.opt)
if err := cts.requestToken(cts.ctx, req); err != nil {
return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
}
// put it into rclone.conf
tokenBytes, err := json.Marshal(cts.token)
if err != nil {
return "", fmt.Errorf("failed to marshal captcha token: %w", err)
}
cts.m.Set("captcha_token", string(tokenBytes))
return cts.token.CaptchaToken, nil
}
// Invalidate resets existing captcha token for a forced refresh
func (cts *CaptchaTokenSource) Invalidate() {
cts.mu.Lock()
cts.token.CaptchaToken = ""
cts.mu.Unlock()
}
// Token returns a valid captcha token
func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
cts.mu.Lock()
defer cts.mu.Unlock()
if cts.token.Valid() {
return cts.token.CaptchaToken, nil
}
return cts.refreshToken(opts)
}
// pikpakClient wraps rest.Client with a handle of captcha token
type pikpakClient struct {
opt *Options
client *rest.Client
captcha *CaptchaTokenSource
}
// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
// * error handler
// * root url
// * default headers
func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
for key, val := range map[string]string{
"Referer": "https://mypikpak.com/",
"x-client-id": clientID,
"x-client-version": clientVersion,
"x-device-id": opt.DeviceID,
// "x-device-model": "firefox%2F129.0",
// "x-device-name": "PC-Firefox",
// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
// "x-net-work-type": "NONE",
// "x-os-version": "Win32",
// "x-platform-version": "1",
// "x-protocol-version": "301",
// "x-provider-name": "NONE",
// "x-sdk-version": "8.0.3",
} {
client.SetHeader(key, val)
}
return &pikpakClient{
client: client,
opt: opt,
}
}
// This should be called right after pikpakClient is initialized
func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
return c
}
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
if c.captcha != nil {
token, err := c.captcha.Token(opts)
if err != nil || token == "" {
return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
}
if opts.ExtraHeaders == nil {
opts.ExtraHeaders = make(map[string]string)
}
opts.ExtraHeaders["x-captcha-token"] = token
}
return c.client.CallJSON(ctx, opts, request, response)
}
func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
return c.client.Call(ctx, opts)
}

View File

@@ -23,7 +23,6 @@ package pikpak
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -52,7 +51,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
@@ -66,17 +64,15 @@ import (
// Constants
const (
clientID = "YUMx5nI8ZU8Ap8pm"
clientVersion = "2.0.0"
packageName = "mypikpak.com"
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
rcloneClientID = "YNxT9w7GMdWvEOKa"
rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
)
// Globals
@@ -89,53 +85,43 @@ var (
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: clientID,
RedirectURL: oauthutil.RedirectURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Returns OAuthOptions modified for pikpak
func pikpakOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Advanced = true
} else if opt.Name == config.ConfigClientSecret {
opt.Advanced = true
}
opts = append(opts, opt)
}
return opts
}
// pikpakAuthorize retrieves OAuth token using user/pass and saves it to rclone.conf
func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
if opt.Username == "" {
return errors.New("no username")
// override default client id/secret
if id, ok := m.Get("client_id"); ok && id != "" {
oauthConfig.ClientID = id
}
if secret, ok := m.Get("client_secret"); ok && secret != "" {
oauthConfig.ClientSecret = secret
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
}
// new device id if necessary
if len(opt.DeviceID) != 32 {
opt.DeviceID = genDeviceID()
m.Set("device_id", opt.DeviceID)
fs.Infof(nil, "Using new device id %q", opt.DeviceID)
}
opts := rest.Opts{
Method: "POST",
RootURL: "https://user.mypikpak.com/v1/auth/signin",
}
req := map[string]string{
"username": opt.Username,
"password": pass,
"client_id": clientID,
}
var token api.Token
rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
_, err = rst.CallJSON(ctx, &opts, req, &token)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
rst.captcha.Invalidate()
_, err = rst.CallJSON(ctx, &opts, req, &token)
}
}
t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
if err != nil {
return fmt.Errorf("failed to retrieve token using username/password: %w", err)
}
t := &oauth2.Token{
AccessToken: token.AccessToken,
TokenType: token.TokenType,
RefreshToken: token.RefreshToken,
Expiry: token.Expiry(),
}
return oauthutil.PutToken(name, m, t, false)
}
@@ -174,7 +160,7 @@ func init() {
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: []fs.Option{{
Options: append(pikpakOAuthOptions(), []fs.Option{{
Name: "user",
Help: "Pikpak username.",
Required: true,
@@ -184,18 +170,6 @@ func init() {
Help: "Pikpak password.",
Required: true,
IsPassword: true,
}, {
Name: "device_id",
Help: "Device ID used for authorization.",
Advanced: true,
Sensitive: true,
}, {
Name: "user_agent",
Default: defaultUserAgent,
Advanced: true,
Help: fmt.Sprintf(`HTTP user agent for pikpak.
Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
@@ -274,7 +248,7 @@ this may help to speed up the transfers.`,
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8),
}},
}}...),
})
}
@@ -282,9 +256,6 @@ this may help to speed up the transfers.`,
type Options struct {
Username string `config:"user"`
Password string `config:"pass"`
UserID string `config:"user_id"` // only available during runtime
DeviceID string `config:"device_id"`
UserAgent string `config:"user_agent"`
RootFolderID string `config:"root_folder_id"`
UseTrash bool `config:"use_trash"`
TrashedOnly bool `config:"trashed_only"`
@@ -300,10 +271,11 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
rst *pikpakClient // the connection to the server
rst *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
rootFolderID string // the id of the root folder
deviceID string // device id used for api requests
client *http.Client // authorized client
m configmap.Mapper
tokenMu *sync.Mutex // when renewing tokens
@@ -457,12 +429,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
} else if apiErr.Reason == "file_space_not_enough" {
// "file_space_not_enough" (8): Storage space is not enough
return false, fserrors.FatalError(err)
} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
// "captcha_invalid" (9): Verification code is invalid
// This error occurred on the POST:/drive/v1/files endpoint
// when a zero-byte file was uploaded with an invalid captcha token
f.rst.captcha.Invalidate()
return true, err
}
}
@@ -486,36 +452,13 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
// Override few config settings and create a client
newCtx, ci := fs.AddConfig(ctx)
ci.UserAgent = opt.UserAgent
return fshttp.NewClient(newCtx)
}
// newClientWithPacer sets a new http/rest client with a pacer to Fs
func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
var ts *oauthutil.TokenSource
f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
if err != nil {
return fmt.Errorf("failed to create oauth client: %w", err)
}
token, err := ts.Token()
if err != nil {
return err
}
// parse user_id from oauth access token for later use
if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
info := struct {
UserID string `json:"sub,omitempty"`
}{}
if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
f.opt.UserID = info.UserID
}
}
f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
return nil
}
@@ -548,19 +491,10 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
CanHaveEmptyDirectories: true, // can have empty directories
NoMultiThreading: true, // can't have multiple threads downloading
}).Fill(ctx, f)
// new device id if necessary
if len(f.opt.DeviceID) != 32 {
f.opt.DeviceID = genDeviceID()
m.Set("device_id", f.opt.DeviceID)
fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
}
f.deviceID = genDeviceID()
if err := f.newClientWithPacer(ctx); err != nil {
// re-authorize if necessary
if strings.Contains(err.Error(), "invalid_grant") {
return f, f.reAuthorize(ctx)
}
return nil, err
}
return f, nil

View File

@@ -3052,16 +3052,9 @@ func (s3logger) Logf(classification logging.Classification, format string, v ...
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
ci := fs.GetConfig(ctx)
var awsConfig aws.Config
// Make the default static auth
v := aws.Credentials{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
// Try to fill in the config from the environment if env_auth=true
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
if opt.EnvAuth {
configOpts := []func(*awsconfig.LoadOptions) error{}
// Set the name of the profile if supplied
if opt.Profile != "" {
@@ -3086,7 +3079,13 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
case opt.SecretAccessKey == "":
return nil, errors.New("secret_access_key not found")
default:
// static credentials are already set
// Make the static auth
v := aws.Credentials{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
}
}
@@ -5989,13 +5988,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
//
// For transfers over https the SDK does not sign the body whereas over http it does
if len(w.f.opt.Endpoint) >= 5 && strings.EqualFold(w.f.opt.Endpoint[:5], "http:") {
do.DelayAccounting(3)
} else {
do.DelayAccounting(2)
}
do.DelayAccounting(3)
}
// create checksum of buffer for integrity checking

View File

@@ -883,7 +883,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var used, objects, total int64
var total, objects int64
if f.rootContainer != "" {
var container swift.Container
err = f.pacer.Call(func() (bool, error) {
@@ -893,9 +893,8 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
if err != nil {
return nil, fmt.Errorf("container info failed: %w", err)
}
used = container.Bytes
total = container.Bytes
objects = container.Count
total = container.QuotaBytes
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
@@ -906,19 +905,14 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, c := range containers {
used += c.Bytes
total += c.Bytes
objects += c.Count
total += c.QuotaBytes
}
}
usage = &fs.Usage{
Used: fs.NewUsageValue(used), // bytes in use
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
if total > 0 {
usage.Total = fs.NewUsageValue(total)
usage.Free = fs.NewUsageValue(total - used)
}
return usage, nil
}

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
@@ -25,7 +26,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
@@ -39,8 +39,6 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second // may needs to be increased, testing needed
decayConstant = 2 // bigger for slower decay, exponential
userAgentTemplae = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}`
)
// Globals
@@ -81,22 +79,15 @@ func init() {
// it doesn't seem worth making an exception for this
Default: (encoder.Display |
encoder.EncodeInvalidUtf8),
}, {
Name: "spoof_ua",
Help: "Set the user agent to match an official version of the yandex disk client. May help with upload performance.",
Default: true,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
SpoofUserAgent bool `config:"spoof_ua"`
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote yandex
@@ -263,12 +254,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
ctx, ci := fs.AddConfig(ctx)
if fs.ConfigOptionsInfo.Get("user_agent").IsDefault() && opt.SpoofUserAgent {
randomSessionID, _ := random.Password(128)
ci.UserAgent = fmt.Sprintf(userAgentTemplae, randomSessionID)
}
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, fmt.Errorf("couldn't read OAuth token: %w", err)
@@ -282,13 +267,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("couldn't save OAuth token: %w", err)
}
fs.Logf(nil, "Automatically upgraded OAuth config.")
log.Printf("Automatically upgraded OAuth config.")
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Yandex: %w", err)
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,

View File

@@ -2,8 +2,6 @@
package api
import (
"encoding/json"
"fmt"
"strconv"
"time"
)
@@ -14,12 +12,7 @@ type Time time.Time
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
s := string(data)
// If the time is a quoted string, strip quotes
if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
s = s[1 : len(s)-1]
}
millis, err := strconv.ParseInt(s, 10, 64)
millis, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
@@ -91,73 +84,6 @@ type ItemList struct {
Items []Item `json:"data"`
}
// UploadFileInfo is what the FileInfo field in the UnloadInfo struct decodes to
type UploadFileInfo struct {
OrgID string `json:"ORG_ID"`
ResourceID string `json:"RESOURCE_ID"`
LibraryID string `json:"LIBRARY_ID"`
Md5Checksum string `json:"MD5_CHECKSUM"`
ParentModelID string `json:"PARENT_MODEL_ID"`
ParentID string `json:"PARENT_ID"`
ResourceType int `json:"RESOURCE_TYPE"`
WmsSentTime string `json:"WMS_SENT_TIME"`
TabID string `json:"TAB_ID"`
Owner string `json:"OWNER"`
ResourceGroup string `json:"RESOURCE_GROUP"`
ParentModelName string `json:"PARENT_MODEL_NAME"`
Size int64 `json:"size"`
Operation string `json:"OPERATION"`
EventID string `json:"EVENT_ID"`
AuditInfo struct {
VersionInfo struct {
VersionAuthors []string `json:"versionAuthors"`
VersionID string `json:"versionId"`
IsMinorVersion bool `json:"isMinorVersion"`
VersionTime Time `json:"versionTime"`
VersionAuthorZuid []string `json:"versionAuthorZuid"`
VersionNotes string `json:"versionNotes"`
VersionNumber string `json:"versionNumber"`
} `json:"versionInfo"`
Resource struct {
Owner string `json:"owner"`
CreatedTime Time `json:"created_time"`
Creator string `json:"creator"`
ServiceType int `json:"service_type"`
Extension string `json:"extension"`
StatusChangeTime Time `json:"status_change_time"`
ResourceType int `json:"resource_type"`
Name string `json:"name"`
} `json:"resource"`
ParentInfo struct {
ParentName string `json:"parentName"`
ParentID string `json:"parentId"`
ParentType int `json:"parentType"`
} `json:"parentInfo"`
LibraryInfo struct {
LibraryName string `json:"libraryName"`
LibraryID string `json:"libraryId"`
LibraryType int `json:"libraryType"`
} `json:"libraryInfo"`
UpdateType string `json:"updateType"`
StatusCode string `json:"statusCode"`
} `json:"AUDIT_INFO"`
ZUID int64 `json:"ZUID"`
TeamID string `json:"TEAM_ID"`
}
// GetModTime fetches the modification time of the upload
//
// This tries a few places and if all fails returns the current time
func (ufi *UploadFileInfo) GetModTime() Time {
if t := ufi.AuditInfo.Resource.CreatedTime; !time.Time(t).IsZero() {
return t
}
if t := ufi.AuditInfo.Resource.StatusChangeTime; !time.Time(t).IsZero() {
return t
}
return Time(time.Now())
}
// UploadInfo is a simplified and slightly different version of
// the Item struct only used in the response to uploads
type UploadInfo struct {
@@ -165,21 +91,9 @@ type UploadInfo struct {
ParentID string `json:"parent_id"`
FileName string `json:"notes.txt"`
RessourceID string `json:"resource_id"`
Permalink string `json:"Permalink"`
FileInfo string `json:"File INFO"` // JSON encoded UploadFileInfo
} `json:"attributes"`
}
// GetUploadFileInfo decodes the embedded FileInfo
func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
var ufi UploadFileInfo
err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
if err != nil {
return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
}
return &ufi, nil
}
// UploadResponse is the response to a file Upload
type UploadResponse struct {
Uploads []UploadInfo `json:"data"`

View File

@@ -677,26 +677,25 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
if len(uploadResponse.Uploads) != 1 {
return nil, errors.New("upload: invalid response")
}
upload := uploadResponse.Uploads[0]
uploadInfo, err := upload.GetUploadFileInfo()
if err != nil {
return nil, fmt.Errorf("upload error: %w", err)
// Received meta data is missing size so we have to read it again.
// It doesn't always appear on first read so try again if necessary
var info *api.Item
const maxTries = 10
sleepTime := 100 * time.Millisecond
for i := 0; i < maxTries; i++ {
info, err = f.readMetaDataForID(ctx, uploadResponse.Uploads[0].Attributes.RessourceID)
if err != nil {
return nil, err
}
if info.Attributes.StorageInfo.Size != 0 || size == 0 {
break
}
fs.Debugf(f, "Size not available yet for %q - try again in %v (try %d/%d)", name, sleepTime, i+1, maxTries)
time.Sleep(sleepTime)
sleepTime *= 2
}
// Fill in the api.Item from the api.UploadFileInfo
var info api.Item
info.ID = upload.Attributes.RessourceID
info.Attributes.Name = upload.Attributes.FileName
// info.Attributes.Type = not used
info.Attributes.IsFolder = false
// info.Attributes.CreatedTime = not used
info.Attributes.ModifiedTime = uploadInfo.GetModTime()
// info.Attributes.UploadedTime = 0 not used
info.Attributes.StorageInfo.Size = uploadInfo.Size
info.Attributes.StorageInfo.FileCount = 0
info.Attributes.StorageInfo.FolderCount = 0
return &info, nil
return info, nil
}
// Put the object into the container

View File

@@ -32,9 +32,6 @@ def alter_doc(backend):
"""Alter the documentation for backend"""
rclone_bin_dir = Path(sys.path[0]).parent.absolute()
doc_file = "docs/content/"+backend+".md"
doc_file2 = "docs/content/"+backend+"/_index.md"
if not os.path.exists(doc_file) and os.path.exists(doc_file2):
doc_file = doc_file2
if not os.path.exists(doc_file):
raise ValueError("Didn't find doc file %s" % (doc_file,))
new_file = doc_file+"~new~"

View File

@@ -64,7 +64,7 @@ docs = [
"azurefiles.md",
"onedrive.md",
"opendrive.md",
"oracleobjectstorage/_index.md",
"oracleobjectstorage.md",
"qingstor.md",
"quatrix.md",
"sia.md",
@@ -81,6 +81,7 @@ docs = [
"smb.md",
"storj.md",
"sugarsync.md",
"tardigrade.md", # stub only to redirect to storj.md
"ulozto.md",
"uptobox.md",
"union.md",
@@ -158,7 +159,6 @@ def read_doc(doc):
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files.update(f for f in docs if os.path.exists(os.path.join(docpath,f)))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:

View File

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
}
logMap = map[string]string{}
logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to parse line: %q", line)
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Failed to read version: %v", err)
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Printf("Finding commits in %v not in stable %s", semver, stable)
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {

View File

@@ -1,51 +0,0 @@
// Ruleguard file implementing custom linting rules.
//
// Note that when used from golangci-lint (using the gocritic linter configured
// with the ruleguard check), because rule files are not handled by
// golangci-lint itself, changes will not invalidate the golangci-lint cache,
// and you must manually clean to cache (golangci-lint cache clean) for them to
// be considered, as explained here:
// https://www.quasilyte.dev/blog/post/ruleguard/#using-from-the-golangci-lint
//
// Note that this file is ignored from build with a build constraint, but using
// a different than "ignore" to avoid go mod tidy making dsl an indirect
// dependency, as explained here:
// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting
//go:build ruleguard
// +build ruleguard
// Package gorules implementing custom linting rules using ruleguard
package gorules
import "github.com/quasilyte/go-ruleguard/dsl"
// Suggest rewriting "log.(Print|Fatal|Panic)(f|ln)?" to
// "fs.(Printf|Fatalf|Panicf)", and do it if running golangci-lint with
// argument --fix. The suggestion wraps a single non-string single argument or
// variadic arguments in fmt.Sprint to be compatible with format string
// argument of fs functions.
//
// Caveats:
// - After applying the suggestions, imports may have to be fixed manually,
// removing unused "log", adding "github.com/rclone/rclone/fs" and "fmt",
// and if there was a variable named "fs" or "fmt" in the scope the name
// clash must be fixed.
// - Suggested code is incorrect when within fs package itself, due to the
// "fs."" prefix. Could handle it using condition
// ".Where(m.File().PkgPath.Matches(`github.com/rclone/rclone/fs`))"
// but not sure how to avoid duplicating all checks with and without this
// condition so haven't bothered yet.
func useFsLog(m dsl.Matcher) {
m.Match(`log.Print($x)`, `log.Println($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Log(nil, $x)`)
m.Match(`log.Print($*args)`, `log.Println($*args)`).Suggest(`fs.Log(nil, fmt.Sprint($args))`)
m.Match(`log.Printf($*args)`).Suggest(`fs.Logf(nil, $args)`)
m.Match(`log.Fatal($x)`, `log.Fatalln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Fatal(nil, $x)`)
m.Match(`log.Fatal($*args)`, `log.Fatalln($*args)`).Suggest(`fs.Fatal(nil, fmt.Sprint($args))`)
m.Match(`log.Fatalf($*args)`).Suggest(`fs.Fatalf(nil, $args)`)
m.Match(`log.Panic($x)`, `log.Panicln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Panic(nil, $x)`)
m.Match(`log.Panic($*args)`, `log.Panicln($*args)`).Suggest(`fs.Panic(nil, fmt.Sprint($args))`)
m.Match(`log.Panicf($*args)`).Suggest(`fs.Panicf(nil, $args)`)
}

View File

@@ -10,6 +10,7 @@ import (
"errors"
"flag"
"fmt"
"log"
"os"
"path"
"path/filepath"
@@ -231,7 +232,7 @@ func TestBisyncRemoteLocal(t *testing.T) {
t.Skip("path1 and path2 are the same remote")
}
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, remote, *argRemote2)
@@ -243,7 +244,7 @@ func TestBisyncLocalRemote(t *testing.T) {
t.Skip("path1 and path2 are the same remote")
}
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, *argRemote2, remote)
@@ -253,7 +254,7 @@ func TestBisyncLocalRemote(t *testing.T) {
// (useful for testing server-side copy/move)
func TestBisyncRemoteRemote(t *testing.T) {
_, remote, cleanup, err := fstest.RandomRemote()
fs.Logf(nil, "remote: %v", remote)
log.Printf("remote: %v", remote)
require.NoError(t, err)
defer cleanup()
testBisync(t, remote, remote)
@@ -449,13 +450,13 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
for _, dir := range srcDirs {
dirs = append(dirs, norm.NFC.String(dir.Remote()))
}
fs.Logf(nil, "checking initFs %s", initFs)
log.Printf("checking initFs %s", initFs)
fstest.CheckListingWithPrecision(b.t, initFs, items, dirs, initFs.Precision())
checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs1, initFs, true), "setting up path1")
fs.Logf(nil, "checking Path1 %s", b.fs1)
log.Printf("checking Path1 %s", b.fs1)
fstest.CheckListingWithPrecision(b.t, b.fs1, items, dirs, b.fs1.Precision())
checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs2, initFs, true), "setting up path2")
fs.Logf(nil, "checking path2 %s", b.fs2)
log.Printf("checking path2 %s", b.fs2)
fstest.CheckListingWithPrecision(b.t, b.fs2, items, dirs, b.fs2.Precision())
// Create log file
@@ -513,21 +514,21 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
require.NoError(b.t, err, "saving log file %s", savedLog)
if b.golden && !b.stopped {
fs.Logf(nil, "Store results to golden directory")
log.Printf("Store results to golden directory")
b.storeGolden()
return
}
errorCount := 0
if b.noCompare {
fs.Logf(nil, "Skip comparing results with golden directory")
log.Printf("Skip comparing results with golden directory")
errorCount = -2
} else {
errorCount = b.compareResults()
}
if b.noCleanup {
fs.Logf(nil, "Skip cleanup")
log.Printf("Skip cleanup")
} else {
b.cleanupCase(ctx)
}
@@ -1382,24 +1383,24 @@ func (b *bisyncTest) compareResults() int {
const divider = "----------------------------------------------------------"
if goldenNum != resultNum {
fs.Log(nil, divider)
fs.Log(nil, color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
fs.Logf(nil, " Golden count: %d", goldenNum)
fs.Logf(nil, " Result count: %d", resultNum)
fs.Logf(nil, " Golden files: %s", strings.Join(goldenFiles, ", "))
fs.Logf(nil, " Result files: %s", strings.Join(resultFiles, ", "))
log.Print(divider)
log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
log.Printf(" Golden count: %d", goldenNum)
log.Printf(" Result count: %d", resultNum)
log.Printf(" Golden files: %s", strings.Join(goldenFiles, ", "))
log.Printf(" Result files: %s", strings.Join(resultFiles, ", "))
}
for _, file := range goldenFiles {
if !resultSet.Has(file) {
errorCount++
fs.Logf(nil, " File found in Golden but not in Results: %s", file)
log.Printf(" File found in Golden but not in Results: %s", file)
}
}
for _, file := range resultFiles {
if !goldenSet.Has(file) {
errorCount++
fs.Logf(nil, " File found in Results but not in Golden: %s", file)
log.Printf(" File found in Results but not in Golden: %s", file)
}
}
@@ -1432,15 +1433,15 @@ func (b *bisyncTest) compareResults() int {
text, err := difflib.GetUnifiedDiffString(diff)
require.NoError(b.t, err, "diff failed")
fs.Log(nil, divider)
fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
log.Print(divider)
log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
fs.Logf(nil, "| %s", strings.TrimSpace(line))
log.Printf("| %s", strings.TrimSpace(line))
}
}
if errorCount > 0 {
fs.Log(nil, divider)
log.Print(divider)
}
if errorCount == 0 && goldenNum != resultNum {
return -1
@@ -1463,7 +1464,7 @@ func (b *bisyncTest) storeGolden() {
continue
}
if fileName == "backupdirs" {
fs.Logf(nil, "skipping: %v", fileName)
log.Printf("skipping: %v", fileName)
continue
}
goldName := b.toGolden(fileName)
@@ -1488,7 +1489,7 @@ func (b *bisyncTest) storeGolden() {
continue
}
if fileName == "backupdirs" {
fs.Logf(nil, "skipping: %v", fileName)
log.Printf("skipping: %v", fileName)
continue
}
text := b.mangleResult(b.goldenDir, fileName, true)
@@ -1848,7 +1849,7 @@ func fileType(fileName string) string {
// logPrintf prints a message to stdout and to the test log
func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
line := fmt.Sprintf(text, args...)
fs.Log(nil, line)
log.Print(line)
if b.logFile != nil {
_, err := fmt.Fprintln(b.logFile, line)
require.NoError(b.t, err, "writing log file")

View File

@@ -4,11 +4,11 @@ package cat
import (
"context"
"io"
"log"
"os"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -79,7 +79,7 @@ files, use:
usedHead := head > 0
usedTail := tail > 0
if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
fs.Fatalf(nil, "Can only use one of --head, --tail or --offset with --count")
log.Fatalf("Can only use one of --head, --tail or --offset with --count")
}
if head > 0 {
offset = 0

View File

@@ -10,6 +10,7 @@ import (
"context"
"errors"
"fmt"
"log"
"os"
"os/exec"
"path"
@@ -87,7 +88,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
_, fsPath, err := fspath.SplitFs(remote)
if err != nil {
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := cache.Get(context.Background(), remote)
switch err {
@@ -99,7 +100,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
return f, ""
default:
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
}
@@ -115,13 +116,13 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
if !fi.InActive() {
err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
err = fs.CountError(err)
fs.Fatal(nil, err.Error())
log.Fatal(err.Error())
}
// Limit transfers to this file
err := fi.AddFile(fileName)
if err != nil {
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
}
return f, fileName
@@ -143,7 +144,7 @@ func newFsDir(remote string) fs.Fs {
f, err := cache.Get(context.Background(), remote)
if err != nil {
err = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
cache.Pin(f) // pin indefinitely since it was on the CLI
return f
@@ -185,24 +186,24 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
var err error
dstRemote, dstFileName, err = fspath.Split(dstRemote)
if err != nil {
fs.Fatalf(nil, "Parsing %q failed: %v", args[1], err)
log.Fatalf("Parsing %q failed: %v", args[1], err)
}
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
fs.Fatalf(nil, "%q is a directory", args[1])
log.Fatalf("%q is a directory", args[1])
}
}
fdst, err := cache.Get(context.Background(), dstRemote)
switch err {
case fs.ErrorIsFile:
_ = fs.CountError(err)
fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file")
log.Fatalf("Source doesn't exist or is a directory and destination is a file")
case nil:
default:
_ = fs.CountError(err)
fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
cache.Pin(fdst) // pin indefinitely since it was on the CLI
return
@@ -212,13 +213,13 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
dstRemote, dstFileName, err := fspath.Split(args[0])
if err != nil {
fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err)
log.Fatalf("Parsing %q failed: %v", args[0], err)
}
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
fs.Fatalf(nil, "%q is a directory", args[0])
log.Fatalf("%q is a directory", args[0])
}
fdst = newFsDir(dstRemote)
return
@@ -327,9 +328,9 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
if cmdErr != nil {
nerrs := accounting.GlobalStats().GetErrors()
if nerrs <= 1 {
fs.Logf(nil, "Failed to %s: %v", cmd.Name(), cmdErr)
log.Printf("Failed to %s: %v", cmd.Name(), cmdErr)
} else {
fs.Logf(nil, "Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr)
log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr)
}
}
resolveExitCode(cmdErr)
@@ -382,7 +383,7 @@ func initConfig() {
// Set the global options from the flags
err := fs.GlobalOptionsInit()
if err != nil {
fs.Fatalf(nil, "Failed to initialise global options: %v", err)
log.Fatalf("Failed to initialise global options: %v", err)
}
ctx := context.Background()
@@ -420,16 +421,9 @@ func initConfig() {
}
// Start the remote control server if configured
_, err = rcserver.Start(ctx, &rc.Opt)
_, err = rcserver.Start(context.Background(), &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start remote control: %v", err)
}
// Start the metrics server if configured
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
log.Fatalf("Failed to start remote control: %v", err)
}
// Setup CPU profiling if desired
@@ -438,19 +432,19 @@ func initConfig() {
f, err := os.Create(*cpuProfile)
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
atexit.Register(func() {
pprof.StopCPUProfile()
err := f.Close()
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
})
}
@@ -462,17 +456,17 @@ func initConfig() {
f, err := os.Create(*memProfile)
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
err = f.Close()
if err != nil {
err = fs.CountError(err)
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
})
}
@@ -536,6 +530,6 @@ func Main() {
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
}
fs.Fatalf(nil, "Fatal error: %v", err)
log.Fatalf("Fatal error: %v", err)
}
}

View File

@@ -36,7 +36,6 @@ func init() {
configCommand.AddCommand(configReconnectCommand)
configCommand.AddCommand(configDisconnectCommand)
configCommand.AddCommand(configUserInfoCommand)
configCommand.AddCommand(configEncryptionCommand)
}
var configCommand = &cobra.Command{
@@ -519,91 +518,3 @@ system.
return nil
},
}
func init() {
configEncryptionCommand.AddCommand(configEncryptionSetCommand)
configEncryptionCommand.AddCommand(configEncryptionRemoveCommand)
configEncryptionCommand.AddCommand(configEncryptionCheckCommand)
}
var configEncryptionCommand = &cobra.Command{
Use: "encryption",
Short: `set, remove and check the encryption for the config file`,
Long: `This command sets, clears and checks the encryption for the config file using
the subcommands below.
`,
}
var configEncryptionSetCommand = &cobra.Command{
Use: "set",
Short: `Set or change the config file encryption password`,
Long: strings.ReplaceAll(`This command sets or changes the config file encryption password.
If there was no config password set then it sets a new one, otherwise
it changes the existing config password.
Note that if you are changing an encryption password using
|--password-command| then this will be called once to decrypt the
config using the old password and then again to read the new
password to re-encrypt the config.
When |--password-command| is called to change the password then the
environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
changing passwords programatically you can use the environment
variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unecrypted config file being on the disk
briefly.
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
config.ChangeConfigPasswordAndSave()
return nil
},
}
var configEncryptionRemoveCommand = &cobra.Command{
Use: "remove",
Short: `Remove the config file encryption password`,
Long: strings.ReplaceAll(`Remove the config file encryption password
This removes the config file encryption, returning it to un-encrypted.
If |--password-command| is in use, this will be called to supply the old config
password.
If the config was not encrypted then no error will be returned and
this command will do nothing.
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
config.RemoveConfigPasswordAndSave()
return nil
},
}
var configEncryptionCheckCommand = &cobra.Command{
Use: "check",
Short: `Check that the config file is encrypted`,
Long: strings.ReplaceAll(`This checks the config file is encrypted and that you can decrypt it.
It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
|--password-command|, otherwise it will prompt again for the password.
If the config file is not encrypted it will return a non zero exit code.
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
if !config.IsEncrypted() {
return errors.New("config file is NOT encrypted")
}
return nil
},
}

View File

@@ -3,7 +3,7 @@ package dedupe
import (
"context"
"fmt"
"log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
@@ -142,7 +142,7 @@ Or
if len(args) > 1 {
err := dedupeMode.Set(args[0])
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
args = args[1:]
}

View File

@@ -1,11 +1,10 @@
package genautocomplete
import (
"fmt"
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -51,7 +50,7 @@ current shell.
if args[0] == "-" {
err := cmd.Root.GenBashCompletionV2(os.Stdout, false)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
return
}
@@ -59,7 +58,7 @@ current shell.
}
err := cmd.Root.GenBashCompletionFileV2(out, false)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
},
}

View File

@@ -1,11 +1,10 @@
package genautocomplete
import (
"fmt"
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -40,7 +39,7 @@ If output_file is "-", then the output will be written to stdout.
if args[0] == "-" {
err := cmd.Root.GenFishCompletion(os.Stdout, true)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
return
}
@@ -48,7 +47,7 @@ If output_file is "-", then the output will be written to stdout.
}
err := cmd.Root.GenFishCompletionFile(out, true)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
},
}

View File

@@ -1,11 +1,10 @@
package genautocomplete
import (
"fmt"
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -32,13 +31,13 @@ If output_file is "-" or missing, then the output will be written to stdout.
if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
err := cmd.Root.GenPowerShellCompletion(os.Stdout)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
return
}
err := cmd.Root.GenPowerShellCompletionFile(args[0])
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
},
}

View File

@@ -1,11 +1,10 @@
package genautocomplete
import (
"fmt"
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -40,7 +39,7 @@ If output_file is "-", then the output will be written to stdout.
if args[0] == "-" {
err := cmd.Root.GenZshCompletion(os.Stdout)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
return
}
@@ -48,12 +47,12 @@ If output_file is "-", then the output will be written to stdout.
}
outFile, err := os.Create(out)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
defer func() { _ = outFile.Close() }()
err = cmd.Root.GenZshCompletion(outFile)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
},
}

View File

@@ -4,6 +4,7 @@ package gendocs
import (
"bytes"
"fmt"
"log"
"os"
"path"
"path/filepath"
@@ -13,7 +14,6 @@ import (
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/lib/file"
"github.com/spf13/cobra"
@@ -144,7 +144,7 @@ rclone.org website.`,
var buf bytes.Buffer
err := frontmatterTemplate.Execute(&buf, data)
if err != nil {
fs.Fatalf(nil, "Failed to render frontmatter template: %v", err)
log.Fatalf("Failed to render frontmatter template: %v", err)
}
return buf.String()
}

View File

@@ -3,6 +3,7 @@ package cmd
import (
"context"
"fmt"
"log"
"os"
"regexp"
"sort"
@@ -75,7 +76,7 @@ var helpFlags = &cobra.Command{
if len(args) > 0 {
re, err := filter.GlobStringToRegexp(args[0], false, true)
if err != nil {
fs.Fatalf(nil, "Invalid flag filter: %v", err)
log.Fatalf("Invalid flag filter: %v", err)
}
fs.Debugf(nil, "Flag filter: %s", re.String())
filterFlagsRe = re
@@ -285,7 +286,7 @@ func quoteString(v interface{}) string {
func showBackend(name string) {
backend, err := fs.Find(name)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
var standardOptions, advancedOptions fs.Options
done := map[string]struct{}{}

View File

@@ -41,7 +41,7 @@ var commandDefinition = &cobra.Command{
Short: `List directories and objects in the path in JSON format.`,
Long: `List directories and objects in the path in JSON format.
The output is an array of Items, where each Item looks like this:
The output is an array of Items, where each Item looks like this
{
"Hashes" : {
@@ -63,50 +63,44 @@ The output is an array of Items, where each Item looks like this:
"Tier" : "hot",
}
The exact set of properties included depends on the backend:
If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The
types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which
may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `.
- The property IsBucket will only be included for bucket-based remotes, and only
for directories that are buckets. It will always be omitted when value is not true.
- Properties Encrypted and EncryptedPath will only be included for encrypted
remotes, and (as mentioned below) only if the ` + "`--encrypted`" + ` option is set.
If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can
speed things up on remotes where reading the ModTime takes an extra
request (e.g. s3, swift).
Different options may also affect which properties are included:
If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can
speed things up on remotes where reading the MimeType takes an extra
request (e.g. s3, swift).
- If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The
types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which
may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `.
- If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can
speed things up on remotes where reading the ModTime takes an extra
request (e.g. s3, swift).
- If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can
speed things up on remotes where reading the MimeType takes an extra
request (e.g. s3, swift).
- If ` + "`--encrypted`" + ` is not specified the Encrypted and EncryptedPath
properties will be omitted - even for encrypted remotes.
- If ` + "`--metadata`" + ` is set then an additional Metadata property will be
returned. This will have [metadata](/docs/#metadata) in rclone standard format
as a JSON object.
If ` + "`--encrypted`" + ` is not specified the Encrypted will be omitted.
The default is to list directories and files/objects, but this can be changed
with the following options:
If ` + "`--dirs-only`" + ` is not specified files in addition to directories are
returned
- If ` + "`--dirs-only`" + ` is specified then directories will be returned
only, no files/objects.
- If ` + "`--files-only`" + ` is specified then files will be returned only,
no directories.
If ` + "`--files-only`" + ` is not specified directories in addition to the files
will be returned.
If ` + "`--stat`" + ` is set then the output is not an array of items,
but instead a single JSON blob will be returned about the item pointed to.
This will return an error if the item isn't found, however on bucket based
backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
return an empty directory, as it isn't possible to tell empty directories
from missing directories there.
If ` + "`--metadata`" + ` is set then an additional Metadata key will be returned.
This will have metadata in rclone standard format as a JSON object.
if ` + "`--stat`" + ` is set then a single JSON blob will be returned about the
item pointed to. This will return an error if the item isn't found.
However on bucket based backends (like s3, gcs, b2, azureblob etc) if
the item isn't found it will return an empty directory as it isn't
possible to tell empty directories from missing directories there.
The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
When used without ` + "`--recursive`" + ` the Path will always be the same as Name.
If the directory is a bucket in a bucket-based backend, then
"IsBucket" will be set to true. This key won't be present unless it is
"true".
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
@@ -116,8 +110,7 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
will be shown ("2017-05-31T16:15:57+01:00").
The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written on individual lines
(except with ` + "`--stat`" + `).
can be processed line by line as each item is written one to a line.
` + lshelp.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.37",

View File

@@ -5,6 +5,7 @@ package mount2
import (
"fmt"
"log"
"runtime"
"time"
@@ -149,7 +150,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
opts = append(opts, "ro")
}
if fsys.opt.WritebackCache {
fs.Printf(nil, "FIXME --write-back-cache not supported")
log.Printf("FIXME --write-back-cache not supported")
// FIXME opts = append(opts,fuse.WritebackCache())
}
// Some OS X only options

View File

@@ -5,6 +5,7 @@ import (
"context"
_ "embed"
"fmt"
"log"
"os"
"runtime"
"strings"
@@ -310,7 +311,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
err = mnt.Wait()
}
if err != nil {
fs.Fatalf(nil, "Fatal error: %v", err)
log.Fatalf("Fatal error: %v", err)
}
return
}
@@ -338,7 +339,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
atexit.Unregister(handle)
}
if err != nil {
fs.Fatalf(nil, "Fatal error: %v", err)
log.Fatalf("Fatal error: %v", err)
}
},
}

View File

@@ -42,9 +42,7 @@ When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
#... or on some systems
fusermount3 -u /path/to/local/mount
# OS X or Linux when using nfsmount
# OS X
umount /path/to/local/mount
The umount operation can fail, for example when the mountpoint is busy.
@@ -388,9 +386,9 @@ Note that systemd runs mount units without any environment variables including
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
and you should provide `--config` and `--cache-dir` explicitly as absolute
paths via rclone arguments.
Since mounting requires the `fusermount` or `fusermount3` program,
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
Since mounting requires the `fusermount` program, rclone will use the fallback
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
is present on this PATH.
### Rclone as Unix mount helper

View File

@@ -3,6 +3,7 @@ package mountlib
import (
"context"
"errors"
"log"
"sort"
"sync"
"time"
@@ -122,12 +123,12 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
_, err = mnt.Mount()
if err != nil {
fs.Logf(nil, "mount FAILED: %v", err)
log.Printf("mount FAILED: %v", err)
return nil, err
}
go func() {
if err = mnt.Wait(); err != nil {
fs.Logf(nil, "unmount FAILED: %v", err)
log.Printf("unmount FAILED: %v", err)
return
}
mountMu.Lock()

View File

@@ -929,23 +929,23 @@ func (u *UI) Run() error {
return fmt.Errorf("screen init: %w", err)
}
// Hijack fs.LogOutput so that it doesn't corrupt the screen.
if logOutput := fs.LogOutput; !log.Redirected() {
// Hijack fs.LogPrint so that it doesn't corrupt the screen.
if logPrint := fs.LogPrint; !log.Redirected() {
type log struct {
text string
level fs.LogLevel
}
var logs []log
fs.LogOutput = func(level fs.LogLevel, text string) {
fs.LogPrint = func(level fs.LogLevel, text string) {
if len(logs) > 100 {
logs = logs[len(logs)-100:]
}
logs = append(logs, log{level: level, text: text})
}
defer func() {
fs.LogOutput = logOutput
fs.LogPrint = logPrint
for i := range logs {
logOutput(logs[i].level, logs[i].text)
logPrint(logs[i].level, logs[i].text)
}
}()
}

View File

@@ -28,12 +28,12 @@ const (
// It returns a func which should be called to stop the stats.
func startProgress() func() {
stopStats := make(chan struct{})
oldLogOutput := fs.LogOutput
oldLogPrint := fs.LogPrint
oldSyncPrint := operations.SyncPrintf
if !log.Redirected() {
// Intercept the log calls if not logging to file or syslog
fs.LogOutput = func(level fs.LogLevel, text string) {
fs.LogPrint = func(level fs.LogLevel, text string) {
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
}
@@ -60,7 +60,7 @@ func startProgress() func() {
case <-stopStats:
ticker.Stop()
printProgress("")
fs.LogOutput = oldLogOutput
fs.LogPrint = oldLogPrint
operations.SyncPrintf = oldSyncPrint
fmt.Println("")
return

View File

@@ -3,11 +3,11 @@ package rcat
import (
"context"
"log"
"os"
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -64,7 +64,7 @@ destination which can use retries.`,
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) != 0 {
fs.Fatalf(nil, "nothing to read from standard input (stdin).")
log.Fatalf("nothing to read from standard input (stdin).")
}
fdst, dstFileName := cmd.NewFsDstFile(args)

View File

@@ -3,9 +3,9 @@ package rcd
import (
"context"
"log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
@@ -39,7 +39,7 @@ See the [rc documentation](/rc/) for more info on the rc flags.
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if rc.Opt.Enabled {
fs.Fatalf(nil, "Don't supply --rc flag when using rcd")
log.Fatalf("Don't supply --rc flag when using rcd")
}
// Start the rc
@@ -50,10 +50,10 @@ See the [rc documentation](/rc/) for more info on the rc flags.
s, err := rcserver.Start(context.Background(), &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start remote control: %v", err)
log.Fatalf("Failed to start remote control: %v", err)
}
if s == nil {
fs.Fatal(nil, "rc server not configured")
log.Fatal("rc server not configured")
}
// Notify stopping on exit

View File

@@ -14,6 +14,7 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
@@ -82,19 +83,19 @@ var cmdSelfUpdate = &cobra.Command{
}
if Opt.Package != "zip" {
if Opt.Package != "deb" && Opt.Package != "rpm" {
fs.Fatalf(nil, "--package should be one of zip|deb|rpm")
log.Fatalf("--package should be one of zip|deb|rpm")
}
if runtime.GOOS != "linux" {
fs.Fatalf(nil, ".deb and .rpm packages are supported only on Linux")
log.Fatalf(".deb and .rpm packages are supported only on Linux")
} else if os.Geteuid() != 0 && !Opt.Check {
fs.Fatalf(nil, ".deb and .rpm must be installed by root")
log.Fatalf(".deb and .rpm must be installed by root")
}
if Opt.Output != "" && !Opt.Check {
fmt.Println("Warning: --output is ignored with --package deb|rpm")
}
}
if err := InstallUpdate(context.Background(), &Opt); err != nil {
fs.Fatalf(nil, "Error: %v", err)
log.Fatalf("Error: %v", err)
}
},
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"log"
"net/http"
"net/url"
"os"
@@ -359,7 +360,7 @@ func (o *object) FilePath() string {
// Returns the ObjectID for the object. This is used in various ContentDirectory actions.
func (o object) ID() string {
if !path.IsAbs(o.Path) {
fs.Panicf(nil, "Relative object path: %s", o.Path)
log.Panicf("Relative object path: %s", o.Path)
}
if len(o.Path) == 1 {
return "0"

View File

@@ -5,6 +5,7 @@ import (
"encoding/xml"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/httptest"
@@ -30,7 +31,7 @@ func makeDefaultFriendlyName() string {
func makeDeviceUUID(unique string) string {
h := md5.New()
if _, err := io.WriteString(h, unique); err != nil {
fs.Panicf(nil, "makeDeviceUUID write failed: %s", err)
log.Panicf("makeDeviceUUID write failed: %s", err)
}
buf := h.Sum(nil)
return upnp.FormatUUID(buf)
@@ -40,7 +41,7 @@ func makeDeviceUUID(unique string) string {
func listInterfaces() []net.Interface {
ifs, err := net.Interfaces()
if err != nil {
fs.Logf(nil, "list network interfaces: %v", err)
log.Printf("list network interfaces: %v", err)
return []net.Interface{}
}
@@ -70,7 +71,7 @@ func didlLite(chardata string) string {
func mustMarshalXML(value interface{}) []byte {
ret, err := xml.MarshalIndent(value, "", " ")
if err != nil {
fs.Panicf(nil, "mustMarshalXML failed to marshal %v: %s", value, err)
log.Panicf("mustMarshalXML failed to marshal %v: %s", value, err)
}
return ret
}

View File

@@ -251,15 +251,6 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
case "vfs-used-is-size":
vfsOpt.UsedIsSize, err = opt.GetBool(key)
case "vfs-read-chunk-streams":
intVal, err = opt.GetInt64(key)
if err == nil {
if intVal >= 0 && intVal <= math.MaxInt {
vfsOpt.ChunkStreams = int(intVal)
} else {
err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
}
}
// unprefixed vfs options
case "no-modtime":

View File

@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
@@ -91,7 +92,7 @@ control the stats printing.
cmd.Run(false, true, command, func() error {
s, err := run(context.Background(), f, Opt)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
log.Fatal(err)
}
defer systemd.Notify()()

View File

@@ -6,11 +6,11 @@ import (
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"log"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
@@ -149,11 +149,11 @@ func TestRun(t *testing.T) {
privateKey, privateKeyErr := rsa.GenerateKey(rand.Reader, 2048)
if privateKeyErr != nil {
fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
log.Fatal("error generating test private key " + privateKeyErr.Error())
}
publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
if privateKeyErr != nil {
fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
log.Fatal("error generating test public key " + publicKeyError.Error())
}
publicKeyString := base64.StdEncoding.EncodeToString(publicKey.Marshal())

View File

@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/file"
sdActivation "github.com/rclone/rclone/lib/sdactivation"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"golang.org/x/crypto/ssh"
@@ -267,27 +266,10 @@ func (s *server) serve() (err error) {
// Once a ServerConfig has been configured, connections can be
// accepted.
var listener net.Listener
// In case we run in a socket-activated environment, listen on (the first)
// passed FD.
sdListeners, err := sdActivation.Listeners()
s.listener, err = net.Listen("tcp", s.opt.ListenAddr)
if err != nil {
return fmt.Errorf("unable to acquire listeners: %w", err)
return fmt.Errorf("failed to listen for connection: %w", err)
}
if len(sdListeners) > 0 {
if len(sdListeners) > 1 {
fs.LogPrintf(fs.LogLevelWarning, nil, "more than one listener passed, ignoring all but the first.\n")
}
listener = sdListeners[0]
} else {
listener, err = net.Listen("tcp", s.opt.ListenAddr)
if err != nil {
return fmt.Errorf("failed to listen for connection: %w", err)
}
}
s.listener = listener
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
go s.acceptConnections()

View File

@@ -115,17 +115,6 @@ directory.
By default the server binds to localhost:2022 - if you want it to be
reachable externally then supply ` + "`--addr :2022`" + ` for example.
This also supports being run with socket activation, in which case it will
listen on the first passed FD.
It can be configured with .socket and .service unit files as described in
https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + ` command:
systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
This will socket-activate rclone on the first connection to port 2222 over TCP.
Note that the default of ` + "`--vfs-cache-mode off`" + ` is fine for the rclone
sftp backend, but it may not be with other SFTP clients.

View File

@@ -3,11 +3,11 @@
package cmd
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
)
@@ -17,7 +17,7 @@ func SigInfoHandler() {
signal.Notify(signals, syscall.SIGINFO)
go func() {
for range signals {
fs.Printf(nil, "%v\n", accounting.GlobalStats())
log.Printf("%v\n", accounting.GlobalStats())
}
}()
}

View File

@@ -5,6 +5,7 @@ package info
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"strings"
@@ -24,7 +25,7 @@ func (r *results) checkBase32768() {
n := 0
dir, err := os.MkdirTemp("", "rclone-base32768-files")
if err != nil {
fs.Logf(nil, "Failed to make temp dir: %v", err)
log.Printf("Failed to make temp dir: %v", err)
return
}
defer func() {
@@ -40,7 +41,7 @@ func (r *results) checkBase32768() {
fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
err = os.WriteFile(fileName, []byte(fileName), 0666)
if err != nil {
fs.Logf(nil, "write %q failed: %v", fileName, err)
log.Printf("write %q failed: %v", fileName, err)
return
}
n++
@@ -49,7 +50,7 @@ func (r *results) checkBase32768() {
// Make a local fs
fLocal, err := fs.NewFs(ctx, dir)
if err != nil {
fs.Logf(nil, "Failed to make local fs: %v", err)
log.Printf("Failed to make local fs: %v", err)
return
}
@@ -60,14 +61,14 @@ func (r *results) checkBase32768() {
s = fspath.JoinRootPath(s, testDir)
fRemote, err := fs.NewFs(ctx, s)
if err != nil {
fs.Logf(nil, "Failed to make remote fs: %v", err)
log.Printf("Failed to make remote fs: %v", err)
return
}
defer func() {
err := operations.Purge(ctx, r.f, testDir)
if err != nil {
fs.Logf(nil, "Failed to purge test directory: %v", err)
log.Printf("Failed to purge test directory: %v", err)
return
}
}()
@@ -75,7 +76,7 @@ func (r *results) checkBase32768() {
// Sync local to remote
err = sync.Sync(ctx, fRemote, fLocal, false)
if err != nil {
fs.Logf(nil, "Failed to sync remote fs: %v", err)
log.Printf("Failed to sync remote fs: %v", err)
return
}
@@ -85,7 +86,7 @@ func (r *results) checkBase32768() {
Fsrc: fLocal,
})
if err != nil {
fs.Logf(nil, "Failed to check remote fs: %v", err)
log.Printf("Failed to check remote fs: %v", err)
return
}

View File

@@ -10,6 +10,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"regexp"
@@ -76,7 +77,7 @@ code for each one.
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !checkBase32768 && !all {
fs.Fatalf(nil, "no tests selected - select a test or use --all")
log.Fatalf("no tests selected - select a test or use --all")
}
if all {
checkNormalization = true
@@ -92,7 +93,7 @@ code for each one.
fs.Infof(f, "Created temporary directory for test files: %s", tempDirPath)
err := f.Mkdir(context.Background(), "")
if err != nil {
fs.Fatalf(nil, "couldn't create temporary directory: %v", err)
log.Fatalf("couldn't create temporary directory: %v", err)
}
cmd.Run(false, false, command, func() error {

View File

@@ -7,12 +7,12 @@ import (
"flag"
"fmt"
"io"
"log"
"os"
"sort"
"strconv"
"github.com/rclone/rclone/cmd/test/info/internal"
"github.com/rclone/rclone/fs"
)
func main() {
@@ -24,21 +24,21 @@ func main() {
for _, fn := range args {
f, err := os.Open(fn)
if err != nil {
fs.Fatalf(nil, "Unable to open %q: %s", fn, err)
log.Fatalf("Unable to open %q: %s", fn, err)
}
var remote internal.InfoReport
dec := json.NewDecoder(f)
err = dec.Decode(&remote)
if err != nil {
fs.Fatalf(nil, "Unable to decode %q: %s", fn, err)
log.Fatalf("Unable to decode %q: %s", fn, err)
}
if remote.ControlCharacters == nil {
fs.Logf(nil, "Skipping remote %s: no ControlCharacters", remote.Remote)
log.Printf("Skipping remote %s: no ControlCharacters", remote.Remote)
} else {
remotes = append(remotes, remote)
}
if err := f.Close(); err != nil {
fs.Fatalf(nil, "Closing %q failed: %s", fn, err)
log.Fatalf("Closing %q failed: %s", fn, err)
}
}
@@ -117,11 +117,11 @@ func main() {
} else {
f, err := os.Create(*fOut)
if err != nil {
fs.Fatalf(nil, "Unable to create %q: %s", *fOut, err)
log.Fatalf("Unable to create %q: %s", *fOut, err)
}
defer func() {
if err := f.Close(); err != nil {
fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
log.Fatalln("Error writing csv:", err)
}
}()
writer = f
@@ -130,9 +130,9 @@ func main() {
w := csv.NewWriter(writer)
err := w.WriteAll(records)
if err != nil {
fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
log.Fatalln("Error writing csv:", err)
} else if err := w.Error(); err != nil {
fs.Fatal(nil, fmt.Sprint("Error writing csv:", err))
log.Fatalln("Error writing csv:", err)
}
}

View File

@@ -4,6 +4,7 @@ package makefiles
import (
"io"
"log"
"math"
"math/rand"
"os"
@@ -116,7 +117,7 @@ var makefileCmd = &cobra.Command{
var size fs.SizeSuffix
err := size.Set(args[0])
if err != nil {
fs.Fatalf(nil, "Failed to parse size %q: %v", args[0], err)
log.Fatalf("Failed to parse size %q: %v", args[0], err)
}
start := time.Now()
fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size)
@@ -147,7 +148,7 @@ func commonInit() {
}
randSource = rand.New(rand.NewSource(seed))
if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 {
fs.Fatal(nil, "Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
log.Fatal("Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
}
switch {
case zero, sparse:
@@ -275,12 +276,12 @@ func (d *dir) list(path string, output []string) []string {
func writeFile(dir, name string, size int64) {
err := file.MkdirAll(dir, 0777)
if err != nil {
fs.Fatalf(nil, "Failed to make directory %q: %v", dir, err)
log.Fatalf("Failed to make directory %q: %v", dir, err)
}
path := filepath.Join(dir, name)
fd, err := os.Create(path)
if err != nil {
fs.Fatalf(nil, "Failed to open file %q: %v", path, err)
log.Fatalf("Failed to open file %q: %v", path, err)
}
if sparse {
err = fd.Truncate(size)
@@ -288,11 +289,11 @@ func writeFile(dir, name string, size int64) {
_, err = io.CopyN(fd, source, size)
}
if err != nil {
fs.Fatalf(nil, "Failed to write %v bytes to file %q: %v", size, path, err)
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
}
err = fd.Close()
if err != nil {
fs.Fatalf(nil, "Failed to close file %q: %v", path, err)
log.Fatalf("Failed to close file %q: %v", path, err)
}
fs.Infof(path, "Written file size %v", fs.SizeSuffix(size))
}

View File

@@ -6,6 +6,7 @@ import (
"context"
"errors"
"fmt"
"log"
"time"
"github.com/rclone/rclone/cmd"
@@ -85,7 +86,7 @@ then add the ` + "`--localtime`" + ` flag.
func newFsDst(args []string) (f fs.Fs, remote string) {
root, remote, err := fspath.Split(args[0])
if err != nil {
fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err)
log.Fatalf("Parsing %q failed: %v", args[0], err)
}
if root == "" {
root = "."

View File

@@ -6,13 +6,13 @@
package cmdtest
import (
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,14 +26,14 @@ func TestMain(m *testing.M) {
// started by Go test => execute tests
err := os.Setenv(rcloneTestMain, "true")
if err != nil {
fs.Fatalf(nil, "Unable to set %s: %s", rcloneTestMain, err.Error())
log.Fatalf("Unable to set %s: %s", rcloneTestMain, err.Error())
}
os.Exit(m.Run())
} else {
// started by func rcloneExecMain => call rclone main in cmdtest.go
err := os.Unsetenv(rcloneTestMain)
if err != nil {
fs.Fatalf(nil, "Unable to unset %s: %s", rcloneTestMain, err.Error())
log.Fatalf("Unable to unset %s: %s", rcloneTestMain, err.Error())
}
main()
}
@@ -47,7 +47,7 @@ const rcloneTestMain = "RCLONE_TEST_MAIN"
func rcloneExecMain(env string, args ...string) (string, error) {
_, found := os.LookupEnv(rcloneTestMain)
if !found {
fs.Fatalf(nil, "Unexpected execution path: %s is missing.", rcloneTestMain)
log.Fatalf("Unexpected execution path: %s is missing.", rcloneTestMain)
}
// make a call to self to execute rclone main in a predefined environment (enters TestMain above)
command := exec.Command(os.Args[0], args...)

View File

@@ -6,9 +6,7 @@ package cmdtest
import (
"os"
"regexp"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -346,42 +344,4 @@ func TestEnvironmentVariables(t *testing.T) {
env = ""
out, err = rcloneEnv(env, "version", "-vv", "--use-json-log")
jsonLogOK()
// Find all the File filter lines in out and return them
parseFileFilters := func(out string) (extensions []string) {
// Match: - (^|/)[^/]*\.jpg$
find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`)
for _, line := range strings.Split(out, "\n") {
if m := find.FindStringSubmatch(line); m != nil {
extensions = append(extensions, m[1])
}
}
return extensions
}
// Make sure that multiple valued (stringArray) environment variables are handled properly
env = ``
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
require.NoError(t, err)
assert.Equal(t, []string{"gif", "tif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE=*.jpg`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "gif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE=*.jpg,*.png`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "png", "gif", "tif"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE="*.jpg","*.png"`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
require.NoError(t, err)
assert.Equal(t, []string{"jpg", "png"}, parseFileFilters(out))
env = `RCLONE_EXCLUDE="*.,,,","*.png"`
out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters")
require.NoError(t, err)
assert.Equal(t, []string{",,,", "png"}, parseFileFilters(out))
}

View File

@@ -883,9 +883,3 @@ put them back in again.` >}}
* Georg Welzel <gwelzel@mailbox.org>
* John Oxley <john.oxley@gmail.com> <joxley@meta.com>
* Pawel Palucha <pawel.palucha@aetion.com>
* crystalstall <crystalruby@qq.com>
* nipil <nipil@users.noreply.github.com>
* yuval-cloudinary <46710068+yuval-cloudinary@users.noreply.github.com>
* Mathieu Moreau <mrx23dot@users.noreply.github.com>
* fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
* Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>

View File

@@ -5,146 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.68.1 - 2024-09-24
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
* Bug Fixes
* build: Fix docker release build (ttionya)
* doc fixes (Nick Craig-Wood, Pawel Palucha)
* fs
* Fix `--dump filters` not always appearing (Nick Craig-Wood)
* Fix setting `stringArray` config values from environment variables (Nick Craig-Wood)
* rc: Fix default value of `--metrics-addr` (Nick Craig-Wood)
* serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam)
* Onedrive
* Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)
* Pikpak
* Fix login issue where token retrieval fails (wiserain)
* S3
* Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood)
## v1.68.0 - 2024-09-08
[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0)
* New backends
* [Files.com](/filescom/) (Sam Harrison)
* [Gofile](/gofile/) (Nick Craig-Wood)
* [Pixeldrain](/pixeldrain/) (Fornax)
* Changed backends
* [S3](/s3/) backend updated to use [AWS SDKv2](https://github.com/aws/aws-sdk-go-v2) as v1 is now unsupported.
* The matrix of providers and auth methods is huge and there could be problems with obscure combinations.
* Please report problems in a [new issue](https://github.com/rclone/rclone/issues/new/choose) on Github.
* New commands
* [config encryption](/commands/rclone_config_encryption/): set, remove and check to manage config file encryption (Nick Craig-Wood)
* New Features
* build
* Update to go1.23 and make go1.21 the minimum required version (Nick Craig-Wood)
* Update all dependencies (Nick Craig-Wood)
* Disable wasm/js build due to [go bug #64856](https://github.com/golang/go/issues/64856) (Nick Craig-Wood)
* Enable custom linting rules with ruleguard via gocritic (albertony)
* Update logging statements to make `--use-json-log` work always (albertony)
* Adding new code quality tests and fixing the fallout (albertony)
* config
* Internal config re-organised to be more consistent and make it available from the rc (Nick Craig-Wood)
* Avoid remotes with empty names from the environment (albertony)
* Make listing of remotes more consistent (albertony)
* Make getting config values more consistent (albertony)
* Use `--password-command` to set config file password if supplied (Nick Craig-Wood)
* doc fixes (albertony, crystalstall, David Seifert, Eng Zer Jun, Ernie Hershey, Florian Klink, John Oxley, kapitainsky, Mathieu Moreau, Nick Craig-Wood, nipil, Pétr Bozsó, Russ Bubley, Sam Harrison, Thearas, URenko, Will Miles, yuval-cloudinary)
* fs: Allow semicolons as well as spaces in `--bwlimit` timetable parsing (Kyle Reynolds)
* help
* Global flags help command now takes glob filter (albertony)
* Make help command output less distracting (albertony)
* lib/encoder: Add Raw encoding for use where no encoding at all is required, eg `--local-encoding Raw` (URenko)
* listremotes: Added options for filtering, ordering and json output (albertony)
* nfsmount
* Make the `--sudo` flag work for umount as well as mount (Nick Craig-Wood)
* Add `-o tcp` option to NFS mount options to fix mounting under Linux (Nick Craig-Wood)
* operations: copy: generate stable partial suffix (Georg Welzel)
* rc
* Add [options/info](/rc/#options-info) call to enumerate options (Nick Craig-Wood)
* Add option blocks parameter to [options/get](/rc/#options-get) and [options/info](/rc/#options-info) (Nick Craig-Wood)
* Add [vfs/queue](/rc/#vfs-queue) to show the status of the upload queue (Nick Craig-Wood)
* Add [vfs/queue-set-expiry](/rc/#vfs-queue-set-expiry) to adjust expiry of items in the VFS queue (Nick Craig-Wood)
* Add `--unix-socket` option to `rc` command (Florian Klink)
* Prevent unmount rc command from sending a `STOPPING=1` sd-notify message (AThePeanut4)
* rcserver: Implement [prometheus metrics](/docs/#metrics) on a dedicated port (Oleg Kunitsyn)
* serve dlna
* Also look at "Subs" subdirectory (Florian Klink)
* Don't swallow `video.{idx,sub}` (Florian Klink)
* Set more correct mime type (Florian Klink)
* serve nfs
* Implement on disk cache for file handles selected with `--nfs-cache-type` (Nick Craig-Wood)
* Add tracing to filesystem calls (Nick Craig-Wood)
* Mask unimplemented error from chmod (Nick Craig-Wood)
* Unify the nfs library logging with rclone's logging better (Nick Craig-Wood)
* Fix incorrect user id and group id exported to NFS (Nick Craig-Wood)
* serve s3
* Implement `--auth-proxy` (Sawjan Gurung)
* Update to AWS SDKv2 by updating `github.com/rclone/gofakes3` (Nick Craig-Wood)
* Bug Fixes
* bisync: Fix sync time problems with backends that round time (eg Dropbox) (nielash)
* serve dlna: Fix panic: invalid argument to Int63n (Nick Craig-Wood)
* VFS
* Add [--vfs-read-chunk-streams](/commands/rclone_mount/#vfs-read-chunk-streams-0-1) to parallel read chunks from files (Nick Craig-Wood)
* This can increase mount performance on high bandwidth or large latency links
* Fix cache encoding with special characters (URenko)
* Local
* Fix encoding of root path fix (URenko)
* Add server-side copy (using clone) with xattrs on macOS (nielash)
* `--local-no-clone` flag to disable cloning for server-side copies (nielash)
* Support setting custom `--metadata` during server-side Copy (nielash)
* Azure Blob
* Allow anonymous access for public resources (Nick Craig-Wood)
* B2
* Include custom upload headers in large file info (Pat Patterson)
* Drive
* Fix copying Google Docs to a backend which only supports SHA1 (Nick Craig-Wood)
* Fichier
* Fix detection of Flood Detected error (Nick Craig-Wood)
* Fix server side move (Nick Craig-Wood)
* HTTP
* Reload client certificates on expiry (Saleh Dindar)
* Support listening on passed FDs (Florian Klink)
* Jottacloud
* Fix setting of metadata on server side move (albertony)
* Onedrive
* Fix nil pointer error when uploading small files (Nick Craig-Wood)
* Pcloud
* Implement `SetModTime` (Georg Welzel)
* Implement `OpenWriterAt` feature to enable multipart uploads (Georg Welzel)
* Pikpak
* Improve data consistency by ensuring async tasks complete (wiserain)
* Implement custom hash to replace wrong sha1 (wiserain)
* Fix error with `copyto` command (wiserain)
* Optimize file move by removing unnecessary `readMetaData()` call (wiserain)
* Non-buffered hash calculation for local source files (wiserain)
* Optimize upload by pre-fetching gcid from API (wiserain)
* Correct file transfer progress for uploads by hash (wiserain)
* Update to using AWS SDK v2 (wiserain)
* S3
* Update to using AWS SDK v2 (Nick Craig-Wood)
* Add `--s3-sdk-log-mode` to control SDKv2 debugging (Nick Craig-Wood)
* Fix incorrect region for Magalu provider (Filipe Herculano)
* Allow restoring from intelligent-tiering storage class (Pawel Palucha)
* SFTP
* Use `uint32` for mtime to save memory (Tomasz Melcer)
* Ignore useless errors when closing the connection pool (Nick Craig-Wood)
* Support listening on passed FDs (Florian Klink)
* Swift
* Add workarounds for bad listings in Ceph RGW (Paul Collins)
* Add total/free space info in `about` command. (fsantagostinobietti)
* Ulozto
* Fix upload of > 2GB files on 32 bit platforms (Tobias Markus)
* WebDAV
* Add `--webdav-unix-socket-path` to connect to a unix socket (Florian Klink)
* Yandex
* Implement custom user agent to help with upload speeds (Sebastian Bünger)
* Zoho
* Fix inefficiencies uploading with new API to avoid throttling (Nick Craig-Wood)
## v1.67.0 - 2024-06-14
[See commits](https://github.com/rclone/rclone/compare/v1.66.0...v1.67.0)

View File

@@ -3,11 +3,12 @@ title: "rclone"
description: "Show help for rclone commands, flags and backends."
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/ and as part of making a release run "make commanddocs"
---
# rclone
## rclone
Show help for rclone commands, flags and backends.
## Synopsis
### Synopsis
Rclone syncs files to and from cloud storage providers as well as
mounting them, listing them in lots of different ways.
@@ -21,7 +22,7 @@ documentation, changelog and configuration walkthroughs.
rclone [flags]
```
## Options
### Options
```
--alias-description string Description of the remote
@@ -122,7 +123,7 @@ rclone [flags]
--box-token-url string Token server url
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
--bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.
--bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
--ca-cert stringArray CA certificate used to verify servers
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)
@@ -149,7 +150,7 @@ rclone [flags]
--cache-writes Cache file data on writes through the FS
--check-first Do all the checks before starting transfers
--checkers int Number of checkers to run in parallel (default 8)
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only).
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
--chunker-description string Description of the remote
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
@@ -160,7 +161,7 @@ rclone [flags]
--color AUTO|NEVER|ALWAYS When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default AUTO)
--combine-description string Description of the remote
--combine-upstreams SpaceSepList Upstreams for combining
--compare-dest stringArray Include additional server-side paths during comparison
--compare-dest stringArray Include additional comma separated server-side paths during comparison
--compress-description string Description of the remote
--compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode (default "gzip")
@@ -191,7 +192,7 @@ rclone [flags]
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features (use --disable help to see a list)
--disable-http-keep-alives Disable HTTP keep-alives and use each connection once
--disable-http-keep-alives Disable HTTP keep-alives and use each connection once.
--disable-http2 Disable HTTP/2 in the global transport
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs
@@ -287,12 +288,6 @@ rclone [flags]
--filefabric-version string Version read from the file fabric
--files-from stringArray Read list of source-file names from file (use - to read from stdin)
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
--filescom-api-key string The API key used to authenticate with Files.com
--filescom-description string Description of the remote
--filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--filescom-password string The password used to authenticate with Files.com (obscured)
--filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com)
--filescom-username string The username used to authenticate with Files.com
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--fix-case Force rename of case insensitive dest to match source
@@ -341,12 +336,6 @@ rclone [flags]
--gcs-token string OAuth Access Token as a JSON blob
--gcs-token-url string Token server url
--gcs-user-project string User project
--gofile-access-token string API Access token
--gofile-account-id string Account ID
--gofile-description string Description of the remote
--gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation)
--gofile-list-chunk int Number of items to list in each call (default 1000)
--gofile-root-folder-id string ID of the root folder
--gphotos-auth-url string Auth server URL
--gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
--gphotos-batch-mode string Upload file batching sync|async|off (default "sync")
@@ -456,7 +445,6 @@ rclone [flags]
--local-description string Description of the remote
--local-encoding Encoding The encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-clone Disable reflink cloning for server-side copies
--local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads
@@ -510,22 +498,6 @@ rclone [flags]
--metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
--metadata-mapper SpaceSepList Program to run to transforming metadata before upload
--metadata-set stringArray Add metadata key=value when uploading
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
--metrics-baseurl string Prefix for URLs - leave blank for root
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
--metrics-client-ca string Client certificate authority to verify clients with
--metrics-htpasswd string A htpasswd file - if not provided no authentication is done
--metrics-key string TLS PEM Private key
--metrics-max-header-bytes int Maximum size of request header (default 4096)
--metrics-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
--metrics-pass string Password for authentication
--metrics-realm string Realm for authentication
--metrics-salt string Password hashing salt (default "dlPL2MqE")
--metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
--metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--metrics-template string User-specified template
--metrics-user string User name for authentication
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
--modify-window Duration Max time diff to be considered the same (default 1ns)
@@ -616,22 +588,21 @@ rclone [flags]
--pcloud-token string OAuth Access Token as a JSON blob
--pcloud-token-url string Token server url
--pcloud-username string Your pcloud username
--pikpak-auth-url string Auth server URL
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
--pikpak-client-id string OAuth Client Id
--pikpak-client-secret string OAuth Client Secret
--pikpak-description string Description of the remote
--pikpak-device-id string Device ID used for authorization
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
--pikpak-pass string Pikpak password (obscured)
--pikpak-root-folder-id string ID of the root folder
--pikpak-token string OAuth Access Token as a JSON blob
--pikpak-token-url string Token server url
--pikpak-trashed-only Only show files that are in the trash
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
--pikpak-user string Pikpak username
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
--pixeldrain-api-key string API key for your pixeldrain account
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
--pixeldrain-description string Description of the remote
--pixeldrain-root-folder-id string Root of the filesystem to use (default "me")
--premiumizeme-auth-url string Auth server URL
--premiumizeme-client-id string OAuth Client Id
--premiumizeme-client-secret string OAuth Client Secret
@@ -680,12 +651,12 @@ rclone [flags]
--quatrix-skip-project-folders Skip project folders in operations
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
--rc-baseurl string Prefix for URLs - leave blank for root
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-enable-metrics Enable the Prometheus metrics path at the remote control server
--rc-enable-metrics Enable prometheus metrics on /metrics
--rc-files string Path to local files to serve on the HTTP server
--rc-htpasswd string A htpasswd file - if not provided no authentication is done
--rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
@@ -741,7 +712,6 @@ rclone [flags]
--s3-provider string Choose your S3 provider
--s3-region string Region to connect to
--s3-requester-pays Enables requester pays option when interacting with S3 bucket
--s3-sdk-log-mode Bits Set to debug the SDK (default Off)
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
--s3-session-token string An AWS session token
@@ -752,6 +722,7 @@ rclone [flags]
--s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3
--s3-sts-endpoint string Endpoint for STS
--s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
@@ -761,7 +732,6 @@ rclone [flags]
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
--s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset)
--s3-v2-auth If true use v2 authentication
--s3-version-at Time Show file versions as they were at the specified time (default off)
--s3-version-deleted Show deleted file markers when using versions
@@ -882,12 +852,10 @@ rclone [flags]
--swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form
--swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page
--swift-key string API key or password (OS_PASSWORD)
--swift-leave-parts-on-error If true avoid calling abort upload on a failure
--swift-no-chunk Don't chunk files during streaming upload
--swift-no-large-objects Disable support for static and dynamic large objects
--swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
@@ -898,7 +866,7 @@ rclone [flags]
--swift-user string User name to log in (OS_USERNAME)
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER (default "DAEMON")
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--temp-dir string Directory rclone will use for temporary files (default "/tmp")
--timeout Duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this
@@ -929,7 +897,7 @@ rclone [flags]
--use-json-log Use json log format
--use-mmap Use mmap allocator (see docs)
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.1")
--user-agent string Set the user-agent to a specified string (default "rclone/v1.67.0")
-v, --verbose count Print lots more stuff (repeat for more)
-V, --version Print the version number
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
@@ -942,7 +910,6 @@ rclone [flags]
--webdav-owncloud-exclude-shares Exclude ownCloud shares
--webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
--webdav-pass string Password (obscured)
--webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the WebDAV site/service/software you are using
@@ -952,7 +919,6 @@ rclone [flags]
--yandex-description string Description of the remote
--yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
--yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true)
--yandex-token string OAuth Access Token as a JSON blob
--yandex-token-url string Token server url
--zoho-auth-url string Auth server URL
@@ -965,7 +931,7 @@ rclone [flags]
--zoho-token-url string Token server url
```
## See Also
### SEE ALSO
* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.

View File

@@ -10,7 +10,8 @@ Get quota information from the remote.
## Synopsis
Prints quota information about a remote to standard
`rclone about` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.
E.g. Typical output from `rclone about remote:` is:
@@ -69,9 +70,10 @@ rclone about remote: [flags]
--json Format output as JSON
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Remote authorization.
## Synopsis
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
@@ -31,9 +32,10 @@ rclone authorize [flags]
--template string The path to a custom Go template for generating HTML responses
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Run a backend-specific command.
## Synopsis
This runs a backend-specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.
@@ -49,12 +50,10 @@ rclone backend <command> remote:path [opts] <args> [flags]
-o, --option stringArray Option in the form name=value or name
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -62,7 +61,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -63,17 +63,15 @@ rclone bisync remote1:path1 remote2:path2 [flags]
--workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Copy Options
## Copy Options
Flags for anything which can copy a file
Flags for anything which can Copy a file.
```
--check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only).
--compare-dest stringArray Include additional comma separated server-side paths during comparison
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
--cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD)
--ignore-case-sync Ignore case when synchronizing
@@ -105,9 +103,9 @@ Flags for anything which can copy a file
-u, --update Skip files that are newer on the destination
```
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -115,9 +113,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -144,7 +142,9 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,7 +10,8 @@ Concatenates any files and sends them to stdout.
## Synopsis
Sends any files to standard output.
rclone cat sends any files to standard output.
You can use it like this to output a single file
@@ -58,12 +59,10 @@ rclone cat remote:path [flags]
--tail int Only print the last N characters
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -90,16 +89,18 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
### Listing Options
## Listing Options
Flags for listing directories
Flags for listing directories.
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -9,6 +9,7 @@ Checks the files in the source and destination match.
## Synopsis
Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files that don't
match. It doesn't alter the source or destination.
@@ -72,20 +73,18 @@ rclone check source:path dest:path [flags]
--one-way Check one way only, source files must exist on remote
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Check Options
## Check Options
Flags used for check commands
Flags used for `rclone check`.
```
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
```
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -112,16 +111,18 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
### Listing Options
## Listing Options
Flags for listing directories
Flags for listing directories.
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Checks the files in the destination against a SUM file.
## Synopsis
Checks that hashsums of destination files match the SUM file.
It compares hashes (MD5, SHA1, etc) and logs a report of files which
don't match. It doesn't alter the file system.
@@ -66,12 +67,10 @@ rclone checksum <hash> sumfile dst:path [flags]
--one-way Check one way only, source files must exist on remote
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -98,16 +97,18 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
### Listing Options
## Listing Options
Flags for listing directories
Flags for listing directories.
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Clean up the remote if possible.
## Synopsis
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
@@ -24,12 +25,10 @@ rclone cleanup remote:path [flags]
-h, --help help for cleanup
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -37,7 +36,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -12,6 +12,7 @@ Output completion script for a given shell.
## Synopsis
Generates a shell completion script for rclone.
Run with `--help` to list the supported shells.
@@ -22,9 +23,10 @@ Run with `--help` to list the supported shells.
-h, --help help for completion
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone.

View File

@@ -11,11 +11,12 @@ Output bash completion script for rclone.
## Synopsis
Generates a bash shell autocompletion script for rclone.
By default, when run without any arguments,
rclone completion bash
rclone genautocomplete bash
the generated script will be written to
@@ -50,9 +51,10 @@ rclone completion bash [output_file] [flags]
-h, --help help for bash
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.

View File

@@ -11,12 +11,13 @@ Output fish completion script for rclone.
## Synopsis
Generates a fish autocompletion script for rclone.
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, e.g.
sudo rclone completion fish
sudo rclone genautocomplete fish
Logout and login again to use the autocompletion scripts, or source
them directly
@@ -39,9 +40,10 @@ rclone completion fish [output_file] [flags]
-h, --help help for fish
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.

View File

@@ -11,6 +11,7 @@ Output powershell completion script for rclone.
## Synopsis
Generate the autocompletion script for powershell.
To load completions in your current shell session:
@@ -33,9 +34,10 @@ rclone completion powershell [output_file] [flags]
-h, --help help for powershell
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.

View File

@@ -11,12 +11,13 @@ Output zsh completion script for rclone.
## Synopsis
Generates a zsh autocompletion script for rclone.
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, e.g.
sudo rclone completion zsh
sudo rclone genautocomplete zsh
Logout and login again to use the autocompletion scripts, or source
them directly
@@ -39,9 +40,10 @@ rclone completion zsh [output_file] [flags]
-h, --help help for zsh
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.

View File

@@ -25,9 +25,10 @@ rclone config [flags]
-h, --help help for config
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone config create](/commands/rclone_config_create/) - Create a new remote with name, type and options.
@@ -35,7 +36,6 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone config disconnect](/commands/rclone_config_disconnect/) - Disconnects user from remote
* [rclone config dump](/commands/rclone_config_dump/) - Dump the config file as JSON.
* [rclone config edit](/commands/rclone_config_edit/) - Enter an interactive configuration session.
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
* [rclone config file](/commands/rclone_config_file/) - Show path of configuration file in use.
* [rclone config password](/commands/rclone_config_password/) - Update password in an existing remote.
* [rclone config paths](/commands/rclone_config_paths/) - Show paths used for configuration, cache, temp etc.

View File

@@ -10,6 +10,7 @@ Create a new remote with name, type and options.
## Synopsis
Create a new remote of `name` with `type` and options. The options
should be passed in pairs of `key` `value` or as `key=value`.
@@ -129,9 +130,10 @@ rclone config create name type [key value]* [flags]
--state string State - use with --continue
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config delete name [flags]
-h, --help help for delete
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -9,6 +9,7 @@ Disconnects user from remote
## Synopsis
This disconnects the remote: passed in to the cloud storage system.
This normally means revoking the oauth token.
@@ -26,9 +27,10 @@ rclone config disconnect remote: [flags]
-h, --help help for disconnect
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config dump [flags]
-h, --help help for dump
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -25,9 +25,10 @@ rclone config edit [flags]
-h, --help help for edit
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -1,30 +0,0 @@
---
title: "rclone config encryption"
description: "set, remove and check the encryption for the config file"
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/ and as part of making a release run "make commanddocs"
---
# rclone config encryption
set, remove and check the encryption for the config file
## Synopsis
This command sets, clears and checks the encryption for the config file using
the subcommands below.
## Options
```
-h, --help help for encryption
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
* [rclone config encryption check](/commands/rclone_config_encryption_check/) - Check that the config file is encrypted
* [rclone config encryption remove](/commands/rclone_config_encryption_remove/) - Remove the config file encryption password
* [rclone config encryption set](/commands/rclone_config_encryption_set/) - Set or change the config file encryption password

View File

@@ -1,37 +0,0 @@
---
title: "rclone config encryption check"
description: "Check that the config file is encrypted"
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/check/ and as part of making a release run "make commanddocs"
---
# rclone config encryption check
Check that the config file is encrypted
## Synopsis
This checks the config file is encrypted and that you can decrypt it.
It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
`--password-command`, otherwise it will prompt again for the password.
If the config file is not encrypted it will return a non-zero exit code.
```
rclone config encryption check [flags]
```
## Options
```
-h, --help help for check
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file

View File

@@ -1,38 +0,0 @@
---
title: "rclone config encryption remove"
description: "Remove the config file encryption password"
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/remove/ and as part of making a release run "make commanddocs"
---
# rclone config encryption remove
Remove the config file encryption password
## Synopsis
Remove the config file encryption password
This removes the config file encryption, returning it to un-encrypted.
If `--password-command` is in use, this will be called to supply the old config
password.
If the config was not encrypted then no error will be returned and
this command will do nothing.
```
rclone config encryption remove [flags]
```
## Options
```
-h, --help help for remove
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file

View File

@@ -1,48 +0,0 @@
---
title: "rclone config encryption set"
description: "Set or change the config file encryption password"
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/set/ and as part of making a release run "make commanddocs"
---
# rclone config encryption set
Set or change the config file encryption password
## Synopsis
This command sets or changes the config file encryption password.
If there was no config password set then it sets a new one, otherwise
it changes the existing config password.
Note that if you are changing an encryption password using
`--password-command` then this will be called once to decrypt the
config using the old password and then again to read the new
password to re-encrypt the config.
When `--password-command` is called to change the password then the
environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
changing passwords programmatically you can use the environment
variable to distinguish which password you must supply.
Alternatively you can remove the password first (with `rclone config
encryption remove`), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
briefly.
```
rclone config encryption set [flags]
```
## Options
```
-h, --help help for set
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file

View File

@@ -18,9 +18,10 @@ rclone config file [flags]
-h, --help help for file
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -10,6 +10,7 @@ Update password in an existing remote.
## Synopsis
Update an existing remote's password. The password
should be passed in pairs of `key` `password` or as `key=password`.
The `password` should be passed in in clear (unobscured).
@@ -33,9 +34,10 @@ rclone config password name [key value]+ [flags]
-h, --help help for password
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config paths [flags]
-h, --help help for paths
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config providers [flags]
-h, --help help for providers
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -9,6 +9,7 @@ Re-authenticates user with remote.
## Synopsis
This reconnects remote: passed in to the cloud storage system.
To disconnect the remote use "rclone config disconnect".
@@ -26,9 +27,10 @@ rclone config reconnect remote: [flags]
-h, --help help for reconnect
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -32,9 +32,10 @@ rclone config redacted [<remote>] [flags]
-h, --help help for redacted
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config show [<remote>] [flags]
-h, --help help for show
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -18,9 +18,10 @@ rclone config touch [flags]
-h, --help help for touch
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -10,6 +10,7 @@ Update options in an existing remote.
## Synopsis
Update an existing remote's options. The options should be passed in
pairs of `key` `value` or as `key=value`.
@@ -129,9 +130,10 @@ rclone config update name [key value]+ [flags]
--state string State - use with --continue
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -9,6 +9,7 @@ Prints info about logged in user of remote.
## Synopsis
This prints the details of the person logged in to the cloud storage
system.
@@ -24,9 +25,10 @@ rclone config userinfo remote: [flags]
--json Format output as JSON
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

View File

@@ -9,6 +9,7 @@ Copy files from source to dest, skipping identical files.
## Synopsis
Copy the source to the destination. Does not transfer files that are
identical on source and destination, testing by size and modification
time or MD5SUM. Doesn't delete files from the destination. If you
@@ -86,17 +87,15 @@ rclone copy source:path dest:path [flags]
-h, --help help for copy
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Copy Options
## Copy Options
Flags for anything which can copy a file
Flags for anything which can Copy a file.
```
--check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only).
--compare-dest stringArray Include additional comma separated server-side paths during comparison
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
--cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD)
--ignore-case-sync Ignore case when synchronizing
@@ -128,9 +127,9 @@ Flags for anything which can copy a file
-u, --update Skip files that are newer on the destination
```
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -138,9 +137,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -167,16 +166,18 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
### Listing Options
## Listing Options
Flags for listing directories
Flags for listing directories.
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Copy files from source to dest, skipping identical files.
## Synopsis
If source:path is a file or directory then it copies it to a file or
directory named dest:path.
@@ -49,17 +50,15 @@ rclone copyto source:path dest:path [flags]
-h, --help help for copyto
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Copy Options
## Copy Options
Flags for anything which can copy a file
Flags for anything which can Copy a file.
```
--check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only).
--compare-dest stringArray Include additional comma separated server-side paths during comparison
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
--cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD)
--ignore-case-sync Ignore case when synchronizing
@@ -91,9 +90,9 @@ Flags for anything which can copy a file
-u, --update Skip files that are newer on the destination
```
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -101,9 +100,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
### Filter Options
## Filter Options
Flags for filtering directory listings
Flags for filtering directory listings.
```
--delete-excluded Delete files on dest excluded from sync
@@ -130,16 +129,18 @@ Flags for filtering directory listings
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
### Listing Options
## Listing Options
Flags for listing directories
Flags for listing directories.
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

View File

@@ -10,6 +10,7 @@ Copy the contents of the URL supplied content to dest:path.
## Synopsis
Download a URL's content and copy it to the destination without saving
it in temporary storage.
@@ -55,12 +56,10 @@ rclone copyurl https://example.com dest:path [flags]
--stdout Write the output to stdout rather than a file
```
Options shared with other commands are described next.
See the [global flags page](/flags/) for global options not listed here.
### Important Options
## Important Options
Important flags useful for most commands
Important flags useful for most commands.
```
-n, --dry-run Do a trial run with no permanent changes
@@ -68,7 +67,9 @@ Important flags useful for most commands
-v, --verbose count Print lots more stuff (repeat for more)
```
## See Also
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

Some files were not shown because too many files have changed in this diff Show More