Mirror of https://github.com/rclone/rclone.git, synced 2025-12-12 14:23:24 +00:00

Compare commits: jwt-v5-com...fix-oauth- (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | bbb31d6acf |  |
|  | 7c705e0efa |  |
.github/workflows/build.yml (vendored) — 27 lines changed
@@ -26,12 +26,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -42,14 +42,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,14 +58,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -75,20 +75,20 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.22
+          - job_name: go1.21
             os: ubuntu-latest
-            go: '1.22'
+            go: '1.21'
             quicktest: true
             racequicktest: true
 
-          - job_name: go1.23
+          - job_name: go1.22
             os: ubuntu-latest
-            go: '1.23'
+            go: '1.22'
             quicktest: true
             racequicktest: true
 
@@ -123,8 +123,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'
 
       - name: Install Libraries on macOS
@@ -311,7 +310,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: '>=1.23.0-rc.1'
 
       - name: Set global environment variables
         shell: bash
MANUAL.html (generated) — 2431 lines changed; file diff suppressed because it is too large.
MANUAL.txt (generated) — 2517 lines changed; file diff suppressed because it is too large.
RELEASE.md — 21 lines changed
@@ -47,20 +47,13 @@ Early in the next release cycle update the dependencies.
 * `git commit -a -v -m "build: update all dependencies"`
 
 If the `make updatedirect` upgrades the version of go in the `go.mod`
-
-    go 1.22.0
-
-then go to manual mode. `go1.22` here is the lowest supported version
+then go to manual mode. `go1.20` here is the lowest supported version
 in the `go.mod`.
 
-If `make updatedirect` added a `toolchain` directive then remove it.
-We don't want to force a toolchain on our users. Linux packagers are
-often using a version of Go that is a few versions out of date.
-
 ```
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
-go mod tidy -go=1.22 -compat=1.22
+go mod tidy -go=1.20 -compat=1.20
 ```
 
 If the `go mod tidy` fails use the output from it to remove the
@@ -93,16 +86,6 @@ build.
 Once it compiles locally, push it on a test branch and commit fixes
 until the tests pass.
 
-### Major versions
-
-The above procedure will not upgrade major versions, so v2 to v3.
-However this tool can show which major versions might need to be
-upgraded:
-
-    go run github.com/icholy/gomajor@latest list -major
-
-Expect API breakage when updating major versions.
-
 ## Tidy beta
 
 At some point after the release run
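A note on the `toolchain` directive the removed paragraph refers to: in a `go.mod` it sits next to the `go` line, for example (versions here purely illustrative):

    go 1.22.0
    toolchain go1.23.4

Deleting the `toolchain` line leaves the minimum `go` version as the only constraint, so distribution packagers can build with whichever toolchain they ship.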
backend/all/all.go

@@ -10,7 +10,6 @@ import (
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/cloudinary"
 	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
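These blank-identifier imports exist only for their side effects: each backend package registers itself in its `init()`, exactly as the deleted cloudinary backend below does with `fs.Register`. A minimal sketch of that pattern, with a hypothetical `mybackend`:

```go
package mybackend // hypothetical backend, for illustration only

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs when the package is imported for side effects
// (import _ ".../backend/mybackend"), which is all the blank
// imports in backend/all do: each one registers a backend.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Example backend",
		NewFs:       NewFs,
	})
}

// NewFs would construct the backend; stubbed here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented // placeholder
}
```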
backend/azureblob/azureblob.go

@@ -2162,9 +2162,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		if chunkNumber <= 8 {
 			return w.f.shouldRetry(ctx, err)
 		}
-		if fserrors.ContextError(ctx, &err) {
-			return false, err
-		}
 		// retry all chunks once have done the first few
 		return true, err
 	}
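The removed lines follow a common rclone retry idiom: before deciding whether to retry, check whether the context has been cancelled, since retrying with a dead context only burns the retry budget. A minimal sketch of that predicate, assuming only rclone's `fserrors` package:

```go
package azsketch // illustrative only

import (
	"context"

	"github.com/rclone/rclone/fs/fserrors"
)

// shouldRetryChunk reports whether a chunk upload error deserves a retry.
func shouldRetryChunk(ctx context.Context, err error) (bool, error) {
	// ContextError folds ctx.Err() into err when the context is
	// cancelled or timed out; such errors must never be retried.
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return err != nil, err // retry any other failure
}
```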
backend/azurefiles/azurefiles.go

@@ -393,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
 	policyClientOptions := policy.ClientOptions{
 		Transport: newTransporter(ctx),
 	}
-	backup := service.ShareTokenIntentBackup
 	clientOpt := service.ClientOptions{
 		ClientOptions: policyClientOptions,
-		FileRequestIntent: &backup,
 	}
 
 	// Here we auth by setting one of cred, sharedKeyCred or f.client
backend/b2/api/types.go

@@ -44,7 +44,6 @@ type Bucket struct {
 type LifecycleRule struct {
 	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
 	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-	DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
 	FileNamePrefix            string `json:"fileNamePrefix"`
 }
 
backend/b2/b2.go

@@ -2231,7 +2231,6 @@ This will dump something like this showing the lifecycle rules.
     {
         "daysFromHidingToDeleting": 1,
         "daysFromUploadingToHiding": null,
-        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
         "fileNamePrefix": ""
     }
 ]
@@ -2260,7 +2259,6 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 	Opts: map[string]string{
 		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
 		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
-		"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
 	},
 }
@@ -2280,13 +2278,6 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 		}
 		newRule.DaysFromUploadingToHiding = &days
 	}
-	if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
-		days, err := strconv.Atoi(daysStr)
-		if err != nil {
-			return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
-		}
-		newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
-	}
 	bucketName, _ := f.split("")
 	if bucketName == "" {
 		return nil, errors.New("bucket required")
@@ -2294,7 +2285,7 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 	}
 
 	var bucket *api.Bucket
-	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
+	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
 		bucketID, err := f.getBucketID(ctx, bucketName)
 		if err != nil {
 			return nil, err
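The option strings above feed the `lifecycle` backend command. Its usage looks like this (per the b2 backend docs; the day value is illustrative):

    rclone backend lifecycle b2:bucket
    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1

Run without `-o` options it only dumps the current rules, as the help text above describes.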
backend/box/box.go

@@ -27,6 +27,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -45,6 +46,7 @@ import (
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
+	"golang.org/x/oauth2"
 )
 
 const (
@@ -63,10 +65,12 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://app.box.com/api/oauth2/authorize",
 			TokenURL: "https://app.box.com/api/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -74,7 +78,7 @@ var (
 )
 
 type boxCustomClaims struct {
-	jwtutil.LegacyStandardClaims
+	jwt.StandardClaims
 	BoxSubType string `json:"box_sub_type,omitempty"`
 }
 
@@ -222,8 +226,10 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	}
 
 	claims = &boxCustomClaims{
-		LegacyStandardClaims: jwtutil.LegacyStandardClaims{
-			ID:       val,
+		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
+		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
+		StandardClaims: jwt.StandardClaims{
+			Id:       val,
 			Issuer:   boxConfig.BoxAppSettings.ClientID,
 			Subject:  boxConfig.EnterpriseID,
 			Audience: tokenURL,
@@ -252,9 +258,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 
 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if block == nil {
-		return nil, errors.New("box: failed to PEM decode private key")
-	}
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}
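For context on the claims change: with jwt-go v4 the (deprecated) `jwt.StandardClaims` struct is embedded and the token is signed with the app's RSA key. A minimal sketch of how such claims are typically built and signed — field values hypothetical; the real code above also sets expiry and reads the key via `getDecryptedPrivateKey`:

```go
package boxsketch // illustrative only

import (
	"crypto/rsa"

	"github.com/golang-jwt/jwt/v4"
)

type boxCustomClaims struct {
	//nolint:staticcheck // jwt.StandardClaims is deprecated in jwt-go v4
	jwt.StandardClaims
	BoxSubType string `json:"box_sub_type,omitempty"`
}

// signBoxJWT builds a claim set and signs it with RS256.
func signBoxJWT(key *rsa.PrivateKey) (string, error) {
	claims := &boxCustomClaims{
		StandardClaims: jwt.StandardClaims{
			Id:       "unique-jti",    // hypothetical value
			Issuer:   "box-client-id", // hypothetical value
			Subject:  "enterprise-id", // hypothetical value
			Audience: "https://app.box.com/api/oauth2/token",
		},
		BoxSubType: "enterprise",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	return token.SignedString(key) // the signed assertion sent to the token URL
}
```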
backend/cloudinary/api (file deleted)

@@ -1,48 +0,0 @@
-// Package api has type definitions for cloudinary
-package api
-
-import (
-	"fmt"
-)
-
-// CloudinaryEncoder extends the built-in encoder
-type CloudinaryEncoder interface {
-	// FromStandardPath takes a / separated path in Standard encoding
-	// and converts it to a / separated path in this encoding.
-	FromStandardPath(string) string
-	// FromStandardName takes name in Standard encoding and converts
-	// it in this encoding.
-	FromStandardName(string) string
-	// ToStandardPath takes a / separated path in this encoding
-	// and converts it to a / separated path in Standard encoding.
-	ToStandardPath(string) string
-	// ToStandardName takes name in this encoding and converts
-	// it in Standard encoding.
-	ToStandardName(string) string
-	// Encoded root of the remote (as passed into NewFs)
-	FromStandardFullPath(string) string
-}
-
-// UpdateOptions was created to pass options from Update to Put
-type UpdateOptions struct {
-	PublicID     string
-	ResourceType string
-	DeliveryType string
-	AssetFolder  string
-	DisplayName  string
-}
-
-// Header formats the option as a string
-func (o *UpdateOptions) Header() (string, string) {
-	return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
-
-// Mandatory returns whether the option must be parsed or can be ignored
-func (o *UpdateOptions) Mandatory() bool {
-	return false
-}
-
-// String formats the option into human-readable form
-func (o *UpdateOptions) String() string {
-	return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
-}
backend/cloudinary/cloudinary.go (file deleted)

@@ -1,711 +0,0 @@
-// Package cloudinary provides an interface to the Cloudinary DAM
-package cloudinary
-
-import (
-	"context"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"path"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/cloudinary/cloudinary-go/v2"
-	SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
-	"github.com/cloudinary/cloudinary-go/v2/api/admin"
-	"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
-	"github.com/cloudinary/cloudinary-go/v2/api/uploader"
-	"github.com/rclone/rclone/backend/cloudinary/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/rest"
-	"github.com/zeebo/blake3"
-)
-
-// Cloudinary shouldn't have a trailing dot if there is no path
-func cldPathDir(somePath string) string {
-	if somePath == "" || somePath == "." {
-		return somePath
-	}
-	dir := path.Dir(somePath)
-	if dir == "." {
-		return ""
-	}
-	return dir
-}
-
-// Register with Fs
-func init() {
-	fs.Register(&fs.RegInfo{
-		Name:        "cloudinary",
-		Description: "Cloudinary",
-		NewFs:       NewFs,
-		Options: []fs.Option{
-			{
-				Name:      "cloud_name",
-				Help:      "Cloudinary Environment Name",
-				Required:  true,
-				Sensitive: true,
-			},
-			{
-				Name:      "api_key",
-				Help:      "Cloudinary API Key",
-				Required:  true,
-				Sensitive: true,
-			},
-			{
-				Name:      "api_secret",
-				Help:      "Cloudinary API Secret",
-				Required:  true,
-				Sensitive: true,
-			},
-			{
-				Name: "upload_prefix",
-				Help: "Specify the API endpoint for environments out of the US",
-			},
-			{
-				Name: "upload_preset",
-				Help: "Upload Preset to select asset manipulation on upload",
-			},
-			{
-				Name:     config.ConfigEncoding,
-				Help:     config.ConfigEncodingHelp,
-				Advanced: true,
-				Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
-					encoder.EncodeSlash |
-					encoder.EncodeLtGt |
-					encoder.EncodeDoubleQuote |
-					encoder.EncodeQuestion |
-					encoder.EncodeAsterisk |
-					encoder.EncodePipe |
-					encoder.EncodeHash |
-					encoder.EncodePercent |
-					encoder.EncodeBackSlash |
-					encoder.EncodeDel |
-					encoder.EncodeCtl |
-					encoder.EncodeRightSpace |
-					encoder.EncodeInvalidUtf8 |
-					encoder.EncodeDot),
-			},
-			{
-				Name:     "eventually_consistent_delay",
-				Default:  fs.Duration(0),
-				Advanced: true,
-				Help:     "Wait N seconds for eventual consistency of the databases that support the backend operation",
-			},
-		},
-	})
-}
-
-// Options defines the configuration for this backend
-type Options struct {
-	CloudName                 string               `config:"cloud_name"`
-	APIKey                    string               `config:"api_key"`
-	APISecret                 string               `config:"api_secret"`
-	UploadPrefix              string               `config:"upload_prefix"`
-	UploadPreset              string               `config:"upload_preset"`
-	Enc                       encoder.MultiEncoder `config:"encoding"`
-	EventuallyConsistentDelay fs.Duration          `config:"eventually_consistent_delay"`
-}
-
-// Fs represents a remote cloudinary server
-type Fs struct {
-	name     string
-	root     string
-	opt      Options
-	features *fs.Features
-	pacer    *fs.Pacer
-	srv      *rest.Client           // For downloading assets via the Cloudinary CDN
-	cld      *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
-	lastCRUD time.Time
-}
-
-// Object describes a cloudinary object
-type Object struct {
-	fs           *Fs
-	remote       string
-	size         int64
-	modTime      time.Time
-	url          string
-	md5sum       string
-	publicID     string
-	resourceType string
-	deliveryType string
-}
-
-// NewFs constructs an Fs from the path, bucket:path
-func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-
-	// Initialize the Cloudinary client
-	cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
-	}
-	cld.Admin.Client = *fshttp.NewClient(ctx)
-	cld.Upload.Client = *fshttp.NewClient(ctx)
-	if opt.UploadPrefix != "" {
-		cld.Config.API.UploadPrefix = opt.UploadPrefix
-	}
-	client := fshttp.NewClient(ctx)
-	f := &Fs{
-		name:  name,
-		root:  root,
-		opt:   *opt,
-		cld:   cld,
-		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
-		srv:   rest.NewClient(client),
-	}
-
-	f.features = (&fs.Features{
-		CanHaveEmptyDirectories: true,
-	}).Fill(ctx, f)
-
-	if root != "" {
-		// Check to see if the root actually an existing file
-		remote := path.Base(root)
-		f.root = cldPathDir(root)
-		_, err := f.NewObject(ctx, remote)
-		if err != nil {
-			if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
-				// File doesn't exist so return the previous root
-				f.root = root
-				return f, nil
-			}
-			return nil, err
-		}
-		// return an error with an fs which points to the parent
-		return f, fs.ErrorIsFile
-	}
-	return f, nil
-}
-
-// ------------------------------------------------------------
-
-// FromStandardPath implementation of the api.CloudinaryEncoder
-func (f *Fs) FromStandardPath(s string) string {
-	return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
-}
-
-// FromStandardName implementation of the api.CloudinaryEncoder
-func (f *Fs) FromStandardName(s string) string {
-	return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
-}
-
-// ToStandardPath implementation of the api.CloudinaryEncoder
-func (f *Fs) ToStandardPath(s string) string {
-	return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
-}
-
-// ToStandardName implementation of the api.CloudinaryEncoder
-func (f *Fs) ToStandardName(s string) string {
-	return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
-}
-
-// FromStandardFullPath encodes a full path to Cloudinary standard
-func (f *Fs) FromStandardFullPath(dir string) string {
-	return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
-}
-
-// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
-func (f *Fs) ToAssetFolderAPI(dir string) string {
-	return strings.ReplaceAll(dir, "%", "%25")
-}
-
-// ToDisplayNameElastic encodes a special case of elasticsearch
-func (f *Fs) ToDisplayNameElastic(dir string) string {
-	return strings.ReplaceAll(dir, "!", "\\!")
-}
-
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
-	return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
-	return f.root
-}
-
-// WaitEventuallyConsistent waits till the FS is eventually consistent
-func (f *Fs) WaitEventuallyConsistent() {
-	if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
-		return
-	}
-	delay := time.Duration(f.opt.EventuallyConsistentDelay)
-	timeSinceLastCRUD := time.Since(f.lastCRUD)
-	if timeSinceLastCRUD < delay {
-		time.Sleep(delay - timeSinceLastCRUD)
-	}
-}
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
-	return fmt.Sprintf("Cloudinary root '%s'", f.root)
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-	return f.features
-}
-
-// List the objects and directories in dir into entries
-func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
-	remotePrefix := f.FromStandardFullPath(dir)
-	if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
-		remotePrefix += "/"
-	}
-
-	var entries fs.DirEntries
-	dirs := make(map[string]struct{})
-	nextCursor := ""
-	f.WaitEventuallyConsistent()
-	for {
-		// user the folders api to list folders.
-		folderParams := admin.SubFoldersParams{
-			Folder:     f.ToAssetFolderAPI(remotePrefix),
-			MaxResults: 500,
-		}
-		if nextCursor != "" {
-			folderParams.NextCursor = nextCursor
-		}
-
-		results, err := f.cld.Admin.SubFolders(ctx, folderParams)
-		if err != nil {
-			return nil, fmt.Errorf("failed to list sub-folders: %w", err)
-		}
-		if results.Error.Message != "" {
-			if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
-				return nil, fs.ErrorDirNotFound
-			}
-
-			return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
-		}
-
-		for _, folder := range results.Folders {
-			relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
-			parts := strings.Split(relativePath, "/")
-
-			// It's a directory
-			dirName := parts[len(parts)-1]
-			if _, found := dirs[dirName]; !found {
-				d := fs.NewDir(path.Join(dir, dirName), time.Time{})
-				entries = append(entries, d)
-				dirs[dirName] = struct{}{}
-			}
-		}
-		// Break if there are no more results
-		if results.NextCursor == "" {
-			break
-		}
-		nextCursor = results.NextCursor
-	}
-
-	for {
-		// Use the assets.AssetsByAssetFolder API to list assets
-		assetsParams := admin.AssetsByAssetFolderParams{
-			AssetFolder: remotePrefix,
-			MaxResults:  500,
-		}
-		if nextCursor != "" {
-			assetsParams.NextCursor = nextCursor
-		}
-
-		results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
-		if err != nil {
-			return nil, fmt.Errorf("failed to list assets: %w", err)
-		}
-
-		for _, asset := range results.Assets {
-			remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
-			if dir != "" {
-				remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
-			}
-			o := &Object{
-				fs:           f,
-				remote:       remote,
-				size:         int64(asset.Bytes),
-				modTime:      asset.CreatedAt,
-				url:          asset.SecureURL,
-				publicID:     asset.PublicID,
-				resourceType: asset.AssetType,
-				deliveryType: asset.Type,
-			}
-			entries = append(entries, o)
-		}
-
-		// Break if there are no more results
-		if results.NextCursor == "" {
-			break
-		}
-		nextCursor = results.NextCursor
-	}
-
-	return entries, nil
-}
-
-// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	searchParams := search.Query{
-		Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
-			f.FromStandardFullPath(cldPathDir(remote)),
-			f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
-		SortBy:     []search.SortByField{{"uploaded_at": "desc"}},
-		MaxResults: 2,
-	}
-	var results *admin.SearchResult
-	f.WaitEventuallyConsistent()
-	err := f.pacer.Call(func() (bool, error) {
-		var err1 error
-		results, err1 = f.cld.Admin.Search(ctx, searchParams)
-		if err1 == nil && results.TotalCount != len(results.Assets) {
-			err1 = errors.New("partial response so waiting for eventual consistency")
-		}
-		return shouldRetry(ctx, nil, err1)
-	})
-	if err != nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	if results.TotalCount == 0 || len(results.Assets) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	asset := results.Assets[0]
-
-	o := &Object{
-		fs:           f,
-		remote:       remote,
-		size:         int64(asset.Bytes),
-		modTime:      asset.UploadedAt,
-		url:          asset.SecureURL,
-		md5sum:       asset.Etag,
-		publicID:     asset.PublicID,
-		resourceType: asset.ResourceType,
-		deliveryType: asset.Type,
-	}
-
-	return o, nil
-}
-
-func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
-	payload := []byte(path.Join(assetFolder, displayName))
-	hash := blake3.Sum256(payload)
-	return hex.EncodeToString(hash[:])
-}
-
-// Put uploads content to Cloudinary
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if src.Size() == 0 {
-		return nil, fs.ErrorCantUploadEmptyFiles
-	}
-
-	params := uploader.UploadParams{
-		UploadPreset: f.opt.UploadPreset,
-	}
-
-	updateObject := false
-	var modTime time.Time
-	for _, option := range options {
-		if updateOptions, ok := option.(*api.UpdateOptions); ok {
-			if updateOptions.PublicID != "" {
-				updateObject = true
-				params.Overwrite = SDKApi.Bool(true)
-				params.Invalidate = SDKApi.Bool(true)
-				params.PublicID = updateOptions.PublicID
-				params.ResourceType = updateOptions.ResourceType
-				params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
-				params.AssetFolder = updateOptions.AssetFolder
-				params.DisplayName = updateOptions.DisplayName
-				modTime = src.ModTime(ctx)
-			}
-		}
-	}
-	if !updateObject {
-		params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
-		params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
-		// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
-		// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
-		// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
-		params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
-	}
-	uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
-	f.lastCRUD = time.Now()
-	if err != nil {
-		return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
-	}
-	if !updateObject {
-		modTime = uploadResult.CreatedAt
-	}
-	if uploadResult.Error.Message != "" {
-		return nil, errors.New(uploadResult.Error.Message)
-	}
-
-	o := &Object{
-		fs:           f,
-		remote:       src.Remote(),
-		size:         int64(uploadResult.Bytes),
-		modTime:      modTime,
-		url:          uploadResult.SecureURL,
-		md5sum:       uploadResult.Etag,
-		publicID:     uploadResult.PublicID,
-		resourceType: uploadResult.ResourceType,
-		deliveryType: uploadResult.Type,
-	}
-	return o, nil
-}
-
-// Precision of the remote
-func (f *Fs) Precision() time.Duration {
-	return fs.ModTimeNotSupported
-}
-
-// Hashes returns the supported hash sets
-func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.MD5)
-}
-
-// Mkdir creates empty folders
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
-	res, err := f.cld.Admin.CreateFolder(ctx, params)
-	f.lastCRUD = time.Now()
-	if err != nil {
-		return err
-	}
-	if res.Error.Message != "" {
-		return errors.New(res.Error.Message)
-	}
-
-	return nil
-}
-
-// Rmdir deletes empty folders
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	// Additional test because Cloudinary will delete folders without
-	// assets, regardless of empty sub-folders
-	folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
-	folderParams := admin.SubFoldersParams{
-		Folder:     folder,
-		MaxResults: 1,
-	}
-	results, err := f.cld.Admin.SubFolders(ctx, folderParams)
-	if err != nil {
-		return err
-	}
-	if results.TotalCount > 0 {
-		return fs.ErrorDirectoryNotEmpty
-	}
-
-	params := admin.DeleteFolderParams{Folder: folder}
-	res, err := f.cld.Admin.DeleteFolder(ctx, params)
-	f.lastCRUD = time.Now()
-	if err != nil {
-		return err
-	}
-	if res.Error.Message != "" {
-		if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
-			return fs.ErrorDirNotFound
-		}
-
-		return errors.New(res.Error.Message)
-	}
-
-	return nil
-}
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
-	420, // Too Many Requests (legacy)
-	429, // Too Many Requests
-	500, // Internal Server Error
-	502, // Bad Gateway
-	503, // Service Unavailable
-	504, // Gateway Timeout
-	509, // Bandwidth Limit Exceeded
-}
-
-// shouldRetry returns a boolean as to whether this resp and err
-// deserve to be retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
-	if err != nil {
-		tryAgain := "Try again on "
-		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
-			layout := "2006-01-02 15:04:05 UTC"
-			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
-			timestamp, err2 := time.Parse(layout, dateStr)
-			if err2 == nil {
-				return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
-			}
-		}
-
-		fs.Debugf(nil, "Retrying API error %v", err)
-		return true, err
-	}
-
-	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
-}
-
-// ------------------------------------------------------------
-
-// Hash returns the MD5 of an object
-func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
-	if ty != hash.MD5 {
-		return "", hash.ErrUnsupported
-	}
-	return o.md5sum, nil
-}
-
-// Return a string version
-func (o *Object) String() string {
-	if o == nil {
-		return "<nil>"
-	}
-	return o.remote
-}
-
-// Fs returns the parent Fs
-func (o *Object) Fs() fs.Info {
-	return o.fs
-}
-
-// Remote returns the remote path
-func (o *Object) Remote() string {
-	return o.remote
-}
-
-// ModTime returns the modification time of the object
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	return o.modTime
-}
-
-// Size of object in bytes
-func (o *Object) Size() int64 {
-	return o.size
-}
-
-// Storable returns if this object is storable
-func (o *Object) Storable() bool {
-	return true
-}
-
-// SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	return fs.ErrorCantSetModTime
-}
-
-// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	var resp *http.Response
-	opts := rest.Opts{
-		Method:  "GET",
-		RootURL: o.url,
-		Options: options,
-	}
-	var offset int64
-	var count int64
-	var key string
-	var value string
-	fs.FixRangeOption(options, o.size)
-	for _, option := range options {
-		switch x := option.(type) {
-		case *fs.RangeOption:
-			offset, count = x.Decode(o.size)
-			if count < 0 {
-				count = o.size - offset
-			}
-			key, value = option.Header()
-		case *fs.SeekOption:
-			offset = x.Offset
-			count = o.size - offset
-			key, value = option.Header()
-		default:
-			if option.Mandatory() {
-				fs.Logf(o, "Unsupported mandatory option: %v", option)
-			}
-		}
-	}
-	if key != "" && value != "" {
-		opts.ExtraHeaders = make(map[string]string)
-		opts.ExtraHeaders[key] = value
-	}
-	// Make sure that the asset is fully available
-	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
-		if err == nil {
-			cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
-			if clErr == nil && count == int64(cl) {
-				return false, nil
-			}
-		}
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
-	}
-	return resp.Body, err
-}
-
-// Update the object with the contents of the io.Reader
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	options = append(options, &api.UpdateOptions{
-		PublicID:     o.publicID,
-		ResourceType: o.resourceType,
-		DeliveryType: o.deliveryType,
-		DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
-		AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
-	})
-	updatedObj, err := o.fs.Put(ctx, in, src, options...)
-	if err != nil {
-		return err
-	}
-	if uo, ok := updatedObj.(*Object); ok {
-		o.size = uo.size
-		o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
-		o.url = uo.url
-		o.md5sum = uo.md5sum
-		o.publicID = uo.publicID
-		o.resourceType = uo.resourceType
-		o.deliveryType = uo.deliveryType
-	}
-	return nil
-}
-
-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
-	params := uploader.DestroyParams{
-		PublicID:     o.publicID,
-		ResourceType: o.resourceType,
-		Type:         o.deliveryType,
-	}
-	res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
-	o.fs.lastCRUD = time.Now()
-	if dErr != nil {
-		return dErr
-	}
-
-	if res.Error.Message != "" {
-		return errors.New(res.Error.Message)
-	}
-
-	if res.Result != "ok" {
-		return errors.New(res.Result)
-	}
-
-	return nil
-}
backend/cloudinary/cloudinary_test.go (file deleted)

@@ -1,23 +0,0 @@
-// Test Cloudinary filesystem interface
-
-package cloudinary_test
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/backend/cloudinary"
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	name := "TestCloudinary"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName:      name + ":",
-		NilObject:       (*cloudinary.Object)(nil),
-		SkipInvalidUTF8: true,
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
-		},
-	})
-}
backend/drive/drive.go

@@ -80,10 +80,9 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	driveConfig = &oauthutil.Config{
+	driveConfig = &oauth2.Config{
 		Scopes: []string{scopePrefix + "drive"},
-		AuthURL:  google.Endpoint.AuthURL,
-		TokenURL: google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -3525,14 +3524,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
 	return f.unTrash(ctx, dir, directoryID, true)
 }
 
-// copy or move file with id to dest
-func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
+// copy file with id to dest
+func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
 	info, err := f.getFile(ctx, id, f.getFileFields(ctx))
 	if err != nil {
 		return fmt.Errorf("couldn't find id: %w", err)
 	}
 	if info.MimeType == driveFolderType {
-		return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
+		return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
 	}
 	info.Name = f.opt.Enc.ToStandardName(info.Name)
 	o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3553,15 +3552,9 @@ func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string
 	if err != nil {
 		return err
 	}
-	var opErr error
-	if operation == "moveid" {
-		_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
-	} else {
-		_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
-	}
-	if opErr != nil {
-		return fmt.Errorf("%s failed: %w", operation, opErr)
+	_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+	if err != nil {
+		return fmt.Errorf("copy failed: %w", err)
 	}
 	return nil
 }
@@ -3798,28 +3791,6 @@ attempted if possible.
 
 Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
 `,
-}, {
-	Name:  "moveid",
-	Short: "Move files by ID",
-	Long: `This command moves files by ID
-
-Usage:
-
-    rclone backend moveid drive: ID path
-    rclone backend moveid drive: ID1 path1 ID2 path2
-
-It moves the drive file with ID given to the path (an rclone path which
-will be passed internally to rclone moveto).
-
-The path should end with a / to indicate move the file as named to
-this directory. If it doesn't end with a / then the last path
-component will be used as the file name.
-
-If the destination is a drive backend then server-side moving will be
-attempted if possible.
-
-Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
-`,
 }, {
 	Name:  "exportformats",
 	Short: "Dump the export formats for debug purposes",
@@ -3998,16 +3969,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 			dir = arg[0]
 		}
 		return f.unTrashDir(ctx, dir, true)
-	case "copyid", "moveid":
+	case "copyid":
 		if len(arg)%2 != 0 {
 			return nil, errors.New("need an even number of arguments")
 		}
 		for len(arg) > 0 {
 			id, dest := arg[0], arg[1]
 			arg = arg[2:]
-			err = f.copyOrMoveID(ctx, name, id, dest)
+			err = f.copyID(ctx, id, dest)
 			if err != nil {
-				return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
+				return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
 			}
 		}
 		return nil, nil
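After this change only `copyid` remains; its usage follows the same shape as the removed `moveid` help above:

    rclone backend copyid drive: ID path
    rclone backend copyid drive: ID1 path1 ID2 path2

A trailing `/` on the path copies the file under its original name into that directory; otherwise the last path component becomes the new file name.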
backend/drive/drive_internal_test.go

@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 	require.NoError(t, f.Purge(ctx, "trashDir"))
 }
 
-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
-func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
 	ctx := context.Background()
 	obj, err := f.NewObject(ctx, existingFile)
 	require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	}
 
 	t.Run("BadID", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
+		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
 		require.Error(t, err)
 		assert.Contains(t, err.Error(), "couldn't find id")
 	})
@@ -506,31 +506,19 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	t.Run("Directory", func(t *testing.T) {
 		rootID, err := f.dirCache.RootID(ctx, false)
 		require.NoError(t, err)
-		err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
+		err = f.copyID(ctx, rootID, dir+"/")
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "can't moveid directory")
+		assert.Contains(t, err.Error(), "can't copy directory")
 	})
 
-	t.Run("MoveWithoutDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
+	t.Run("WithoutDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/")
 		require.NoError(t, err)
 		checkFile(path.Base(existingFile))
 	})
 
-	t.Run("CopyWithoutDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
-		require.NoError(t, err)
-		checkFile(path.Base(existingFile))
-	})
-
-	t.Run("MoveWithDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
-		require.NoError(t, err)
-		checkFile("potato.txt")
-	})
-
-	t.Run("CopyWithDestName", func(t *testing.T) {
-		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
+	t.Run("WithDestName", func(t *testing.T) {
+		err = f.copyID(ctx, o.id, dir+"/potato.txt")
 		require.NoError(t, err)
 		checkFile("potato.txt")
 	})
@@ -659,7 +647,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	})
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
-	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
+	t.Run("CopyID", f.InternalTestCopyID)
 	t.Run("Query", f.InternalTestQuery)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
backend/dropbox/batcher.go

@@ -11,6 +11,7 @@ import (
 	"fmt"
 
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/rclone/rclone/fs/fserrors"
 )
 
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {
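Both versions of this code run inside `f.pacer.Call`, whose callback returns `(retry bool, err error)`: `true` makes the pacer sleep and re-invoke the callback, `false` surfaces the error immediately. A stripped-down sketch of the contract (`doRemoteOperation` and `isInsufficientSpace` are hypothetical helpers, not rclone APIs):

```go
err := f.pacer.Call(func() (bool, error) {
	err := doRemoteOperation() // hypothetical remote call
	if isInsufficientSpace(err) {
		// Permanent condition: wrap so higher layers don't retry either.
		return false, fserrors.NoRetryError(err)
	}
	return err != nil, err // transient: let the pacer back off and retry
})
```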
@@ -47,7 +47,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
@@ -94,7 +93,7 @@ const (
 
 var (
 	// Description of how to auth for this app
-	dropboxConfig = &oauthutil.Config{
+	dropboxConfig = &oauth2.Config{
 		Scopes: []string{
 			"files.metadata.write",
 			"files.content.write",
@@ -109,8 +108,7 @@ var (
 		// AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
 		// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
 		// },
-		AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
-		TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
+		Endpoint:     dropbox.OAuthEndpoint(""),
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -135,7 +133,7 @@ var (
 )
 
 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
+func getOauthConfig(m configmap.Mapper) *oauth2.Config {
 	// If not impersonating, use standard scopes
 	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
 		return dropboxConfig
@@ -318,46 +316,32 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
-// Some specific errors which should be excluded from retries
-func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
-	if err == nil {
-		return false, err
-	}
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	// First check for specific errors
-	//
-	// These come back from the SDK in a whole host of different
-	// error types, but there doesn't seem to be a consistent way
-	// of reading the error cause, so here we just check using the
-	// error string which isn't perfect but does the job.
+	if err == nil {
+		return false, err
+	}
 	errString := err.Error()
+	// First check for specific errors
 	if strings.Contains(errString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
 	} else if strings.Contains(errString, "malformed_path") {
 		return false, fserrors.NoRetryError(err)
 	}
-	return true, err
-}
-
-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if retry, err := shouldRetryExclude(ctx, err); !retry {
-		return retry, err
-	}
 	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
-			fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
+			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
 			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
 	// Keep old behavior for backward compatibility
-	errString := err.Error()
 	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 		return true, err
 	}
@@ -1036,20 +1020,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
 
-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
 	// Temporary Object under construction
 	dstObj := &Object{
 		fs: f,
@@ -1063,6 +1040,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 			ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
+	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.CopyV2(&arg)
@@ -1714,10 +1692,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 
 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
		return err != nil, err
 	})
 	if err != nil {
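Note on the retry hunks above (this group of hunks appears to be the Dropbox backend, judging by the identifiers). The `-` side funnels every non-retryable error through a single shouldRetryExclude helper, which shouldRetry and the upload/batch pacer callbacks all reuse; the `+` side repeats the insufficient-space type assertion inline at each call site. A minimal sketch of the factored pattern (simplified, not the exact rclone code; the error string is illustrative):

    // shouldRetryExclude reports the cases that must never be retried.
    func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
        if err == nil {
            return false, err
        }
        if strings.Contains(err.Error(), "insufficient_space") {
            return false, fserrors.FatalError(err) // retrying cannot help
        }
        return true, err // not excluded; callers add their own policy
    }

    // Call sites start with the exclusions and layer policy on top.
    func shouldRetry(ctx context.Context, err error) (bool, error) {
        if retry, err := shouldRetryExclude(ctx, err); !retry {
            return retry, err
        }
        return true, err // e.g. honour Retry-After, match error strings
    }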
@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -1228,19 +1228,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 		return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
 	}
 
-	// Find existing object
-	existingObj, err := f.NewObject(ctx, remote)
-	if err == nil {
-		defer func() {
-			// Don't remove existing object if returning an error
-			if err != nil {
-				return
-			}
-			fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
-			err = existingObj.Remove(ctx)
-		}()
-	}
-
 	// Create temporary object
 	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
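Both Copy implementations in this comparison deal with replacing an existing destination object. The `-` block above spells it out inline: look the object up first, then remove it from a deferred closure only if the copy succeeded. The Dropbox hunk further up instead calls operations.RemoveExisting, which appears to return a cleanup callback taking a *error. A hedged sketch of that callback shape (the helper's body is assumed, not shown in this diff):

    func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
        // RemoveExisting moves any existing object out of the way and returns
        // a cleanup which deletes it on success or restores it on failure.
        cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
        if err != nil {
            return nil, err
        }
        defer cleanup(&err) // inspects the named return to decide what to do
        // ... perform the server-side copy, assigning to dst and err ...
        return dst, err
    }

Note the named return value: the defer must be able to observe the final err, which is why the signature keeps (dst fs.Object, err error).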
@@ -60,17 +60,14 @@ const (
 	minSleep = 10 * time.Millisecond
 )
 
-var (
-	// Description of how to auth for this app
-	storageConfig = &oauthutil.Config{
+// Description of how to auth for this app
+var storageConfig = &oauth2.Config{
 	Scopes:       []string{storage.DevstorageReadWriteScope},
-	AuthURL:      google.Endpoint.AuthURL,
-	TokenURL:     google.Endpoint.TokenURL,
+	Endpoint:     google.Endpoint,
 	ClientID:     rcloneClientID,
 	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 	RedirectURL:  oauthutil.RedirectURL,
 }
-)
 
 // Register with Fs
 func init() {
@@ -33,6 +33,7 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 )
 
@@ -59,14 +60,13 @@ const (
 
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: []string{
 			"openid",
 			"profile",
 			scopeReadWrite, // this must be at position scopeAccess
 		},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -1168,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	errors := make([]error, 1)
 	results := make([]*api.MediaItem, 1)
 	err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-	if err == nil {
+	if err != nil {
 		err = errors[0]
 		info = results[0]
 	}
@@ -31,6 +31,7 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )
 
 const (
@@ -47,9 +48,11 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app.
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://my.hidrive.com/client/authorize",
 			TokenURL: "https://my.hidrive.com/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,
@@ -331,13 +331,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
-	trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
 	if f.opt.NoEscape {
 		// Directly concatenate without escaping, no_escape behavior
-		return f.endpointURL + trimmedRemote
+		return f.endpointURL + remote
 	}
 	// Default behavior
-	return f.endpointURL + rest.URLPathEscape(trimmedRemote)
+	return f.endpointURL + rest.URLPathEscape(remote)
 }
 
 // Errors returned by parseName
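The trimmedRemote line on the `-` side matters because, per its own comment, f.endpointURL always ends with a "/"; joining it with a remote that begins with one would double the separator. Roughly (URL is a placeholder):

    endpointURL := "https://example.com/base/" // assumed shape; always has a trailing "/"
    remote := "/four/under four.txt"
    _ = endpointURL + remote                        // https://example.com/base//four/under four.txt
    _ = endpointURL + strings.TrimLeft(remote, "/") // https://example.com/base/four/under four.txt

The TestNewObjectWithLeadingSlash test in the next hunk (present only on the `-` side) exercises exactly this case.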
@@ -191,33 +191,6 @@ func TestNewObject(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
-func TestNewObjectWithLeadingSlash(t *testing.T) {
-	f := prepare(t)
-
-	o, err := f.NewObject(context.Background(), "/four/under four.txt")
-	require.NoError(t, err)
-
-	assert.Equal(t, "/four/under four.txt", o.Remote())
-	assert.Equal(t, int64(8+lineEndSize), o.Size())
-	_, ok := o.(*Object)
-	assert.True(t, ok)
-
-	// Test the time is correct on the object
-
-	tObj := o.ModTime(context.Background())
-
-	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
-	require.NoError(t, err)
-	tFile := fi.ModTime()
-
-	fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
-
-	// check object not found
-	o, err = f.NewObject(context.Background(), "/not found.txt")
-	assert.Nil(t, o)
-	assert.Equal(t, fs.ErrorObjectNotFound, err)
-}
-
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)
 
@@ -277,9 +277,11 @@ machines.`)
 		m.Set(configClientID, teliaseCloudClientID)
 		m.Set(configTokenURL, teliaseCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
 					AuthURL:  teliaseCloudAuthURL,
 					TokenURL: teliaseCloudTokenURL,
+				},
 				ClientID:    teliaseCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -290,9 +292,11 @@ machines.`)
 		m.Set(configClientID, telianoCloudClientID)
 		m.Set(configTokenURL, telianoCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
 					AuthURL:  telianoCloudAuthURL,
 					TokenURL: telianoCloudTokenURL,
+				},
 				ClientID:    telianoCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -303,9 +307,11 @@ machines.`)
 		m.Set(configClientID, tele2CloudClientID)
 		m.Set(configTokenURL, tele2CloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
 					AuthURL:  tele2CloudAuthURL,
 					TokenURL: tele2CloudTokenURL,
+				},
 				ClientID:    tele2CloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -316,9 +322,11 @@ machines.`)
 		m.Set(configClientID, onlimeCloudClientID)
 		m.Set(configTokenURL, onlimeCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
 					AuthURL:  onlimeCloudAuthURL,
 					TokenURL: onlimeCloudTokenURL,
+				},
 				ClientID:    onlimeCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 	}
 
 	baseClient := fshttp.NewClient(ctx)
-	oauthConfig := &oauthutil.Config{
+	oauthConfig := &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  defaultTokenURL,
 			TokenURL: defaultTokenURL,
+		},
 	}
 	if ver == configVersion {
 		oauthConfig.ClientID = defaultClientID
 		// if custom endpoints are set use them else stick with defaults
 		if tokenURL, ok := m.Get(configTokenURL); ok {
-			oauthConfig.TokenURL = tokenURL
+			oauthConfig.Endpoint.TokenURL = tokenURL
 			// jottacloud is weird. we need to use the tokenURL as authURL
-			oauthConfig.AuthURL = tokenURL
+			oauthConfig.Endpoint.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
 		clientID, ok := m.Get(configClientID)
@@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 		oauthConfig.ClientID = clientID
 		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
 
-		oauthConfig.TokenURL = legacyTokenURL
-		oauthConfig.AuthURL = legacyTokenURL
+		oauthConfig.Endpoint.TokenURL = legacyTokenURL
+		oauthConfig.Endpoint.AuthURL = legacyTokenURL
 
 		// add the request filter to fix token refresh
 		if do, ok := baseClient.Transport.(interface {
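All of the OAuth hunks in this comparison swap the same two types: rclone's oauthutil.Config on the `-` side keeps the authorization and token URLs as top-level fields, while golang.org/x/oauth2's Config on the `+` side nests them in an Endpoint struct. A shape-only sketch of the difference (URLs are placeholders):

    flat := &oauthutil.Config{ // `-` side: rclone's flattened wrapper
        AuthURL:  "https://example.com/authorize",
        TokenURL: "https://example.com/token",
    }
    nested := &oauth2.Config{ // `+` side: stock x/oauth2
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://example.com/authorize",
            TokenURL: "https://example.com/token",
        },
    }

which is why the jottacloud hunks above flip between oauthConfig.TokenURL and oauthConfig.Endpoint.TokenURL.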
@@ -5,18 +5,18 @@ package local
 import (
 	"context"
 	"fmt"
+	"syscall"
 	"unsafe"
 
 	"github.com/rclone/rclone/fs"
-	"golang.org/x/sys/windows"
 )
 
-var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
 
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var available, total, free int64
-	root, e := windows.UTF16PtrFromString(f.root)
+	root, e := syscall.UTF16PtrFromString(f.root)
 	if e != nil {
 		return nil, fmt.Errorf("failed to read disk usage: %w", e)
 	}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
 		uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
 	)
-	if e1 != windows.Errno(0) {
+	if e1 != syscall.Errno(0) {
 		return nil, fmt.Errorf("failed to read disk usage: %w", e1)
 	}
 	usage := &fs.Usage{
@@ -1,16 +0,0 @@
-//go:build windows || plan9 || js || linux
-
-package local
-
-import "os"
-
-const haveLChmod = false
-
-// lChmod changes the mode of the named file to mode. If the file is a symbolic
-// link, it changes the link, not the target. If there is an error,
-// it will be of type *PathError.
-func lChmod(name string, mode os.FileMode) error {
-	// Can't do this safely on this OS - chmoding a symlink always
-	// changes the destination.
-	return nil
-}
@@ -1,41 +0,0 @@
-//go:build !windows && !plan9 && !js && !linux
-
-package local
-
-import (
-	"os"
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-const haveLChmod = true
-
-// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
-//
-// Borrowed from the syscall source since it isn't public.
-func syscallMode(i os.FileMode) (o uint32) {
-	o |= uint32(i.Perm())
-	if i&os.ModeSetuid != 0 {
-		o |= syscall.S_ISUID
-	}
-	if i&os.ModeSetgid != 0 {
-		o |= syscall.S_ISGID
-	}
-	if i&os.ModeSticky != 0 {
-		o |= syscall.S_ISVTX
-	}
-	return o
-}
-
-// lChmod changes the mode of the named file to mode. If the file is a symbolic
-// link, it changes the link, not the target. If there is an error,
-// it will be of type *PathError.
-func lChmod(name string, mode os.FileMode) error {
-	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
-	// and returns ENOTSUP if you try, so we don't support this on linux
-	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
-		return &os.PathError{Op: "lChmod", Path: name, Err: e}
-	}
-	return nil
-}
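The two files deleted above (present only on the `-` side) implement link-aware chmod. The key call is unix.Fchmodat with AT_SYMLINK_NOFOLLOW, which changes the mode of the symlink itself; per the comment in the file, Linux returns ENOTSUP for that flag, hence the build tags that stub the function out there (and on windows/plan9/js). A usage sketch with an illustrative file name:

    // Sketch: change the mode of the link, not its target (non-Linux Unix only).
    if err := lChmod("symlink2.txt.rclonelink", 0o644); err != nil {
        // on linux/windows/plan9/js the stub variant simply returns nil
    }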
@@ -1,4 +1,4 @@
-//go:build plan9 || js
+//go:build windows || plan9 || js
 
 package local
 
@@ -1,19 +0,0 @@
-//go:build windows
-
-package local
-
-import (
-	"time"
-)
-
-const haveLChtimes = true
-
-// lChtimes changes the access and modification times of the named
-// link, similar to the Unix utime() or utimes() functions.
-//
-// The underlying filesystem may truncate or round the values to a
-// less precise time unit.
-// If there is an error, it will be of type *PathError.
-func lChtimes(name string, atime time.Time, mtime time.Time) error {
-	return setTimes(name, atime, mtime, time.Time{}, true)
-}
@@ -34,6 +34,7 @@ import (
 // Constants
 const (
 	devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
+	linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
 	useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
 )
 
@@ -100,8 +101,10 @@ Metadata is supported on files and directories.
 		},
 		{
 			Name: "links",
-			Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
+			Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
 			Default: false,
+			NoPrefix: true,
+			ShortOpt: "l",
 			Advanced: true,
 		},
 		{
@@ -376,22 +379,17 @@ type Directory struct {
 
 var (
 	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-	errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
+	errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
 )
 
 // NewFs constructs an Fs from the path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ci := fs.GetConfig(ctx)
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-	// Override --local-links with --links if set
-	if ci.Links {
-		opt.TranslateSymlinks = true
-	}
 	if opt.TranslateSymlinks && opt.FollowSymlinks {
 		return nil, errLinksAndCopyLinks
 	}
@@ -437,9 +435,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.dev = readDevice(fi, f.opt.OneFileSystem)
 	}
 	// Check to see if this is a .rclonelink if not found
-	hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
+	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
 	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-		fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
+		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
 	}
 	if err == nil && f.isRegular(fi.Mode()) {
 		// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -510,8 +508,8 @@ func (f *Fs) caseInsensitive() bool {
 //
 // for regular files, localPath is returned unchanged
 func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-	isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
-	newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
+	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
+	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
 	return newLocalPath, isTranslatedLink
 }
 
@@ -694,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			} else {
 				// Check whether this link should be translated
 				if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-					newRemote += fs.LinkSuffix
+					newRemote += linkSuffix
 				}
 				// Don't include non directory if not included
 				// we leave directory filtering to the layer above
@@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
 	require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
 
 	// Object viewed as symlink
-	file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
+	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
 
 	// Object viewed as destination
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
 
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
 		r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
 	assert.Equal(t, "file.txt", linkText)
 
 	// Check that NewObject gets the correct object
-	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
-	assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
+	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
 	assert.Equal(t, int64(8), o.Size())
 
 	// Check that NewObject doesn't see the non suffixed version
@@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
 	require.Equal(t, fs.ErrorObjectNotFound, err)
 
 	// Check that NewFs works with the suffixed version and --links
-	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
+	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
 		"links": "true",
 	})
 	require.Equal(t, fs.ErrorIsFile, err)
@@ -268,66 +268,22 @@ func TestMetadata(t *testing.T) {
 	r := fstest.NewRun(t)
 	const filePath = "metafile.txt"
 	when := time.Now()
+	const dayLength = len("2001-01-01")
+	whenRFC := when.Format(time.RFC3339Nano)
 	r.WriteFile(filePath, "metadata file contents", when)
 	f := r.Flocal.(*Fs)
 
-	// Set fs into "-l" / "--links" mode
-	f.opt.TranslateSymlinks = true
-
-	// Write a symlink to the file
-	symlinkPath := "metafile-link.txt"
-	osSymlinkPath := filepath.Join(f.root, symlinkPath)
-	symlinkPath += fs.LinkSuffix
-	require.NoError(t, os.Symlink(filePath, osSymlinkPath))
-	symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
-	require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
-
 	// Get the object
 	obj, err := f.NewObject(ctx, filePath)
 	require.NoError(t, err)
 	o := obj.(*Object)
 
-	// Get the symlink object
-	symlinkObj, err := f.NewObject(ctx, symlinkPath)
-	require.NoError(t, err)
-	symlinkO := symlinkObj.(*Object)
-
-	// Record metadata for o
-	oMeta, err := o.Metadata(ctx)
-	require.NoError(t, err)
-
-	// Test symlink first to check it doesn't mess up file
-	t.Run("Symlink", func(t *testing.T) {
-		testMetadata(t, r, symlinkO, symlinkModTime)
-	})
-
-	// Read it again
-	oMetaNew, err := o.Metadata(ctx)
-	require.NoError(t, err)
-
-	// Check that operating on the symlink didn't change the file it was pointing to
-	// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
-	assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
-
-	// Now run the same tests on the file
-	t.Run("File", func(t *testing.T) {
-		testMetadata(t, r, o, when)
-	})
-}
-
-func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
-	ctx := context.Background()
-	whenRFC := when.Format(time.RFC3339Nano)
-	const dayLength = len("2001-01-01")
-
-	f := r.Flocal.(*Fs)
 	features := f.Features()
 
-	var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
+	var hasXID, hasAtime, hasBtime bool
 	switch runtime.GOOS {
 	case "darwin", "freebsd", "netbsd", "linux":
 		hasXID, hasAtime, hasBtime = true, true, true
-		canSetXattrOnLinks = runtime.GOOS != "linux"
 	case "openbsd", "solaris":
 		hasXID, hasAtime = true, true
 	case "windows":
@@ -350,10 +306,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	require.NoError(t, err)
 	assert.Nil(t, m)
 
-	if !canSetXattrOnLinks && o.translatedLink {
-		t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
-	}
-
 	inM := fs.Metadata{
 		"potato": "chips",
 		"cabbage": "soup",
@@ -368,21 +320,18 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	})
-
 	checkTime := func(m fs.Metadata, key string, when time.Time) {
-		t.Helper()
 		mt, ok := o.parseMetadataTime(m, key)
 		assert.True(t, ok)
 		dt := mt.Sub(when)
 		precision := time.Second
-		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
+		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
 	}
 
 	checkInt := func(m fs.Metadata, key string, base int) int {
-		t.Helper()
 		value, ok := o.parseMetadataInt(m, key, base)
 		assert.True(t, ok)
 		return value
 	}
 
 	t.Run("Read", func(t *testing.T) {
 		m, err := o.Metadata(ctx)
 		require.NoError(t, err)
@@ -392,12 +341,13 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 		checkInt(m, "mode", 8)
 		checkTime(m, "mtime", when)
 
+		assert.Equal(t, len(whenRFC), len(m["mtime"]))
 		assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
 
-		if hasAtime && !o.translatedLink { // symlinks generally don't record atime
+		if hasAtime {
 			checkTime(m, "atime", when)
 		}
-		if hasBtime && !o.translatedLink { // symlinks generally don't record btime
+		if hasBtime {
 			checkTime(m, "btime", when)
 		}
 		if hasXID {
@@ -421,10 +371,6 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 		"mode": "0767",
 		"potato": "wedges",
 	}
-	if !canSetXattrOnLinks && o.translatedLink {
-		// Don't change xattr if not supported on symlinks
-		delete(newM, "potato")
-	}
 	err := o.writeMetadata(newM)
 	require.NoError(t, err)
 
@@ -434,11 +380,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 
 	mode := checkInt(m, "mode", 8)
 	if runtime.GOOS != "windows" {
-		expectedMode := 0767
-		if o.translatedLink && runtime.GOOS == "linux" {
-			expectedMode = 0777 // perms of symlinks always read as 0777 on linux
-		}
-		assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
+		assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
 	}
 
 	checkTime(m, "mtime", newMtime)
@@ -448,7 +390,7 @@ func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	if haveSetBTime {
 		checkTime(m, "btime", newBtime)
 	}
-	if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
+	if xattrSupported {
 		assert.Equal(t, "wedges", m["potato"])
 	}
 	})
@@ -105,11 +105,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
 	}
 	if haveSetBTime {
 		if btimeOK {
-			if o.translatedLink {
-				err = lsetBTime(o.path, btime)
-			} else {
 			err = setBTime(o.path, btime)
-			}
 			if err != nil {
 				outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
 			}
@@ -124,12 +120,8 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
 	}
 	if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
 		fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
-	} else {
-		if o.translatedLink {
-			err = os.Lchown(o.path, uid, gid)
 	} else {
 		err = os.Chown(o.path, uid, gid)
-		}
 		if err != nil {
 			outErr = fmt.Errorf("failed to change ownership: %w", err)
 		}
@@ -140,16 +132,7 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
 	if mode >= 0 {
 		umode := uint(mode)
 		if umode <= math.MaxUint32 {
-			if o.translatedLink {
-				if haveLChmod {
-					err = lChmod(o.path, os.FileMode(umode))
-				} else {
-					fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
-					err = nil
-				}
-			} else {
 			err = os.Chmod(o.path, os.FileMode(umode))
-			}
 			if err != nil {
 				outErr = fmt.Errorf("failed to change permissions: %w", err)
 			}
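The recurring pattern removed in these metadata hunks chooses a link-aware syscall when the object is a translated symlink and the ordinary target-following one otherwise. From the `-` side of the ownership hunk above (abridged):

    if o.translatedLink {
        err = os.Lchown(o.path, uid, gid) // operates on the link itself
    } else {
        err = os.Chown(o.path, uid, gid) // follows the link to its target
    }

The same shape repeats for birth time (lsetBTime vs setBTime) and permissions (lChmod vs os.Chmod, with a debug-and-skip fallback where lChmod is unavailable).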
@@ -13,9 +13,3 @@ func setBTime(name string, btime time.Time) error {
 	// Does nothing
 	return nil
 }
-
-// lsetBTime changes the birth time of the link passed in
-func lsetBTime(name string, btime time.Time) error {
-	// Does nothing
-	return nil
-}
@@ -9,20 +9,15 @@ import (
 
 const haveSetBTime = true
 
-// setTimes sets any of atime, mtime or btime
-// if link is set it sets a link rather than the target
-func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
+// setBTime sets the birth time of the file passed in
+func setBTime(name string, btime time.Time) (err error) {
 	pathp, err := syscall.UTF16PtrFromString(name)
 	if err != nil {
 		return err
 	}
-	fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
-	if link {
-		fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
-	}
 	h, err := syscall.CreateFile(pathp,
 		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
-		syscall.OPEN_EXISTING, fileFlag, 0)
+		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
 	if err != nil {
 		return err
 	}
@@ -32,28 +27,6 @@ func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error)
 			err = closeErr
 		}
 	}()
-	var patime, pmtime, pbtime *syscall.Filetime
-	if !atime.IsZero() {
-		t := syscall.NsecToFiletime(atime.UnixNano())
-		patime = &t
-	}
-	if !mtime.IsZero() {
-		t := syscall.NsecToFiletime(mtime.UnixNano())
-		pmtime = &t
-	}
-	if !btime.IsZero() {
-		t := syscall.NsecToFiletime(btime.UnixNano())
-		pbtime = &t
-	}
-	return syscall.SetFileTime(h, pbtime, patime, pmtime)
-}
-
-// setBTime sets the birth time of the file passed in
-func setBTime(name string, btime time.Time) (err error) {
-	return setTimes(name, time.Time{}, time.Time{}, btime, false)
-}
-
-// lsetBTime changes the birth time of the link passed in
-func lsetBTime(name string, btime time.Time) error {
-	return setTimes(name, time.Time{}, time.Time{}, btime, true)
-}
+	bFileTime := syscall.NsecToFiletime(btime.UnixNano())
+	return syscall.SetFileTime(h, &bFileTime, nil, nil)
 }
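One detail worth calling out in the `-` side's setTimes: Windows SetFileTime takes the timestamps in the order creation, last-access, last-write, and a nil pointer leaves that timestamp untouched. Setting only the birth time therefore passes the value first and nil for the rest, exactly as both versions do:

    bt := syscall.NsecToFiletime(btime.UnixNano())
    err = syscall.SetFileTime(h, &bt, nil, nil) // creation time only

FILE_FLAG_OPEN_REPARSE_POINT on the CreateFile call is what makes the link variant operate on the symlink itself rather than on its target.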
@@ -68,12 +68,14 @@ var (
 )
 
 // Description of how to authorize
-var oauthConfig = &oauthutil.Config{
+var oauthConfig = &oauth2.Config{
 	ClientID:     api.OAuthClientID,
 	ClientSecret: "",
+	Endpoint: oauth2.Endpoint{
 		AuthURL:   api.OAuthURL,
 		TokenURL:  api.OAuthURL,
 		AuthStyle: oauth2.AuthStyleInParams,
+	},
 }
 
 // Register with Fs
@@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
 	if err != nil || !tokenIsValid(t) {
 		fs.Infof(f, "Valid token not found, authorizing.")
 		ctx := oauthutil.Context(ctx, f.cli)
-		oauth2Conf := oauthConfig.MakeOauth2Config()
-		t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
+		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
 	}
 	if err == nil && !tokenIsValid(t) {
 		err = errors.New("invalid token")
@@ -202,14 +202,9 @@ type SharingLinkType struct {
 type LinkType string
 
 const (
-	// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
-	ViewLinkType LinkType = "view"
-	// EditLinkType (role: write) An edit sharing link, allowing read-write access.
-	EditLinkType LinkType = "edit"
-	// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
-	// content into a host webpage. Embed links are not available for OneDrive for
-	// Business or SharePoint.
-	EmbedLinkType LinkType = "embed"
+	ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
+	EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
+	EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
 )
 
 // LinkScope represents the scope of the link represented by this permission.
@@ -217,12 +212,9 @@ const (
 type LinkScope string
 
 const (
-	// AnonymousScope = Anyone with the link has access, without needing to sign in.
-	// This may include people outside of your organization.
-	AnonymousScope LinkScope = "anonymous"
-	// OrganizationScope = Anyone signed into your organization (tenant) can use the
-	// link to get access. Only available in OneDrive for Business and SharePoint.
-	OrganizationScope LinkScope = "organization"
+	AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
+	OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
 )
 
 // PermissionsType provides information about a sharing permission granted for a DriveItem resource.
@@ -244,14 +236,10 @@ type PermissionsType struct {
 type Role string
 
 const (
-	// ReadRole provides the ability to read the metadata and contents of the item.
-	ReadRole Role = "read"
-	// WriteRole provides the ability to read and modify the metadata and contents of the item.
-	WriteRole Role = "write"
-	// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
-	OwnerRole Role = "owner"
-	// MemberRole represents the member role for SharePoint and OneDrive for Business.
-	MemberRole Role = "member"
+	ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
+	WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
+	OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
+	MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
 )
 
 // PermissionsResponse is the response to the list permissions method
@@ -40,6 +40,7 @@ import (
|
|||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -64,21 +65,14 @@ const (
|
|||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
var (
|
var (
|
||||||
|
authPath = "/common/oauth2/v2.0/authorize"
|
||||||
// Define the paths used for token operations
|
tokenPath = "/common/oauth2/v2.0/token"
|
||||||
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
|
|
||||||
authPath = "/oauth2/v2.0/authorize"
|
|
||||||
tokenPath = "/oauth2/v2.0/token"
|
|
||||||
|
|
||||||
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
|
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
|
||||||
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
|
||||||
|
|
||||||
// When using client credential OAuth flow, scope of .default is required in order
|
// Description of how to auth for this app for a business account
|
||||||
// to use the permissions configured for the application within the tenant
|
oauthConfig = &oauth2.Config{
|
||||||
scopeAccessClientCred = fs.SpaceSepList{".default"}
|
|
||||||
|
|
||||||
// Base config for how to auth
|
|
||||||
oauthConfig = &oauthutil.Config{
|
|
||||||
Scopes: scopeAccess,
|
Scopes: scopeAccess,
|
||||||
ClientID: rcloneClientID,
|
ClientID: rcloneClientID,
|
||||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||||
@@ -189,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
 			Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
 		},
 	},
-}, {
-	Name: "tenant",
-	Help: `ID of the service principal's tenant. Also called its directory ID.
-
-Set this if using
-- Client Credential flow
-`,
-	Sensitive: true,
 }, {
 	Name: "disable_site_permission",
 	Help: `Disable the request for Sites.Read.All permission.
@@ -541,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
 	})
 }
 
-// Make the oauth config for the backend
-func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
-	// Copy the default oauthConfig
-	oauthConfig := *oauthConfig
+// Config the backend
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	region, graphURL := getRegionURL(m)
 
-	// Set the scopes
-	oauthConfig.Scopes = opt.AccessScopes
-	if opt.DisableSitePermission {
+	if config.State == "" {
+		var accessScopes fs.SpaceSepList
+		accessScopesString, _ := m.Get("access_scopes")
+		err := accessScopes.Set(accessScopesString)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
+		}
+		oauthConfig.Scopes = []string(accessScopes)
+		disableSitePermission, _ := m.Get("disable_site_permission")
+		if disableSitePermission == "true" {
 			oauthConfig.Scopes = scopeAccessWithoutSites
 		}
-	// Construct the auth URLs
-	prefix := commonPathPrefix
-	if opt.Tenant != "" {
-		prefix = "/" + opt.Tenant
-	}
-	oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
-	oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
+		oauthConfig.Endpoint = oauth2.Endpoint{
+			AuthURL:  authEndpoint[region] + authPath,
+			TokenURL: authEndpoint[region] + tokenPath,
 
-	// Check to see if we are using client credentials flow
-	if opt.ClientCredentials {
-		// Override scope to .default
-		oauthConfig.Scopes = scopeAccessClientCred
-		if opt.Tenant == "" {
-			return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
-		}
-	}
-
-	return &oauthConfig, nil
-}
-
-// Config the backend
-func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-	_, graphURL := getRegionURL(m)
-
-	// Check to see if this is the start of the state machine execution
-	if conf.State == "" {
-		conf, err := makeOauthConfig(ctx, opt)
-		if err != nil {
-			return nil, err
 		}
 		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
-			OAuth2Config: conf,
+			OAuth2Config: oauthConfig,
 		})
 	}
 
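For reference, the tenant-aware URL construction that the left-hand side performed can be condensed as below. This is a sketch only: the host value assumes the global Microsoft login endpoint, and the names are illustrative rather than the rclone API.

package main

import "fmt"

// endpointsFor mirrors the removed helper's logic: use the "/common" prefix
// unless a tenant (directory) ID is configured, then build the AAD v2.0
// authorize and token URLs from it.
func endpointsFor(host, tenant string) (authURL, tokenURL string) {
	prefix := "/common"
	if tenant != "" {
		prefix = "/" + tenant
	}
	return host + prefix + "/oauth2/v2.0/authorize", host + prefix + "/oauth2/v2.0/token"
}

func main() {
	a, t := endpointsFor("https://login.microsoftonline.com", "")
	fmt.Println(a)
	fmt.Println(t)
}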
@@ -596,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 	if err != nil {
 		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
 	}
 
-	// Create a REST client, build on the OAuth client created above
 	srv := rest.NewClient(oAuthClient)
 
-	switch conf.State {
+	switch config.State {
 	case "choose_type":
 		return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
 			Value: "onedrive",
@@ -626,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 		}})
 	case "choose_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(conf.Result)
+		return fs.ConfigGoto(config.Result)
 	case "onedrive":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
 			opts: rest.Opts{
@@ -644,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 			},
 		})
 	case "driveid":
-		out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
-		if err != nil {
-			return out, err
-		}
-		// Default the drive_id to the previous version in the config
-		out.Option.Default, _ = m.Get("drive_id")
-		return out, nil
+		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
 	case "driveid_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			finalDriveID: conf.Result,
+			finalDriveID: config.Result,
 		})
 	case "siteid":
 		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
 	case "siteid_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
 		})
 	case "url":
 		return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -670,7 +622,7 @@ Examples:
 - "https://XXX.sharepoint.com/teams/ID"
 `)
 	case "url_end":
-		siteURL := conf.Result
+		siteURL := config.Result
 		re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
 		match := re.FindStringSubmatch(siteURL)
 		if len(match) == 2 {
@@ -685,12 +637,12 @@ Examples:
 		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
 	case "path_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			relativePath: conf.Result,
+			relativePath: config.Result,
 		})
 	case "search":
 		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
 	case "search_end":
-		searchTerm := conf.Result
+		searchTerm := config.Result
 		opts := rest.Opts{
 			Method:  "GET",
 			RootURL: graphURL,
@@ -712,10 +664,10 @@ Examples:
 		})
 	case "search_sites":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
 		})
 	case "driveid_final":
-		finalDriveID := conf.Result
+		finalDriveID := config.Result
 
 		// Test the driveID and get drive type
 		opts := rest.Opts{
@@ -734,12 +686,12 @@ Examples:
 
 	return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
 	case "driveid_final_end":
-		if conf.Result == "true" {
+		if config.Result == "true" {
 			return nil, nil
 		}
 		return fs.ConfigGoto("choose_type")
 	}
-	return nil, fmt.Errorf("unknown state %q", conf.State)
+	return nil, fmt.Errorf("unknown state %q", config.State)
 }
 
 // Options defines the configuration for this backend
@@ -750,9 +702,7 @@ type Options struct {
 	DriveType               string          `config:"drive_type"`
 	RootFolderID            string          `config:"root_folder_id"`
 	DisableSitePermission   bool            `config:"disable_site_permission"`
-	ClientCredentials       bool            `config:"client_credentials"`
 	AccessScopes            fs.SpaceSepList `config:"access_scopes"`
-	Tenant                  string          `config:"tenant"`
 	ExposeOneNoteFiles      bool            `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool            `config:"server_side_across_configs"`
 	ListChunk               int64           `config:"list_chunk"`
@@ -1040,10 +990,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
-	oauthConfig, err := makeOauthConfig(ctx, opt)
-	if err != nil {
-		return nil, err
+	oauthConfig.Scopes = opt.AccessScopes
+	if opt.DisableSitePermission {
+		oauthConfig.Scopes = scopeAccessWithoutSites
+	}
+	oauthConfig.Endpoint = oauth2.Endpoint{
+		AuthURL:  authEndpoint[opt.Region] + authPath,
+		TokenURL: authEndpoint[opt.Region] + tokenPath,
 	}
 
 	client := fshttp.NewClient(ctx)
@@ -1656,7 +1609,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -1671,18 +1624,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 		return nil, fs.ErrorCantCopy
 	}
 
-	err = srcObj.readMetaData(ctx)
+	err := srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
 	// Check we aren't overwriting a file on the same remote
 	if srcObj.fs == f {
 		srcPath := srcObj.rootPath()
@@ -2610,11 +2556,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.New("can't upload content to a OneNote file")
 	}
 
-	// Only start the renewer if we have a valid one
-	if o.fs.tokenRenewer != nil {
-		o.fs.tokenRenewer.Start()
-		defer o.fs.tokenRenewer.Stop()
-	}
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()
 
 	size := src.Size()
 
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
 	compareDirMeta(expectedMeta, actualMeta, false)
 
 	// modtime
-	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
+	assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
 	// try changing it and re-check it
 	newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
 	assert.NoError(t, err)
-	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
+	assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
 	// ensure that f.DirSetModTime also works
 	err = f.DirSetModTime(ctx, "subdir", t3)
 	assert.NoError(t, err)
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
 	assert.NoError(t, err)
 	entries.ForDir(func(dir fs.Directory) {
 		if dir.Remote() == "subdir" {
-			fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
+			assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
 		}
 	})
 
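The replacement assertions compare times only up to the precision the backend reports. A minimal, self-contained sketch of that comparison, assuming an illustrative one-second precision in place of a real backend's Precision():

package main

import (
	"fmt"
	"time"
)

func main() {
	precision := time.Second // illustrative; a backend reports its own Precision()
	want := time.Date(2024, 1, 2, 3, 4, 5, 600_000_000, time.UTC)
	got := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)
	// Truncate the expected time to what the remote can store before
	// comparing, as the updated assertions do.
	fmt.Println(want.Truncate(precision).Equal(got)) // true
}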
@@ -22,7 +22,6 @@ import (
 	"github.com/oracle/oci-go-sdk/v65/objectstorage"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/chunksize"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 )
 
@@ -184,9 +183,6 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
 	if ossPartNumber <= 8 {
 		return shouldRetry(ctx, resp.HTTPResponse(), err)
 	}
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
 	// retry all chunks once have done the first few
 	return true, err
 }
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
 		Sensitive: true,
 	}, {
 		Name:      "compartment",
-		Help:      "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
+		Help:      "Object storage compartment OCID",
 		Provider:  "!no_auth",
-		Required:  false,
+		Required:  true,
 		Sensitive: true,
 	}, {
 		Name: "region",
@@ -48,10 +48,12 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
 			AuthURL: "https://my.pcloud.com/oauth2/authorize",
 			// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -59,8 +61,8 @@ var (
 )
 
 // Update the TokenURL with the actual hostname
-func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
-	oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
+func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
+	oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
 }
 
 // Register with Fs
@@ -77,7 +79,7 @@ func init() {
 				fs.Errorf(nil, "Failed to read config: %v", err)
 			}
 			updateTokenURL(oauthConfig, optc.Hostname)
-			checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
+			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
 				if auth == nil || auth.Form == nil {
 					return errors.New("form not found in response")
 				}
@@ -397,15 +399,14 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 	if err != nil {
 		return nil, fmt.Errorf("open file: %w", err)
 	}
-	if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
-		return nil, fmt.Errorf("close file: %w", err)
-	}
 
 	writer := &writerAt{
 		ctx:    ctx,
+		client: client,
 		fs:     f,
 		size:   size,
 		remote: remote,
+		fd:     openResult.FileDescriptor,
 		fileID: openResult.Fileid,
 	}
 
@@ -18,14 +18,21 @@ import (
 // writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
 type writerAt struct {
 	ctx    context.Context
+	client *rest.Client
 	fs     *Fs
 	size   int64
 	remote string
+	fd     int64
 	fileID int64
 }
 
 // Close implements WriterAt.Close.
func (c *writerAt) Close() error {
+	// close fd
+	if _, err := c.fileClose(c.ctx); err != nil {
+		return fmt.Errorf("close fd: %w", err)
+	}
+
 	// Avoiding race conditions: Depending on the tcp connection, there might be
 	// caching issues when checking the size immediately after write.
 	// Hence we try avoiding them by checking the resulting size on a different connection.
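With the client and fd moved onto the struct, each writerAt keeps a single pCloud file descriptor open for its whole life and releases it once in Close, instead of opening and closing a descriptor inside every WriteAt. A rough sketch of that lifecycle, with hypothetical names standing in for the rclone plumbing:

package main

import "fmt"

// conn stands in for the keep-alive connection / file descriptor the writer
// now holds; these names are illustrative, not the rclone API.
type conn struct{ fd int64 }

func (c *conn) pwrite(off int64, p []byte) { fmt.Printf("fd %d: write %d bytes at %d\n", c.fd, len(p), off) }
func (c *conn) close()                     { fmt.Printf("fd %d: closed\n", c.fd) }

type writerAt struct{ c *conn }

// WriteAt reuses the descriptor opened when the writer was created.
func (w *writerAt) WriteAt(p []byte, off int64) (int, error) {
	w.c.pwrite(off, p)
	return len(p), nil
}

// Close releases the descriptor exactly once, at the end of the upload.
func (w *writerAt) Close() error { w.c.close(); return nil }

func main() {
	w := &writerAt{c: &conn{fd: 7}}
	_, _ = w.WriteAt([]byte("abc"), 0)
	_, _ = w.WriteAt([]byte("def"), 3)
	_ = w.Close()
}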
@@ -65,18 +72,8 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
 	inSHA1Bytes := sha1.Sum(buffer)
 	inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
 
-	client, err := c.fs.newSingleConnClient(c.ctx)
-	if err != nil {
-		return 0, fmt.Errorf("create client: %w", err)
-	}
-
-	openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
-	if err != nil {
-		return 0, fmt.Errorf("open file: %w", err)
-	}
-
 	// get target hash
-	outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
+	outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
 	if err != nil {
 		return 0, err
 	}
@@ -92,15 +89,10 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
 	}
 
 	// upload buffer with offset if necessary
-	if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
+	if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
 		return 0, err
 	}
 
-	// close fd
-	if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
-		return contentLength, fmt.Errorf("close fd: %w", err)
-	}
-
 	return contentLength, nil
 }
 
@@ -133,40 +125,11 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
 	return result, nil
 }
 
-// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
-// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
-func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
-	opts := rest.Opts{
-		Method:           "PUT",
-		Path:             "/file_open",
-		Parameters:       url.Values{},
-		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
-		ExtraHeaders: map[string]string{
-			"Connection": "keep-alive",
-		},
-	}
-	opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
-	opts.Parameters.Set("flags", "0x0002") // O_WRITE
-
-	result := &api.FileOpenResponse{}
-	err := srcFs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err := c.CallJSON(ctx, &opts, nil, result)
-		err = result.Error.Update(err)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return nil, fmt.Errorf("open new file descriptor: %w", err)
-	}
-	return result, nil
-}
-
 // Call pcloud file_checksum, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
-func fileChecksum(
+func (c *writerAt) fileChecksum(
 	ctx context.Context,
-	client *rest.Client,
-	pacer *fs.Pacer,
-	fd, offset, count int64,
+	offset, count int64,
 ) (*api.FileChecksumResponse, error) {
 	opts := rest.Opts{
 		Method: "PUT",
@@ -177,29 +140,26 @@ func fileChecksum(
 			"Connection": "keep-alive",
 		},
 	}
-	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
 	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
 	opts.Parameters.Set("count", strconv.FormatInt(count, 10))
 
 	result := &api.FileChecksumResponse{}
-	err := pacer.CallNoRetry(func() (bool, error) {
-		resp, err := client.CallJSON(ctx, &opts, nil, result)
+	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
 		err = result.Error.Update(err)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
+		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
 	}
 	return result, nil
 }
 
 // Call pcloud file_pwrite, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
-func filePWrite(
+func (c *writerAt) filePWrite(
 	ctx context.Context,
-	client *rest.Client,
-	pacer *fs.Pacer,
-	fd int64,
 	offset int64,
 	buf []byte,
 ) (*api.FilePWriteResponse, error) {
@@ -216,29 +176,24 @@ func filePWrite(
 			"Connection": "keep-alive",
 		},
 	}
-	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
 	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
 
 	result := &api.FilePWriteResponse{}
-	err := pacer.CallNoRetry(func() (bool, error) {
-		resp, err := client.CallJSON(ctx, &opts, nil, result)
+	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
 		err = result.Error.Update(err)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
+		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
 	}
 	return result, nil
 }
 
 // Call pcloud file_close, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
-func fileClose(
-	ctx context.Context,
-	client *rest.Client,
-	pacer *fs.Pacer,
-	fd int64,
-) (*api.FileCloseResponse, error) {
+func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
 	opts := rest.Opts{
 		Method: "PUT",
 		Path:   "/file_close",
@@ -246,11 +201,11 @@ func fileClose(
 		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
 		Close:            true,
 	}
-	opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
 
 	result := &api.FileCloseResponse{}
-	err := pacer.CallNoRetry(func() (bool, error) {
-		resp, err := client.CallJSON(ctx, &opts, nil, result)
+	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
 		err = result.Error.Update(err)
 		return shouldRetry(ctx, resp, err)
 	})
@@ -82,11 +82,13 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
 			AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
 			TokenURL:  "https://user.mypikpak.com/v1/auth/token",
 			AuthStyle: oauth2.AuthStyleInParams,
+		},
 		ClientID:    clientID,
 		RedirectURL: oauthutil.RedirectURL,
 	}
@@ -213,11 +215,6 @@ Fill in for rclone to use a non root folder as its starting point.
 			Default:  false,
 			Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
 			Advanced: true,
-		}, {
-			Name:     "no_media_link",
-			Default:  false,
-			Help:     "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
-			Advanced: true,
 		}, {
 			Name: "hash_memory_limit",
 			Help: "Files bigger than this will be cached on disk to calculate hash if required.",
@@ -291,7 +288,6 @@ type Options struct {
 	RootFolderID        string        `config:"root_folder_id"`
 	UseTrash            bool          `config:"use_trash"`
 	TrashedOnly         bool          `config:"trashed_only"`
-	NoMediaLink         bool          `config:"no_media_link"`
 	HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
 	ChunkSize           fs.SizeSuffix `config:"chunk_size"`
 	UploadConcurrency   int           `config:"upload_concurrency"`
@@ -565,7 +561,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 		if strings.Contains(err.Error(), "invalid_grant") {
 			return f, f.reAuthorize(ctx)
 		}
-		return nil, err
 	}
 
 	return f, nil
@@ -1581,18 +1576,19 @@ func (o *Object) setMetaData(info *api.File) (err error) {
 	o.md5sum = info.Md5Checksum
 	if info.Links.ApplicationOctetStream != nil {
 		o.link = info.Links.ApplicationOctetStream
-		if !o.fs.opt.NoMediaLink {
-			if fid := parseFileID(o.link.URL); fid != "" {
-				for _, media := range info.Medias {
-					if media.Link != nil && parseFileID(media.Link.URL) == fid {
-						fs.Debugf(o, "Using a media link")
-						o.link = media.Link
-						break
-					}
+		if fid := parseFileID(o.link.URL); fid != "" {
+			for mid, media := range info.Medias {
+				if media.Link == nil {
+					continue
+				}
+				if mfid := parseFileID(media.Link.URL); fid == mfid {
+					fs.Debugf(o, "Using a media link from Medias[%d]", mid)
+					o.link = media.Link
+					break
 				}
 			}
 		}
 	}
 	return nil
 }
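The rewritten loop prefers the media link whose embedded file ID matches the original download link's. A small sketch of that matching rule in isolation — the URLs and the fid query parameter are hypothetical placeholders for whatever parseFileID extracts:

package main

import (
	"fmt"
	"net/url"
)

// parseID is an illustrative stand-in for parseFileID above: pull a file id
// out of a download URL's query string.
func parseID(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		return ""
	}
	return u.Query().Get("fid")
}

func main() {
	original := "https://dl.example.com/file?fid=abc123"
	medias := []string{
		"https://media.example.com/file?fid=zzz999",
		"https://media.example.com/file?fid=abc123",
	}
	// Prefer the media link that points at the same file as the original
	// link, as the updated setMetaData loop does.
	for i, m := range medias {
		if parseID(m) == parseID(original) {
			fmt.Printf("using media link %d: %s\n", i, m)
			break
		}
	}
}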
@@ -43,6 +43,7 @@ import (
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )
 
 const (
@@ -58,10 +59,12 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://www.premiumize.me/authorize",
 			TokenURL: "https://www.premiumize.me/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -572,17 +572,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
 	if err != nil {
 		return nil, err
 	}
 
-	// We have successfully copied the file to random name
-	// Check to see if file already exists first and delete it if so
-	existingObj, err := f.NewObject(ctx, remote)
-	if err == nil {
-		err = existingObj.Remove(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
-		}
-	}
-
 	err = f.pacer.Call(func() (bool, error) {
 		params := url.Values{}
 		params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
@@ -13,6 +13,7 @@ import (
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
+	"golang.org/x/oauth2"
 )
 
 /*
@@ -40,10 +41,12 @@ const (
 
 var (
 	// Description of how to auth for this app
-	putioConfig = &oauthutil.Config{
+	putioConfig = &oauth2.Config{
 		Scopes: []string{},
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://api.put.io/v2/oauth2/authenticate",
 			TokenURL: "https://api.put.io/v2/oauth2/access_token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -2056,7 +2056,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 			Help:  "One Zone Infrequent Access storage class",
 		}, {
 			Value: "GLACIER",
-			Help:  "Glacier Flexible Retrieval storage class",
+			Help:  "Glacier storage class",
 		}, {
 			Value: "DEEP_ARCHIVE",
 			Help:  "Glacier Deep Archive storage class",
@@ -5866,25 +5866,6 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
 	return resp.Body, err
 }
 
-// middleware to stop the SDK adding `Accept-Encoding: identity`
-func removeDisableGzip() func(*middleware.Stack) error {
-	return func(stack *middleware.Stack) error {
-		_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
-		return err
-	}
-}
-
-// middleware to set Accept-Encoding to how we want it
-//
-// This make sure we download compressed files as-is from all platforms
-func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
-	APIOptions = append(APIOptions, removeDisableGzip())
-	if f.opt.UseAcceptEncodingGzip.Value {
-		APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
-	}
-	return APIOptions
-}
-
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	bucket, bucketPath := o.split()
@@ -5918,8 +5899,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 
 	var APIOptions []func(*middleware.Stack) error
 
-	// Set the SDK to always download compressed files as-is
-	APIOptions = append(APIOptions, o.fs.acceptEncoding()...)
+	// Override the automatic decompression in the transport to
+	// download compressed files as-is
+	if o.fs.opt.UseAcceptEncodingGzip.Value {
+		APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
+	}
 
 	for _, option := range options {
 		switch option.(type) {
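A minimal sketch of what attaching that header middleware to a single SDK call looks like with the AWS SDK for Go v2 — the bucket, key, and function name are placeholders, not rclone code:

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// getCompressed downloads an object while asking S3 to send gzip content
// as-is, mirroring the Accept-Encoding override in the hunk above.
func getCompressed(ctx context.Context, client *s3.Client) error {
	_, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	}, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
	})
	return err
}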
@@ -6070,8 +6054,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		chunkSize:            int64(chunkSize),
 		size:                 size,
 		f:                    f,
-		bucket:               ui.req.Bucket,
-		key:                  ui.req.Key,
+		bucket:               mOut.Bucket,
+		key:                  mOut.Key,
 		uploadID:             mOut.UploadId,
 		multiPartUploadInput: &mReq,
 		completedParts:       make([]types.CompletedPart, 0),
@@ -6175,9 +6159,6 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	if chunkNumber <= 8 {
 		return w.f.shouldRetry(ctx, err)
 	}
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
 	// retry all chunks once have done the first few
 	return true, err
 }

@@ -23,20 +23,14 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	opt := &fstests.Opt{
+	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestS3:",
 		NilObject:  (*Object)(nil),
-		TiersToTest: []string{"STANDARD"},
+		TiersToTest: []string{"STANDARD", "STANDARD_IA"},
 		ChunkedUpload: fstests.ChunkedUploadConfig{
 			MinChunkSize: minChunkSize,
 		},
-	}
-	// Test wider range of tiers on AWS
-	if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
-		opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
-	}
-	fstests.Run(t, opt)
+	})
 }
 
 func TestIntegration2(t *testing.T) {
@@ -99,11 +99,6 @@ Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
 in the new OpenSSH format can't be used.`,
 			IsPassword: true,
 			Sensitive:  true,
-		}, {
-			Name: "pubkey",
-			Help: `SSH public certificate for public certificate based authentication.
-Set this if you have a signed certificate you want to use for authentication.
-If specified will override pubkey_file.`,
 		}, {
 			Name: "pubkey_file",
 			Help: `Optional path to public key file.
@@ -516,7 +511,6 @@ type Options struct {
 	KeyPem         string `config:"key_pem"`
 	KeyFile        string `config:"key_file"`
 	KeyFilePass    string `config:"key_file_pass"`
-	PubKey         string `config:"pubkey"`
 	PubKeyFile     string `config:"pubkey_file"`
 	KnownHostsFile string `config:"known_hosts_file"`
 	KeyUseAgent    bool   `config:"key_use_agent"`
@@ -1003,21 +997,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	// If a public key has been specified then use that
-	if pubkeyFile != "" || opt.PubKey != "" {
-		pubKeyRaw := []byte(opt.PubKey)
-		// Use this error if public key is provided inline and is not a certificate
-		// if public key file is provided instead, use the err in the if block
-		notACertError := errors.New("public key provided is not a certificate: " + opt.PubKey)
-		if opt.PubKey == "" {
-			notACertError = errors.New("public key file is not a certificate file: " + pubkeyFile)
-			err := error(nil)
-			pubKeyRaw, err = os.ReadFile(pubkeyFile)
+	if pubkeyFile != "" {
+		certfile, err := os.ReadFile(pubkeyFile)
 		if err != nil {
 			return nil, fmt.Errorf("unable to read cert file: %w", err)
 		}
-	}
 
-		pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
+		pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
 		if err != nil {
 			return nil, fmt.Errorf("unable to parse cert file: %w", err)
 		}
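A short, self-contained sketch of the certificate check this code performs, assuming golang.org/x/crypto/ssh and a path supplied by the caller:

package sftpsketch

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// loadCert reads an authorized-key formatted file and insists that it
// contains an SSH certificate rather than a bare public key.
func loadCert(path string) (*ssh.Certificate, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("unable to read cert file: %w", err)
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(raw)
	if err != nil {
		return nil, fmt.Errorf("unable to parse cert file: %w", err)
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, errors.New("public key file is not a certificate file: " + path)
	}
	return cert, nil
}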
@@ -1031,7 +1017,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// knows everything it needs.
 		cert, ok := pk.(*ssh.Certificate)
 		if !ok {
-			return nil, notACertError
+			return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
 		}
 		pubsigner, err := ssh.NewCertSigner(cert, signer)
 		if err != nil {
@@ -2101,10 +2087,10 @@ func (file *objectReader) Read(p []byte) (n int, err error) {
 
 // Close a reader of a remote sftp file
 func (file *objectReader) Close() (err error) {
-	// Close the pipeReader so writes to the pipeWriter fail
-	_ = file.pipeReader.Close()
 	// Close the sftpFile - this will likely cause the WriteTo to error
 	err = file.sftpFile.Close()
+	// Close the pipeReader so writes to the pipeWriter fail
+	_ = file.pipeReader.Close()
 	// Wait for the background process to finish
 	<-file.done
 	// Show connection no longer in use

@@ -97,6 +97,7 @@ import (
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )
 
 const (
@@ -114,11 +115,13 @@ const (
 )
 
 // Generate a new oauth2 config which we will update when we know the TokenURL
-func newOauthConfig(tokenURL string) *oauthutil.Config {
-	return &oauthutil.Config{
+func newOauthConfig(tokenURL string) *oauth2.Config {
+	return &oauth2.Config{
 		Scopes: nil,
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://secure.sharefile.com/oauth/authorize",
 			TokenURL: tokenURL,
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectPublicSecureURL,
@@ -133,7 +136,7 @@ func init() {
 		NewFs: NewFs,
 		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 			oauthConfig := newOauthConfig("")
-			checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
+			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
 				if auth == nil || auth.Form == nil {
 					return errors.New("endpoint not found in response")
 				}
@@ -144,7 +147,7 @@ func init() {
 				}
 				endpoint := "https://" + subdomain + "." + apicp
 				m.Set("endpoint", endpoint)
-				oauthConfig.TokenURL = endpoint + tokenPath
+				oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
 				return nil
 			}
 			return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -31,29 +31,13 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
 		}
 	}
 
-	d := &smb2.Dialer{}
-	if f.opt.UseKerberos {
-		cl, err := getKerberosClient()
-		if err != nil {
-			return nil, err
-		}
-
-		spn := f.opt.SPN
-		if spn == "" {
-			spn = "cifs/" + f.opt.Host
-		}
-
-		d.Initiator = &smb2.Krb5Initiator{
-			Client:    cl,
-			TargetSPN: spn,
-		}
-	} else {
-		d.Initiator = &smb2.NTLMInitiator{
+	d := &smb2.Dialer{
+		Initiator: &smb2.NTLMInitiator{
 			User:      f.opt.User,
 			Password:  pass,
 			Domain:    f.opt.Domain,
 			TargetSPN: f.opt.SPN,
-		}
+		},
 	}
 
 	session, err := d.DialConn(ctx, tconn, addr)
@@ -1,78 +0,0 @@
-package smb
-
-import (
-	"fmt"
-	"os"
-	"os/user"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	"github.com/jcmturner/gokrb5/v8/client"
-	"github.com/jcmturner/gokrb5/v8/config"
-	"github.com/jcmturner/gokrb5/v8/credentials"
-)
-
-var (
-	kerberosClient *client.Client
-	kerberosErr    error
-	kerberosOnce   sync.Once
-)
-
-// getKerberosClient returns a Kerberos client that can be used to authenticate.
-func getKerberosClient() (*client.Client, error) {
-	if kerberosClient == nil || kerberosErr == nil {
-		kerberosOnce.Do(func() {
-			kerberosClient, kerberosErr = createKerberosClient()
-		})
-	}
-
-	return kerberosClient, kerberosErr
-}
-
-// createKerberosClient creates a new Kerberos client.
-func createKerberosClient() (*client.Client, error) {
-	cfgPath := os.Getenv("KRB5_CONFIG")
-	if cfgPath == "" {
-		cfgPath = "/etc/krb5.conf"
-	}
-
-	cfg, err := config.Load(cfgPath)
-	if err != nil {
-		return nil, err
-	}
-
-	// Determine the ccache location from the environment, falling back to the
-	// default location.
-	ccachePath := os.Getenv("KRB5CCNAME")
-	switch {
-	case strings.Contains(ccachePath, ":"):
-		parts := strings.SplitN(ccachePath, ":", 2)
-		switch parts[0] {
-		case "FILE":
-			ccachePath = parts[1]
-		case "DIR":
-			primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
-			if err != nil {
-				return nil, err
-			}
-			ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
-		default:
-			return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
-		}
-	case ccachePath == "":
-		u, err := user.Current()
-		if err != nil {
-			return nil, err
-		}
-
-		ccachePath = "/tmp/krb5cc_" + u.Uid
-	}
-
-	ccache, err := credentials.LoadCCache(ccachePath)
-	if err != nil {
-		return nil, err
-	}
-
-	return client.NewFromCCache(ccache, cfg)
-}
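The deleted helper's most subtle part is resolving KRB5CCNAME, which may be a bare path, a FILE: prefix, or a DIR: collection whose active cache is named in a "primary" file. A self-contained sketch of just that resolution, following the removed logic:

package main

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
)

// ccachePath resolves the Kerberos credentials cache location the same way
// the removed createKerberosClient did.
func ccachePath() (string, error) {
	path := os.Getenv("KRB5CCNAME")
	switch {
	case strings.Contains(path, ":"):
		parts := strings.SplitN(path, ":", 2)
		switch parts[0] {
		case "FILE":
			return parts[1], nil
		case "DIR":
			// a DIR ccache names its active cache in a "primary" file
			primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
			if err != nil {
				return "", err
			}
			return filepath.Join(parts[1], strings.TrimSpace(string(primary))), nil
		default:
			return "", fmt.Errorf("unsupported KRB5CCNAME: %s", path)
		}
	case path == "":
		u, err := user.Current()
		if err != nil {
			return "", err
		}
		return "/tmp/krb5cc_" + u.Uid, nil
	}
	return path, nil
}

func main() {
	p, err := ccachePath()
	fmt.Println(p, err)
}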
@@ -76,16 +76,6 @@ authentication, and it often needs to be set for clusters. For example:
 Leave blank if not sure.
 `,
 			Sensitive: true,
-		}, {
-			Name: "use_kerberos",
-			Help: `Use Kerberos authentication.
-
-If set, rclone will use Kerberos authentication instead of NTLM. This
-requires a valid Kerberos configuration and credentials cache to be
-available, either in the default locations or as specified by the
-KRB5_CONFIG and KRB5CCNAME environment variables.
-`,
-			Default: false,
 		}, {
 			Name:    "idle_timeout",
 			Default: fs.Duration(60 * time.Second),
@@ -136,7 +126,6 @@ type Options struct {
 	Pass            string      `config:"pass"`
 	Domain          string      `config:"domain"`
 	SPN             string      `config:"spn"`
-	UseKerberos     bool        `config:"use_kerberos"`
 	HideSpecial     bool        `config:"hide_special_share"`
 	CaseInsensitive bool        `config:"case_insensitive"`
 	IdleTimeout     fs.Duration `config:"idle_timeout"`
@@ -612,10 +601,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
 	}
 
 	fi, err := cn.smbShare.Stat(reqDir)
-	if err != nil {
-		return fmt.Errorf("SetModTime: stat: %w", err)
+	if err == nil {
+		o.statResult = fi
 	}
-	o.statResult = fi
 	return err
 }
 
@@ -697,6 +685,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 	defer func() {
+		o.statResult, _ = cn.smbShare.Stat(filename)
 		o.fs.putConnection(&cn)
 	}()
 
@@ -734,7 +723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update Close failed: %w", err)
 	}
 
-	// Set the modified time and also o.statResult
+	// Set the modified time
 	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return fmt.Errorf("Update SetModTime failed: %w", err)

@@ -2,7 +2,6 @@
 package smb_test
 
 import (
-	"path/filepath"
 	"testing"
 
 	"github.com/rclone/rclone/backend/smb"
@@ -16,13 +15,3 @@ func TestIntegration(t *testing.T) {
 		NilObject: (*smb.Object)(nil),
 	})
 }
-
-func TestIntegration2(t *testing.T) {
-	krb5Dir := t.TempDir()
-	t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
-	t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestSMBKerberos:rclone",
-		NilObject:  (*smb.Object)(nil),
-	})
-}
@@ -35,7 +35,6 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
@@ -868,13 +867,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	err = srcObj.readMetaData(ctx)
+	err := srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -891,13 +890,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 		return nil, err
 	}
 
-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
@@ -161,24 +161,7 @@ Set to 0 to disable chunked uploading.
|
|||||||
Default: false,
|
Default: false,
|
||||||
},
|
},
|
||||||
fshttp.UnixSocketConfig,
|
fshttp.UnixSocketConfig,
|
||||||
{
|
},
|
||||||
Name: "auth_redirect",
|
|
||||||
Help: `Preserve authentication on redirect.
|
|
||||||
|
|
||||||
If the server redirects rclone to a new domain when it is trying to
|
|
||||||
read a file then normally rclone will drop the Authorization: header
|
|
||||||
from the request.
|
|
||||||
|
|
||||||
This is standard security practice to avoid sending your credentials
|
|
||||||
to an unknown webserver.
|
|
||||||
|
|
||||||
However this is desirable in some circumstances. If you are getting
|
|
||||||
an error like "401 Unauthorized" when rclone is attempting to read
|
|
||||||
files from the webdav server then you can try this option.
|
|
||||||
`,
|
|
||||||
Advanced: true,
|
|
||||||
Default: false,
|
|
||||||
}},
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -197,7 +180,6 @@ type Options struct {
|
|||||||
ExcludeShares bool `config:"owncloud_exclude_shares"`
|
ExcludeShares bool `config:"owncloud_exclude_shares"`
|
||||||
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
|
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
|
||||||
UnixSocket string `config:"unix_socket"`
|
UnixSocket string `config:"unix_socket"`
|
||||||
AuthRedirect bool `config:"auth_redirect"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote webdav
|
// Fs represents a remote webdav
|
||||||
@@ -1474,7 +1456,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
ExtraHeaders: map[string]string{
|
ExtraHeaders: map[string]string{
|
||||||
"Depth": "0",
|
"Depth": "0",
|
||||||
},
|
},
|
||||||
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
|
|
||||||
}
|
}
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||||
@@ -22,13 +22,13 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )

 // oAuth
@@ -46,9 +46,11 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
 			AuthURL:  "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
 			TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -711,7 +713,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -719,21 +721,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 	}

 	dstPath := f.filePath(remote)
-	err = f.mkParentDirs(ctx, dstPath)
+	err := f.mkParentDirs(ctx, dstPath)
 	if err != nil {
 		return nil, err
 	}

-	// Find and remove existing object
-	//
-	// Note that the overwrite flag doesn't seem to work for server side copy
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
 	err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)

 	if err != nil {
 		return nil, fmt.Errorf("couldn't copy file: %w", err)
 	}
@@ -47,7 +47,7 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: []string{
 			"aaaserver.profile.read",
 			"WorkDrive.team.READ",
@@ -55,10 +55,11 @@ var (
 			"WorkDrive.files.ALL",
 			"ZohoFiles.files.ALL",
 		},
+		Endpoint: oauth2.Endpoint{
 			AuthURL:   "https://accounts.zoho.eu/oauth/v2/auth",
 			TokenURL:  "https://accounts.zoho.eu/oauth/v2/token",
 			AuthStyle: oauth2.AuthStyleInParams,
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -275,8 +276,8 @@ func setupRegion(m configmap.Mapper) error {
 	downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
 	uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
 	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
-	oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
-	oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
+	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
+	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
 	return nil
 }

@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
         continue
     fi

-    commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
+    commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
     if [ "$commit" == "" ]; then
-        commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
+        commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
     fi
-    version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
+    version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
     echo $backend $version
     sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
 done
@@ -7,7 +7,6 @@ conversion into man pages etc.
 import os
 import re
 import time
-import subprocess
 from datetime import datetime

 docpath = "docs/content"
@@ -36,7 +35,6 @@ docs = [
     "box.md",
     "cache.md",
     "chunker.md",
-    "cloudinary.md",
     "sharefile.md",
     "crypt.md",
     "compress.md",
@@ -193,23 +191,13 @@ def main():
     command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
     build_date = datetime.utcfromtimestamp(
         int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
-    help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
     with open(outfile, "w") as out:
         out.write("""\
 %% rclone(1) User Manual
 %% Nick Craig-Wood
 %% %s

-# NAME
-
-rclone - manage files on cloud storage
-
-# SYNOPSIS
-
-```
-%s
-```
-""" % (build_date.strftime("%b %d, %Y"), help_output))
+""" % build_date.strftime("%b %d, %Y"))
     for doc in docs:
         contents = read_doc(doc)
         # Substitute the commands into doc.md
@@ -7,18 +7,15 @@ Run with no arguments to test all backends or a supply a list of
 backends to test.
 """

-import os
-import re
-import sys
-import subprocess
-
 all_backends = "backend/all/all.go"

 # compile command which is more or less like the production builds
 compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

-# disable CGO as that makes a lot of difference to binary size
-os.environ["CGO_ENABLED"]="0"
+import os
+import re
+import sys
+import subprocess

 match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')

@@ -46,9 +43,6 @@ def write_all(orig_all, backend):
         # Comment out line matching backend
         if match and match.group(1) == backend:
             line = "// " + line
-            # s3 and pikpak depend on each other
-            if backend == "s3" and "pikpak" in line:
-                line = "// " + line
         fd.write(line+"\n")

 def compile():
@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
     exit 1
 fi
 VERSION="$1"
-ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
+ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')

cat > "/tmp/${VERSION}-release-notes" <<EOF
This is the ${VERSION} release of rclone.
@@ -5,13 +5,20 @@ import (
 	"bytes"
 	"log"

+	"github.com/rclone/rclone/fs"
 	"github.com/sirupsen/logrus"
 )

 // CaptureOutput runs a function capturing its output.
 func CaptureOutput(fun func()) []byte {
 	logSave := log.Writer()
-	logrusSave := logrus.StandardLogger().Out
+	logrusSave := logrus.StandardLogger().Writer()
+	defer func() {
+		err := logrusSave.Close()
+		if err != nil {
+			fs.Errorf(nil, "error closing logrusSave: %v", err)
+		}
+	}()
 	buf := &bytes.Buffer{}
 	log.SetOutput(buf)
 	logrus.SetOutput(buf)
@@ -9,7 +9,7 @@ import (
 	"github.com/rclone/rclone/cmd/bisync/bilib"
 	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/assert"
-	"gopkg.in/yaml.v3"
+	"gopkg.in/yaml.v2"
 )

 const configFile = "../../fstest/test_all/config.yaml"
@@ -63,40 +63,40 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
 	}

 	if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
-		fs.Log(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
+		fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) ///nolint:govet
 		ci.CheckSum = false
 		// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
 	} else if b.opt.Compare.Checksum && !ci.CheckSum {
-		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
+		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
 	}
 	if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
-		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
+		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
 	}
 	if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
 		if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
 			fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
 			Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
-				b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
+				b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
 			b.opt.Compare.Modtime = true
 			b.opt.Compare.Size = true
 			ci.CheckSum = false
 			b.opt.Compare.Checksum = false
 		} else {
-			fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
+			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
 			// note: --checksum will still affect the internal sync calls
 		}
 	}
 	if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
-		fs.Infoc(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
+		fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
 		b.opt.IgnoreListingChecksum = true
 	}
 	if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
-		return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
+		return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
 	}

 	notSupported := func(label string, value bool, opt *bool) {
 		if value {
-			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
+			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
 			*opt = false
 		}
 	}
@@ -123,13 +123,13 @@ func sizeDiffers(a, b int64) bool {
 func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
 	if a == "" || b == "" {
 		if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
-			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
+			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
 		}
 		return false
 	}
 	if ht1 != ht2 {
 		if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
-			fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
+			fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
 			return false
 		}
 	}
@@ -151,7 +151,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
 			return
 		}
 	} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
-		fs.Log(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
+		fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
 		b.opt.Compare.SlowHashSyncOnly = false
 		b.opt.Compare.NoSlowHash = true
 		ci.CheckSum = false
@@ -159,7 +159,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
 	}

 	if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
-		fs.Log(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
+		fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
 		fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
 		fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
 		b.opt.Compare.Modtime = true
@@ -167,25 +167,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
 		ci.CheckSum = false
 	}
 	if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
-		fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
+		fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
 		b.opt.Compare.HashType1 = hash.None
 	} else {
 		b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
 		if b.opt.Compare.HashType1 != hash.None {
-			fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
+			fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
 		}
 	}
 	if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
-		fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
+		fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
 		b.opt.Compare.HashType1 = hash.None
 	} else {
 		b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
 		if b.opt.Compare.HashType2 != hash.None {
-			fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
+			fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
 		}
 	}
 	if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
-		fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
+		fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
 		b.opt.Compare.Checksum = false
 		ci.CheckSum = false
 		b.opt.IgnoreListingChecksum = true
@@ -232,7 +232,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 			b.opt.Compare.Checksum = true
 			CompareFlag.Checksum = true
 		default:
-			return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
+			return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
 		}
 	}

@@ -284,14 +284,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
 	}
 	if o.Size() < 0 {
 		downloadHashWarn.Do(func() {
-			fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
+			fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
 		})
 		fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
 		return hashVal, hash.ErrUnsupported
 	}

 	firstDownloadHash.Do(func() {
-		fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
+		fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
 	})
 	tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
 	defer func() {
@@ -66,8 +66,7 @@ func quotePath(path string) string {
 	return escapePath(path, true)
 }

-// Colors controls whether terminal colors are enabled
-var Colors bool
+var Colors bool // Colors controls whether terminal colors are enabled

 // Color handles terminal colors for bisync
 func Color(style string, s string) string {
@@ -78,15 +77,6 @@ func Color(style string, s string) string {
 	return style + s + terminal.Reset
 }

-// ColorX handles terminal colors for bisync
-func ColorX(style string, s string) string {
-	if !Colors {
-		return s
-	}
-	terminal.Start()
-	return style + s + terminal.Reset
-}
-
 func encode(s string) string {
 	return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
 }
@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 	finaliseOnce.Do(func() {
 		if atexit.Signalled() {
 			if b.opt.Resync {
-				fs.Log(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
+				fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
 			} else {
-				fs.Log(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
+				fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
 				b.InGracefulShutdown = true
 				if b.SyncCI != nil {
-					fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
+					fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
 					b.SyncCI.MaxTransfer = 1
 					b.SyncCI.MaxDuration = 1 * time.Second
 					b.SyncCI.CutoffMode = fs.CutoffModeSoft
 					gracePeriod := 30 * time.Second // TODO: flag to customize this?
 					if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
-						fs.Log(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
+						fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
 						b.CancelSync()
 						waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
 					}
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 				// we haven't started to sync yet, so we're good.
 				// no need to worry about the listing files, as we haven't overwritten them yet.
 				b.CleanupCompleted = true
-				fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
+				fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
 			}
 		}
 		if !b.CleanupCompleted {
 			if !b.opt.Resync {
-				fs.Log(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
-				fs.Log(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
+				fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
+				fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
 			}
 			markFailed(b.listing1)
 			markFailed(b.listing2)
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 			b.critical = false
 		}
 		if err == nil {
-			fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
+			fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
 		}
 	}

 	if b.critical {
 		if b.retryable && b.opt.Resilient {
-			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
-			fs.Error(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
+			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
+			fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
 		} else {
 			if bilib.FileExists(b.listing1) {
 				_ = os.Rename(b.listing1, b.listing1+"-err")
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 				_ = os.Rename(b.listing2, b.listing2+"-err")
 			}
 			fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
-			fs.Error(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
+			fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
 		}
 		return ErrBisyncAborted
 	}
 	if b.abort && !b.InGracefulShutdown {
-		fs.Log(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
+		fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
 	}
 	if err == nil {
-		fs.Infoc(nil, Color(terminal.GreenFg, "Bisync successful"))
+		fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
 	}
 	return err
 }
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 	if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
 		errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
 		errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
-		fs.Log(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
+		fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
 		if opt.CheckSync != CheckSyncFalse {
 			// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
 			fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 				b.retryable = true
 				return err
 			}
-			fs.Infoc(nil, Color(terminal.GreenFg, "Backup listing is valid."))
+			fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
 		}
 		b.revertToOldListings()
 	} else {
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 	fs.Infof(nil, "Building Path1 and Path2 listings")
 	ls1, ls2, err = b.makeMarchListing(fctx)
 	if err != nil || accounting.Stats(fctx).Errored() {
-		fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
+		fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
 		b.critical = true
 		b.retryable = true
 		return err
@@ -623,7 +623,7 @@ func (b *bisyncRun) checkSyntax() error {

 func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
 	if b.DebugName != "" && b.DebugName == nametocheck {
-		fs.Infoc(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
+		fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
 	}
 }

@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
 	prettyprint(result, "writing result", fs.LogLevelDebug)
 	if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
 		once.Do(func() {
-			fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
+			fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
 		})
 	}

@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
 		if winningPath > 0 {
 			fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
 		} else {
-			fs.Infoc(file, Color(terminal.RedFg, "A winner could not be determined."))
+			fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
 		}
 	}

@@ -15,7 +15,7 @@ import (
 // and either flag is sufficient without the other.
 func (b *bisyncRun) setResyncDefaults() {
 	if b.opt.Resync && b.opt.ResyncMode == PreferNone {
-		fs.Debug(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
+		fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
 		b.opt.ResyncMode = PreferPath1
 	}
 	if b.opt.ResyncMode != PreferNone {
@@ -80,7 +80,6 @@ INFO : Path2 checking for diffs
 INFO : Applying changes
 INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir[0m
 INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
-INFO : subdir: Making directory
 INFO : Updating listings
 INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
 INFO : [32mBisync successful[0m
@@ -121,6 +121,19 @@ func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, err
 	return leaf, dir, errc
 }

+// lookup a File given a path
+func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
+	node, errc := fsys.lookupNode(path)
+	if errc != 0 {
+		return nil, errc
+	}
+	file, ok := node.(*vfs.File)
+	if !ok {
+		return nil, -fuse.EISDIR
+	}
+	return file, 0
+}
+
 // get a node and handle from the path or from the fh if not fhUnset
 //
 // handle may be nil
@@ -141,9 +154,15 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
 	Size := uint64(node.Size())
 	Blocks := (Size + 511) / 512
 	modTime := node.ModTime()
+	Mode := node.Mode().Perm()
+	if node.IsDir() {
+		Mode |= fuse.S_IFDIR
+	} else {
+		Mode |= fuse.S_IFREG
+	}
 	//stat.Dev = 1
 	stat.Ino = node.Inode() // FIXME do we need to set the inode number?
-	stat.Mode = getMode(node)
+	stat.Mode = uint32(Mode)
 	stat.Nlink = 1
 	stat.Uid = fsys.VFS.Opt.UID
 	stat.Gid = fsys.VFS.Opt.GID
@@ -490,15 +509,14 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {

 // Symlink creates a symbolic link.
 func (fsys *FS) Symlink(target string, newpath string) (errc int) {
-	defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
-	return translateError(fsys.VFS.Symlink(target, newpath))
+	defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
+	return -fuse.ENOSYS
 }

 // Readlink reads the target of a symbolic link.
 func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
-	defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath)
-	linkPath, err := fsys.VFS.Readlink(path)
-	return translateError(err), linkPath
+	defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
+	return -fuse.ENOSYS, ""
 }

 // Chmod changes the permission bits of a file.
@@ -562,7 +580,7 @@ func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string
 		return errc, ""
 	}
 	normalisedPath = node.Path()
-	if !strings.HasPrefix(normalisedPath, "/") {
+	if !strings.HasPrefix("/", normalisedPath) {
 		normalisedPath = "/" + normalisedPath
 	}
 	return 0, normalisedPath
@@ -597,8 +615,6 @@ func translateError(err error) (errc int) {
 		return -fuse.ENOSYS
 	case vfs.EINVAL:
 		return -fuse.EINVAL
-	case vfs.ELOOP:
-		return -fuse.ELOOP
 	}
 	fs.Errorf(nil, "IO error: %v", err)
 	return -fuse.EIO
@@ -630,22 +646,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
 	return outFlags
 }

-// get the Mode from a vfs Node
-func getMode(node os.FileInfo) uint32 {
-	vfsMode := node.Mode()
-	Mode := vfsMode.Perm()
-	if vfsMode&os.ModeDir != 0 {
-		Mode |= fuse.S_IFDIR
-	} else if vfsMode&os.ModeSymlink != 0 {
-		Mode |= fuse.S_IFLNK
-	} else if vfsMode&os.ModeNamedPipe != 0 {
-		Mode |= fuse.S_IFIFO
-	} else {
-		Mode |= fuse.S_IFREG
-	}
-	return uint32(Mode)
-}
-
 // Make sure interfaces are satisfied
 var (
 	_ fuse.FileSystemInterface = (*FS)(nil)
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"os"
 	"runtime"
+	"strings"
 	"time"

 	"github.com/rclone/rclone/cmd/mountlib"
@@ -34,6 +35,19 @@ func init() {
 	buildinfo.Tags = append(buildinfo.Tags, "cmount")
 }

+// Find the option string in the current options
+func findOption(name string, options []string) (found bool) {
+	for _, option := range options {
+		if option == "-o" {
+			continue
+		}
+		if strings.Contains(option, name) {
+			return true
+		}
+	}
+	return false
+}
+
 // mountOptions configures the options from the command line flags
 func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
 	// Options
@@ -79,9 +93,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 	if VFS.Opt.ReadOnly {
 		options = append(options, "-o", "ro")
 	}
-	//if opt.WritebackCache {
+	if opt.WritebackCache {
 		// FIXME? options = append(options, "-o", WritebackCache())
-	//}
+	}
 	if runtime.GOOS == "darwin" {
 		if opt.VolumeName != "" {
 			options = append(options, "-o", "volname="+opt.VolumeName)
@@ -97,7 +111,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 	for _, option := range opt.ExtraOptions {
 		options = append(options, "-o", option)
 	}
-	options = append(options, opt.ExtraFlags...)
+	for _, option := range opt.ExtraFlags {
+		options = append(options, option)
+	}
 	return options
 }

@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path"
 	"syscall"
 	"time"

@@ -34,7 +33,7 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
 	a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
 	a.Gid = d.VFS().Opt.GID
 	a.Uid = d.VFS().Opt.UID
-	a.Mode = d.Mode()
+	a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
 	modTime := d.ModTime()
 	a.Atime = modTime
 	a.Mtime = modTime
@@ -141,13 +140,11 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
 // Create makes a new file
 func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
 	defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
-	// translate the fuse flags to os flags
-	osFlags := int(req.Flags) | os.O_CREATE
-	file, err := d.Dir.Create(req.Name, osFlags)
+	file, err := d.Dir.Create(req.Name, int(req.Flags))
 	if err != nil {
 		return nil, nil, translateError(err)
 	}
-	fh, err := file.Open(osFlags)
+	fh, err := file.Open(int(req.Flags) | os.O_CREATE)
 	if err != nil {
 		return nil, nil, translateError(err)
 	}
@@ -203,6 +200,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
 	if !ok {
 		return fmt.Errorf("unknown Dir type %T", newDir)
 	}
+
 	err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
 	if err != nil {
 		return translateError(err)
@@ -241,24 +239,6 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
 	return nil, syscall.ENOSYS
 }

-var _ fusefs.NodeSymlinker = (*Dir)(nil)
-
-// Symlink create a symbolic link.
-func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
-	defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)
-
-	newName := path.Join(d.Path(), req.NewName)
-	target := req.Target
-
-	n, err := d.VFS().CreateSymlink(target, newName)
-	if err != nil {
-		return nil, err
-	}
-
-	node = &File{n.(*vfs.File), d.fsys}
-	return node, nil
-}
-
 // Check interface satisfied
 var _ fusefs.NodeMknoder = (*Dir)(nil)

@@ -32,7 +32,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
 	Blocks := (Size + 511) / 512
 	a.Gid = f.VFS().Opt.GID
 	a.Uid = f.VFS().Opt.UID
-	a.Mode = f.File.Mode() &^ os.ModeAppend
+	a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
 	a.Size = Size
 	a.Atime = modTime
 	a.Mtime = modTime
@@ -129,11 +129,3 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
 }

 var _ fusefs.NodeRemovexattrer = (*File)(nil)
-
-var _ fusefs.NodeReadlinker = (*File)(nil)
-
-// Readlink read symbolic link target.
-func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
-	defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err)
-	return f.VFS().Readlink(f.Path())
-}
@@ -100,8 +100,6 @@ func translateError(err error) error {
 		return syscall.ENOSYS
 	case vfs.EINVAL:
 		return fuse.Errno(syscall.EINVAL)
-	case vfs.ELOOP:
-		return fuse.Errno(syscall.ELOOP)
 	}
 	fs.Errorf(nil, "IO error: %v", err)
 	return err
@@ -51,14 +51,9 @@ func (f *FS) SetDebug(debug bool) {

 // get the Mode from a vfs Node
 func getMode(node os.FileInfo) uint32 {
-	vfsMode := node.Mode()
-	Mode := vfsMode.Perm()
-	if vfsMode&os.ModeDir != 0 {
+	Mode := node.Mode().Perm()
+	if node.IsDir() {
 		Mode |= fuse.S_IFDIR
-	} else if vfsMode&os.ModeSymlink != 0 {
-		Mode |= fuse.S_IFLNK
-	} else if vfsMode&os.ModeNamedPipe != 0 {
-		Mode |= fuse.S_IFIFO
 	} else {
 		Mode |= fuse.S_IFREG
 	}
@@ -133,8 +128,6 @@ func translateError(err error) syscall.Errno {
 		return syscall.ENOSYS
 	case vfs.EINVAL:
 		return syscall.EINVAL
-	case vfs.ELOOP:
-		return syscall.ELOOP
 	}
 	fs.Errorf(nil, "IO error: %v", err)
 	return syscall.EIO
|
|||||||
@@ -227,7 +227,7 @@ type dirStream struct {
|
|||||||
// HasNext indicates if there are further entries. HasNext
|
// HasNext indicates if there are further entries. HasNext
|
||||||
// might be called on already closed streams.
|
// might be called on already closed streams.
|
||||||
func (ds *dirStream) HasNext() bool {
|
func (ds *dirStream) HasNext() bool {
|
||||||
return ds.i < len(ds.nodes)+2
|
return ds.i < len(ds.nodes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next retrieves the next entry. It is only called if HasNext
|
// Next retrieves the next entry. It is only called if HasNext
|
||||||
@@ -235,22 +235,7 @@ func (ds *dirStream) HasNext() bool {
 // indicate I/O errors
 func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
     // defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
-    if ds.i == 0 {
-        ds.i++
-        return fuse.DirEntry{
-            Mode: fuse.S_IFDIR,
-            Name: ".",
-            Ino:  0, // FIXME
-        }, 0
-    } else if ds.i == 1 {
-        ds.i++
-        return fuse.DirEntry{
-            Mode: fuse.S_IFDIR,
-            Name: "..",
-            Ino:  0, // FIXME
-        }, 0
-    }
-    fi := ds.nodes[ds.i-2]
+    fi := ds.nodes[ds.i]
     de = fuse.DirEntry{
         // Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
         // are considered.
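The two dirStream hunks above are a matched pair: when HasNext advertises len(nodes)+2 entries, Next has to synthesize "." and ".." at offsets 0 and 1 and shift every real entry down by two. A stripped-down sketch of that offset bookkeeping, with plain strings standing in for fuse.DirEntry:

```go
package main

import "fmt"

// dirStream yields "." and ".." before the real entries - the same
// offset-by-two scheme as the fuse dirStream in the diff above.
type dirStream struct {
	nodes []string
	i     int
}

func (ds *dirStream) HasNext() bool { return ds.i < len(ds.nodes)+2 }

func (ds *dirStream) Next() string {
	defer func() { ds.i++ }()
	switch ds.i {
	case 0:
		return "."
	case 1:
		return ".."
	}
	return ds.nodes[ds.i-2] // real entries start at offset 2
}

func main() {
	ds := &dirStream{nodes: []string{"a.txt", "b.txt"}}
	for ds.HasNext() {
		fmt.Println(ds.Next())
	}
}
```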
@@ -458,31 +443,3 @@ func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn
 }
 
 var _ fusefs.NodeListxattrer = (*Node)(nil)
-
-var _ fusefs.NodeReadlinker = (*Node)(nil)
-
-// Readlink read symbolic link target.
-func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
-    defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
-    path := n.node.Path()
-    s, serr := n.node.VFS().Readlink(path)
-    return []byte(s), translateError(serr)
-}
-
-var _ fusefs.NodeSymlinker = (*Node)(nil)
-
-// Symlink create symbolic link.
-func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
-    defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
-    fullPath := path.Join(n.node.Path(), name)
-    vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
-    if serr != nil {
-        return nil, translateError(serr)
-    }
-
-    n.fsys.setEntryOut(vfsNode, out)
-    newNode := newNode(n.fsys, vfsNode)
-    newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
-
-    return newInode, 0
-}
@@ -373,9 +373,6 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {
 
     m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
     if err != nil {
-        if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
-            return nil, fmt.Errorf("mounting is not supported when running from snap")
-        }
         return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
     }
     m.MountedOn = time.Now()
@@ -3,9 +3,6 @@
 package nfsmount
 
 import (
-    "context"
-    "errors"
-    "os"
     "os/exec"
     "runtime"
     "testing"

@@ -33,24 +30,7 @@ func TestMount(t *testing.T) {
         }
         sudo = true
     }
-    for _, cacheType := range []string{"memory", "disk", "symlink"} {
-        t.Run(cacheType, func(t *testing.T) {
-            nfs.Opt.HandleCacheDir = t.TempDir()
-            require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
-            // Check we can create a handler
-            _, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
-            if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
-                t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
-            }
-            require.NoError(t, err)
-            // Configure rclone via environment var since the mount gets run in a subprocess
-            _ = os.Setenv("RCLONE_NFS_CACHE_DIR", nfs.Opt.HandleCacheDir)
-            _ = os.Setenv("RCLONE_NFS_CACHE_TYPE", cacheType)
-            t.Cleanup(func() {
-                _ = os.Unsetenv("RCLONE_NFS_CACHE_DIR")
-                _ = os.Unsetenv("RCLONE_NFS_CACHE_TYPE")
-            })
-            vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
-        })
-    }
+    nfs.Opt.HandleCacheDir = t.TempDir()
+    require.NoError(t, nfs.Opt.HandleCache.Set("disk"))
+    vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
 }
@@ -11,7 +11,6 @@ import (
     "path"
     "path/filepath"
     "regexp"
-    "sort"
     "strings"
 
     "github.com/anacrolix/dms/dlna"

@@ -159,18 +158,6 @@ func (cds *contentDirectoryService) readContainer(o object, host string) (ret []
         }
     }
 
-    // Sort the directory entries by directories first then alphabetically by name
-    sort.Slice(dirEntries, func(i, j int) bool {
-        iNode, jNode := dirEntries[i], dirEntries[j]
-        iIsDir, jIsDir := iNode.IsDir(), jNode.IsDir()
-        if iIsDir && !jIsDir {
-            return true
-        } else if !iIsDir && jIsDir {
-            return false
-        }
-        return strings.ToLower(iNode.Name()) < strings.ToLower(jNode.Name())
-    })
-
     dirEntries, mediaResources := mediaWithResources(dirEntries)
     for _, de := range dirEntries {
         child := object{
@@ -2,15 +2,17 @@ package docker
 
 import (
     "fmt"
+    "math"
     "strings"
 
     "github.com/rclone/rclone/cmd/mountlib"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/vfs/vfscommon"
+
+    "github.com/spf13/pflag"
 )
 
 // applyOptions configures volume from request options.

@@ -110,15 +112,11 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
     for key, val := range vol.Options {
         opt[key] = val
     }
-    mntMap := configmap.Simple{}
-    vfsMap := configmap.Simple{}
     for key := range opt {
         var ok bool
         var err error
-        normalKey := normalOptName(key)
-        underscoreKey := strings.ReplaceAll(normalKey, "-", "_")
 
-        switch normalKey {
+        switch normalOptName(key) {
         case "persist":
             vol.persist, err = opt.GetBool(key)
             ok = true

@@ -131,24 +129,25 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
         }
 
         if !ok {
-            // try to use as a mount option in mntMap
-            if mountlib.OptionsInfo.Get(underscoreKey) != nil {
-                mntMap[underscoreKey] = vol.Options[key]
-                ok = true
+            // try to use as a mount option in mntOpt
+            ok, err = getMountOption(mntOpt, opt, key)
+            if ok && err != nil {
+                return fmt.Errorf("cannot parse mount option %q: %w", key, err)
             }
         }
         if !ok {
-            // try as a vfs option in vfsMap
-            if vfscommon.OptionsInfo.Get(underscoreKey) != nil {
-                vfsMap[underscoreKey] = vol.Options[key]
-                ok = true
+            // try as a vfs option in vfsOpt
+            ok, err = getVFSOption(vfsOpt, opt, key)
+            if ok && err != nil {
+                return fmt.Errorf("cannot parse vfs option %q: %w", key, err)
             }
         }
 
         if !ok {
             // try as a backend option in fsOpt (backends use "_" instead of "-")
-            fsOptName := strings.TrimPrefix(underscoreKey, fsType+"_")
-            hasFsPrefix := underscoreKey != fsOptName
+            optWithPrefix := strings.ReplaceAll(normalOptName(key), "-", "_")
+            fsOptName := strings.TrimPrefix(optWithPrefix, fsType+"_")
+            hasFsPrefix := optWithPrefix != fsOptName
             if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
                 fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
                 return fmt.Errorf("unsupported backend option %q", key)

@@ -160,18 +159,6 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
             }
         }
 
-    // Parse VFS options
-    err = configstruct.Set(vfsMap, vfsOpt)
-    if err != nil {
-        return fmt.Errorf("cannot parse vfs options: %w", err)
-    }
-
-    // Parse Mount options
-    err = configstruct.Set(mntMap, mntOpt)
-    if err != nil {
-        return fmt.Errorf("cannot parse mount options: %w", err)
-    }
-
     // build remote string from fsName, fsType, fsOpt, fsPath
     colon := ":"
     comma := ","
@@ -191,6 +178,150 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
     return vol.validate()
 }
 
+func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok bool, err error) {
+    ok = true
+    switch normalOptName(key) {
+    case "debug-fuse":
+        mntOpt.DebugFUSE, err = opt.GetBool(key)
+    case "attr-timeout":
+        mntOpt.AttrTimeout, err = opt.GetFsDuration(key)
+    case "option":
+        mntOpt.ExtraOptions, err = getStringArray(opt, key)
+    case "fuse-flag":
+        mntOpt.ExtraFlags, err = getStringArray(opt, key)
+    case "daemon":
+        mntOpt.Daemon, err = opt.GetBool(key)
+    case "daemon-timeout":
+        mntOpt.DaemonTimeout, err = opt.GetFsDuration(key)
+    case "default-permissions":
+        mntOpt.DefaultPermissions, err = opt.GetBool(key)
+    case "allow-non-empty":
+        mntOpt.AllowNonEmpty, err = opt.GetBool(key)
+    case "allow-root":
+        mntOpt.AllowRoot, err = opt.GetBool(key)
+    case "allow-other":
+        mntOpt.AllowOther, err = opt.GetBool(key)
+    case "async-read":
+        mntOpt.AsyncRead, err = opt.GetBool(key)
+    case "max-read-ahead":
+        err = getFVarP(&mntOpt.MaxReadAhead, opt, key)
+    case "write-back-cache":
+        mntOpt.WritebackCache, err = opt.GetBool(key)
+    case "volname":
+        mntOpt.VolumeName, err = opt.GetString(key)
+    case "noappledouble":
+        mntOpt.NoAppleDouble, err = opt.GetBool(key)
+    case "noapplexattr":
+        mntOpt.NoAppleXattr, err = opt.GetBool(key)
+    case "network-mode":
+        mntOpt.NetworkMode, err = opt.GetBool(key)
+    default:
+        ok = false
+    }
+    return
+}
+
+func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool, err error) {
+    var intVal int64
+    ok = true
+    switch normalOptName(key) {
+
+    // options prefixed with "vfs-"
+    case "vfs-cache-mode":
+        err = getFVarP(&vfsOpt.CacheMode, opt, key)
+    case "vfs-cache-poll-interval":
+        vfsOpt.CachePollInterval, err = opt.GetFsDuration(key)
+    case "vfs-cache-max-age":
+        vfsOpt.CacheMaxAge, err = opt.GetFsDuration(key)
+    case "vfs-cache-max-size":
+        err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
+    case "vfs-read-chunk-size":
+        err = getFVarP(&vfsOpt.ChunkSize, opt, key)
+    case "vfs-read-chunk-size-limit":
+        err = getFVarP(&vfsOpt.ChunkSizeLimit, opt, key)
+    case "vfs-case-insensitive":
+        vfsOpt.CaseInsensitive, err = opt.GetBool(key)
+    case "vfs-write-wait":
+        vfsOpt.WriteWait, err = opt.GetFsDuration(key)
+    case "vfs-read-wait":
+        vfsOpt.ReadWait, err = opt.GetFsDuration(key)
+    case "vfs-write-back":
+        vfsOpt.WriteBack, err = opt.GetFsDuration(key)
+    case "vfs-read-ahead":
+        err = getFVarP(&vfsOpt.ReadAhead, opt, key)
+    case "vfs-used-is-size":
+        vfsOpt.UsedIsSize, err = opt.GetBool(key)
+    case "vfs-read-chunk-streams":
+        intVal, err = opt.GetInt64(key)
+        if err == nil {
+            if intVal >= 0 && intVal <= math.MaxInt {
+                vfsOpt.ChunkStreams = int(intVal)
+            } else {
+                err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
+            }
+        }
+
+    // unprefixed vfs options
+    case "no-modtime":
+        vfsOpt.NoModTime, err = opt.GetBool(key)
+    case "no-checksum":
+        vfsOpt.NoChecksum, err = opt.GetBool(key)
+    case "dir-cache-time":
+        vfsOpt.DirCacheTime, err = opt.GetFsDuration(key)
+    case "poll-interval":
+        vfsOpt.PollInterval, err = opt.GetFsDuration(key)
+    case "read-only":
+        vfsOpt.ReadOnly, err = opt.GetBool(key)
+    case "dir-perms":
+        err = getFVarP(&vfsOpt.DirPerms, opt, key)
+    case "file-perms":
+        err = getFVarP(&vfsOpt.FilePerms, opt, key)
+
+    // unprefixed unix-only vfs options
+    case "umask":
+        err = getFVarP(&vfsOpt.Umask, opt, key)
+    case "uid":
+        intVal, err = opt.GetInt64(key)
+        if err == nil {
+            if intVal >= 0 && intVal <= math.MaxUint32 {
+                vfsOpt.UID = uint32(intVal)
+            } else {
+                err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
+            }
+        }
+    case "gid":
+        intVal, err = opt.GetInt64(key)
+        if err == nil {
+            if intVal >= 0 && intVal <= math.MaxUint32 {
+                vfsOpt.GID = uint32(intVal)
+            } else {
+                err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
+            }
+        }
+
+    // non-vfs options
+    default:
+        ok = false
+    }
+    return
+}
+
+func getFVarP(pvalue pflag.Value, opt rc.Params, key string) error {
+    str, err := opt.GetString(key)
+    if err != nil {
+        return err
+    }
+    return pvalue.Set(str)
+}
+
+func getStringArray(opt rc.Params, key string) ([]string, error) {
+    str, err := opt.GetString(key)
+    if err != nil {
+        return nil, err
+    }
+    return strings.Split(str, ","), nil
+}
+
 func normalOptName(key string) string {
     return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
 }
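The helpers added above give applyOptions a staged lookup: each raw Docker volume option is tried as a mount option, then as a VFS option, and only then passed through to backend config. A compressed sketch of the same first-match dispatch — the two option names and the opts struct are illustrative stand-ins for the much larger rclone set:

```go
package main

import (
	"fmt"
	"strconv"
)

type opts struct {
	debugFUSE bool
	readOnly  bool
}

// getMountOption reports ok if the key was recognised and err if its
// value failed to parse - the same split as in the diff above.
func getMountOption(o *opts, key, val string) (ok bool, err error) {
	ok = true
	switch key {
	case "debug-fuse":
		o.debugFUSE, err = strconv.ParseBool(val)
	default:
		ok = false
	}
	return
}

func getVFSOption(o *opts, key, val string) (ok bool, err error) {
	ok = true
	switch key {
	case "read-only":
		o.readOnly, err = strconv.ParseBool(val)
	default:
		ok = false
	}
	return
}

func main() {
	o := &opts{}
	for key, val := range map[string]string{"debug-fuse": "true", "read-only": "1"} {
		ok, err := getMountOption(o, key, val)
		if !ok {
			ok, err = getVFSOption(o, key, val)
		}
		if err != nil {
			fmt.Printf("cannot parse option %q: %v\n", key, err)
		} else if !ok {
			fmt.Printf("%q would fall through to backend config\n", key)
		}
	}
	fmt.Printf("%+v\n", *o)
}
```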
@@ -1,75 +0,0 @@
-package docker
-
-import (
-    "testing"
-    "time"
-
-    "github.com/rclone/rclone/cmd/mountlib"
-    "github.com/rclone/rclone/fs"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-
-    _ "github.com/rclone/rclone/backend/local"
-)
-
-func TestApplyOptions(t *testing.T) {
-    vol := &Volume{
-        Name:       "testName",
-        MountPoint: "testPath",
-        drv: &Driver{
-            root: "testRoot",
-        },
-        mnt: &mountlib.MountPoint{
-            MountPoint: "testPath",
-        },
-        mountReqs: make(map[string]interface{}),
-    }
-
-    // Happy path
-    volOpt := VolOpts{
-        "remote":     "/tmp/docker",
-        "persist":    "FALSE",
-        "mount_type": "potato",
-        // backend options
-        "--local-case-sensitive": "true",
-        "local_no_check_updated": "1",
-        // mount options
-        "debug-fuse":   "true",
-        "attr_timeout": "100s",
-        "--async-read": "TRUE",
-        // vfs options
-        "no-modtime":  "1",
-        "no_checksum": "true",
-        "--no-seek":   "true",
-    }
-    err := vol.applyOptions(volOpt)
-    require.NoError(t, err)
-    // normal options
-    assert.Equal(t, ":local,case_sensitive='true',no_check_updated='1':/tmp/docker", vol.fsString)
-    assert.Equal(t, false, vol.persist)
-    assert.Equal(t, "potato", vol.mountType)
-    // mount options
-    assert.Equal(t, true, vol.mnt.MountOpt.DebugFUSE)
-    assert.Equal(t, fs.Duration(100*time.Second), vol.mnt.MountOpt.AttrTimeout)
-    assert.Equal(t, true, vol.mnt.MountOpt.AsyncRead)
-    // vfs options
-    assert.Equal(t, true, vol.mnt.VFSOpt.NoModTime)
-    assert.Equal(t, true, vol.mnt.VFSOpt.NoChecksum)
-    assert.Equal(t, true, vol.mnt.VFSOpt.NoSeek)
-
-    // Check errors
-    err = vol.applyOptions(VolOpts{
-        "debug-fuse": "POTATO",
-    })
-    require.ErrorContains(t, err, "cannot parse mount options")
-    err = vol.applyOptions(VolOpts{
-        "no-modtime": "POTATO",
-    })
-    require.ErrorContains(t, err, "cannot parse vfs options")
-    err = vol.applyOptions(VolOpts{
-        "remote":          "/tmp/docker",
-        "local_not_found": "POTATO",
-    })
-    require.ErrorContains(t, err, "unsupported backend option")
-
-}
@@ -24,12 +24,6 @@ import (
     nfshelper "github.com/willscott/go-nfs/helpers"
 )
 
-// Errors on cache initialisation
-var (
-    ErrorSymlinkCacheNotSupported = errors.New("symlink cache not supported on " + runtime.GOOS)
-    ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
-)
-
 // Cache controls the file handle cache implementation
 type Cache interface {
     // ToHandle takes a file and represents it with an opaque handle to reference it.

@@ -49,22 +43,16 @@ type Cache interface {
 
 // Set the cache of the handler to the type required by the user
 func (h *Handler) getCache() (c Cache, err error) {
-    fs.Debugf("nfs", "Starting %v handle cache", h.opt.HandleCache)
     switch h.opt.HandleCache {
     case cacheMemory:
        return nfshelper.NewCachingHandler(h, h.opt.HandleLimit), nil
     case cacheDisk:
        return newDiskHandler(h)
     case cacheSymlink:
-        dh, err := newDiskHandler(h)
-        if err != nil {
-            return nil, err
+        if runtime.GOOS != "linux" {
+            return nil, errors.New("can only use symlink cache on Linux")
         }
-        err = dh.makeSymlinkCache()
-        if err != nil {
-            return nil, err
-        }
-        return dh, nil
+        return nil, errors.New("FIXME not implemented yet")
     }
     return nil, errors.New("unknown handle cache type")
 }
@@ -74,10 +62,6 @@ type diskHandler struct {
     mu       sync.RWMutex
     cacheDir string
     billyFS  billy.Filesystem
-    write    func(fh []byte, cachePath string, fullPath string) ([]byte, error)
-    read     func(fh []byte, cachePath string) ([]byte, error)
-    remove   func(fh []byte, cachePath string) error
-    handleType int32 //nolint:unused // used by the symlink cache
 }
 
 // Create a new disk handler

@@ -99,9 +83,6 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
     dh = &diskHandler{
         cacheDir: cacheDir,
         billyFS:  h.billyFS,
-        write:    dh.diskCacheWrite,
-        read:     dh.diskCacheRead,
-        remove:   dh.diskCacheRemove,
     }
     fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
     return dh, nil
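The write/read/remove fields deleted here are what let an alternative cache install itself at runtime: the disk handler calls through function fields that default to the plain-file implementations and are repointed after a successful probe (as makeSymlinkCache does in the deleted file further down). A tiny, rclone-free sketch of the pattern:

```go
package main

import "fmt"

// handler dispatches through a function field so an alternative
// implementation can swap itself in, as makeSymlinkCache does.
type handler struct {
	read func(key string) (string, error)
}

func newHandler() *handler {
	h := &handler{}
	h.read = h.diskRead // default implementation
	return h
}

func (h *handler) diskRead(key string) (string, error) {
	return "disk:" + key, nil
}

func (h *handler) symlinkRead(key string) (string, error) {
	return "symlink:" + key, nil
}

func main() {
	h := newHandler()
	fmt.Println(h.read("a")) // disk:a
	h.read = h.symlinkRead   // probe succeeded: swap implementation
	fmt.Println(h.read("a")) // symlink:a
}
```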
@@ -139,7 +120,7 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
         fs.Errorf("nfs", "Couldn't create cache file handle directory: %v", err)
         return fh
     }
-    fh, err = dh.write(fh, cachePath, fullPath)
+    err = os.WriteFile(cachePath, []byte(fullPath), 0600)
     if err != nil {
         fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
         return fh

@@ -147,11 +128,6 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
     return fh
 }
 
-// Write the fullPath into cachePath returning the possibly updated fh
-func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath string) ([]byte, error) {
-    return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
-}
-
 var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
 
 // FromHandle converts from an opaque handle to the file it represents

@@ -159,7 +135,7 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
     dh.mu.RLock()
     defer dh.mu.RUnlock()
     cachePath := dh.handleToPath(fh)
-    fullPathBytes, err := dh.read(fh, cachePath)
+    fullPathBytes, err := os.ReadFile(cachePath)
     if err != nil {
         fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
         return nil, nil, errStaleHandle

@@ -168,28 +144,18 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
     return dh.billyFS, splitPath, nil
 }
 
-// Read the contents of (fh, cachePath)
-func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error) {
-    return os.ReadFile(cachePath)
-}
-
 // Invalidate the handle passed - used on rename and delete
 func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
     dh.mu.Lock()
     defer dh.mu.Unlock()
     cachePath := dh.handleToPath(fh)
-    err := dh.remove(fh, cachePath)
+    err := os.Remove(cachePath)
     if err != nil {
         fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
     }
     return nil
 }
 
-// Remove the (fh, cachePath) file
-func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
-    return os.Remove(cachePath)
-}
-
 // HandleLimit exports how many file handles can be safely stored by this cache.
 func (dh *diskHandler) HandleLimit() int {
     return math.MaxInt
@@ -13,9 +13,6 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-// NB to test the symlink cache, running with elevated permissions is needed
-const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink"
-
 // Check basic CRUD operations
 func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
     // Check reading a non existent handle returns an error

@@ -104,12 +101,11 @@ func TestCache(t *testing.T) {
     ci := fs.GetConfig(context.Background())
     oldLogLevel := ci.LogLevel
     ci.LogLevel = fs.LogLevelEmergency
-    //ci.LogLevel = fs.LogLevelDebug
     defer func() {
         ci.LogLevel = oldLogLevel
     }()
     billyFS := &FS{nil} // place holder billyFS
-    for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} {
+    for _, cacheType := range []handleCache{cacheMemory, cacheDisk} {
         cacheType := cacheType
         t.Run(cacheType.String(), func(t *testing.T) {
             h := &Handler{

@@ -119,27 +115,8 @@ func TestCache(t *testing.T) {
             h.opt.HandleCache = cacheType
             h.opt.HandleCacheDir = t.TempDir()
             c, err := h.getCache()
-            if err == ErrorSymlinkCacheNotSupported {
-                t.Skip(err.Error())
-            }
-            if err == ErrorSymlinkCacheNoPermission {
-                t.Skip("Need more permissions to run symlink cache tests: " + testSymlinkCache)
-            }
             require.NoError(t, err)
 
-            t.Run("Empty", func(t *testing.T) {
-                // Write a handle
-                splitPath := []string{""}
-                fh := c.ToHandle(h.billyFS, splitPath)
-                assert.True(t, len(fh) > 0)
-
-                // Read the handle back
-                newFs, newSplitPath, err := c.FromHandle(fh)
-                require.NoError(t, err)
-                assert.Equal(t, h.billyFS, newFs)
-                assert.Equal(t, splitPath, newSplitPath)
-                testCacheCRUD(t, h, c, "file")
-            })
             t.Run("CRUD", func(t *testing.T) {
                 testCacheCRUD(t, h, c, "file")
             })
@@ -3,6 +3,7 @@
 package nfs
 
 import (
+    "math"
     "os"
     "path"
     "strings"

@@ -36,7 +37,7 @@ func setSys(fi os.FileInfo) {
         Nlink:  1,
         UID:    vfs.Opt.UID,
         GID:    vfs.Opt.GID,
-        Fileid: node.Inode(), // without this mounting doesn't work on Linux
+        Fileid: math.MaxUint64, // without this mounting doesn't work on Linux
     }
     node.SetSys(&stat)
 }

@@ -141,16 +142,16 @@ func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
     return fi, nil
 }
 
-// Symlink creates a link pointing to target
+// Symlink is not supported over NFS
 func (f *FS) Symlink(target, link string) (err error) {
     defer log.Trace(target, "link=%q", link)("err=%v", &err)
-    return f.vfs.Symlink(target, link)
+    return os.ErrInvalid
 }
 
-// Readlink reads the contents of link
+// Readlink is not supported
 func (f *FS) Readlink(link string) (result string, err error) {
     defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
-    return f.vfs.Readlink(link)
+    return "", os.ErrInvalid
 }
 
 // Chmod changes the file modes
@@ -145,9 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
-You can run rclone with this extra permission by doing this to the
-rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
+only.
 
 |--nfs-cache-handle-limit| controls the maximum number of cached NFS
 handles stored by the caching handler. This should not be set too low
@@ -1,177 +0,0 @@
-//go:build unix && linux
-
-/*
-This implements an efficient disk cache for the NFS file handles for
-Linux only.
-
-1. The destination paths are stored as symlink destinations. These
-can be stored in the directory for maximum efficiency.
-
-2. The on disk handle of the cache file is returned to NFS with
-name_to_handle_at(). This means that if the cache is deleted and
-restored, the file handle mapping will be lost.
-
-3. These handles are looked up with open_by_handle_at() so no
-searching through directory trees is needed.
-
-Note that open_by_handle_at requires CAP_DAC_READ_SEARCH so rclone
-will need to be run as root or with elevated permissions.
-
-Test with
-
-go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink
-
-*/
-
-package nfs
-
-import (
-    "bytes"
-    "errors"
-    "fmt"
-    "os"
-    "path/filepath"
-    "syscall"
-
-    "github.com/rclone/rclone/fs"
-    "golang.org/x/sys/unix"
-)
-
-// emptyPath is written instead of "" as symlinks can't be empty
-var (
-    emptyPath      = "\x01"
-    emptyPathBytes = []byte(emptyPath)
-)
-
-// Turn the diskHandler into a symlink cache
-//
-// This also tests the cache works as it may not have enough
-// permissions or be on the correct Linux version.
-func (dh *diskHandler) makeSymlinkCache() error {
-    path := filepath.Join(dh.cacheDir, "test")
-    fullPath := "testpath"
-    fh := []byte{1, 2, 3, 4, 5}
-
-    // Create a symlink
-    newFh, err := dh.symlinkCacheWrite(fh, path, fullPath)
-    fs.Debugf(nil, "newFh = %q", newFh)
-    if err != nil {
-        return fmt.Errorf("symlink cache write test failed: %w", err)
-    }
-    defer func() {
-        _ = os.Remove(path)
-    }()
-
-    // Read it back
-    newFullPath, err := dh.symlinkCacheRead(newFh, path)
-    fs.Debugf(nil, "newFullPath = %q", newFullPath)
-    if err != nil {
-        if errors.Is(err, syscall.EPERM) {
-            return ErrorSymlinkCacheNoPermission
-        }
-        return fmt.Errorf("symlink cache read test failed: %w", err)
-    }
-
-    // Check result all OK
-    if string(newFullPath) != fullPath {
-        return fmt.Errorf("symlink cache read test failed: expecting %q read %q", string(newFullPath), fullPath)
-    }
-
-    // If OK install symlink cache
-    dh.read = dh.symlinkCacheRead
-    dh.write = dh.symlinkCacheWrite
-    dh.remove = dh.symlinkCacheRemove
-
-    return nil
-}
-
-// Write the fullPath into cachePath returning the possibly updated fh
-//
-// This writes the fullPath into the file with the cachePath given and
-// returns the handle for that file so we can look it up later.
-func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath string) (newFh []byte, err error) {
-    //defer log.Trace(nil, "fh=%x, cachePath=%q, fullPath=%q", fh, cachePath)("newFh=%x, err=%v", &newFh, &err)
-
-    // Can't write an empty symlink so write a substitution
-    if fullPath == "" {
-        fullPath = emptyPath
-    }
-
-    // Write the symlink
-    err = os.Symlink(fullPath, cachePath)
-    if err != nil && !errors.Is(err, syscall.EEXIST) {
-        return nil, fmt.Errorf("symlink cache create symlink: %w", err)
-    }
-
-    // Read the newly created symlinks handle
-    handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, cachePath, 0)
-    if err != nil {
-        return nil, fmt.Errorf("symlink cache name to handle at: %w", err)
-    }
-
-    // Store the handle type if it hasn't changed
-    // This should run once only when called by makeSymlinkCache
-    if dh.handleType != handle.Type() {
-        dh.handleType = handle.Type()
-    }
-
-    return handle.Bytes(), nil
-}
-
-// Read the contents of (fh, cachePath)
-//
-// This reads the symlink with the corresponding file handle and
-// returns the contents. It ignores the cachePath which will be
-// pointing in the wrong place.
-//
-// Note that the caller needs CAP_DAC_READ_SEARCH to use this.
-func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) {
-    //defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err)
-
-    // Find the file with the handle passed in
-    handle := unix.NewFileHandle(dh.handleType, fh)
-    fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks
-    if err != nil {
-        return nil, fmt.Errorf("symlink cache open by handle at: %w", err)
-    }
-
-    // Close it on exit
-    defer func() {
-        newErr := unix.Close(fd)
-        if err == nil {
-            err = newErr
-        }
-    }()
-
-    // Read the symlink which is the path required
-    buf := make([]byte, 1024) // Max path length
-    n, err := unix.Readlinkat(fd, "", buf) // It will (silently) truncate the contents, in case the buffer is too small to hold all of the contents.
-    if err != nil {
-        return nil, fmt.Errorf("symlink cache read: %w", err)
-    }
-    fullPath = buf[:n:n]
-
-    // Undo empty symlink substitution
-    if bytes.Equal(fullPath, emptyPathBytes) {
-        fullPath = buf[:0:0]
-    }
-
-    return fullPath, nil
-}
-
-// Remove the (fh, cachePath) file
-func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error {
-    // First read the path
-    fullPath, err := dh.symlinkCacheRead(fh, cachePath)
-    if err != nil {
-        return err
-    }
-
-    // fh for the actual cache file
-    fh = hashPath(string(fullPath))
-
-    // cachePath for the actual cache file
-    cachePath = dh.handleToPath(fh)
-
-    return os.Remove(cachePath)
-}
@@ -1,8 +0,0 @@
-//go:build unix && !linux
-
-package nfs
-
-// Turn the diskHandler into a symlink cache
-func (dh *diskHandler) makeSymlinkCache() error {
-    return ErrorSymlinkCacheNotSupported
-}

@@ -9,7 +9,6 @@ import (
     "path"
     "strings"
     "sync"
-    "time"
 
     "github.com/ncw/swift/v2"
     "github.com/rclone/gofakes3"
@@ -19,6 +18,7 @@ import (
 
 var (
     emptyPrefix = &gofakes3.Prefix{}
+    timeFormat  = "Mon, 2 Jan 2006 15:04:05 GMT"
 )
 
 // s3Backend implements the gofakes3.Backend interface to make an S3
@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
     for _, entry := range dirEntries {
         if entry.IsDir() {
             response = append(response, gofakes3.BucketInfo{
-                Name:         entry.Name(),
+                Name:         gofakes3.URLEncode(entry.Name()),
                 CreationDate: gofakes3.NewContentTime(entry.ModTime()),
             })
         }
@@ -98,13 +98,6 @@ func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofak
     return b.pager(response, page)
 }
 
-// formatHeaderTime makes a timestamp which is the same as that used by AWS.
-//
-// This is like RFC1123 always in UTC, but has GMT instead of UTC
-func formatHeaderTime(t time.Time) string {
-    return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
-}
-
 // HeadObject returns the fileinfo for the given object name.
 //
 // Note that the metadata is not supported yet.
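Both sides of this change aim at the AWS-style Last-Modified header, but the layouts differ in one visible detail: the deleted formatHeaderTime zero-pads the day ("02") and appends " GMT" by hand, while the timeFormat constant added earlier uses an unpadded day ("2") with GMT as literal text inside the layout. A quick runnable comparison of the two reference layouts:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, time.March, 7, 9, 30, 0, 0, time.UTC)

	// Deleted helper's layout: zero-padded day, " GMT" appended by hand.
	oldHdr := t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"

	// Replacement constant's layout: unpadded day, GMT inside the layout
	// (literal text, so the time must already be in UTC).
	newHdr := t.Format("Mon, 2 Jan 2006 15:04:05 GMT")

	fmt.Println(oldHdr) // Thu, 07 Mar 2024 09:30:00 GMT
	fmt.Println(newHdr) // Thu, 7 Mar 2024 09:30:00 GMT
}
```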
@@ -138,7 +131,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
     hash := getFileHashByte(fobj)
 
     meta := map[string]string{
-        "Last-Modified": formatHeaderTime(node.ModTime()),
+        "Last-Modified": node.ModTime().Format(timeFormat),
         "Content-Type":  fs.MimeType(context.Background(), fobj),
     }
 

@@ -215,7 +208,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
     }
 
     meta := map[string]string{
-        "Last-Modified": formatHeaderTime(node.ModTime()),
+        "Last-Modified": node.ModTime().Format(timeFormat),
         "Content-Type":  fs.MimeType(context.Background(), fobj),
     }
 

@@ -227,7 +220,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
     }
 
     return &gofakes3.Object{
-        Name:     objectName,
+        Name:     gofakes3.URLEncode(objectName),
         Hash:     hash,
         Metadata: meta,
         Size:     size,

@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 
         if entry.IsDir() {
             if addPrefix {
-                response.AddPrefix(objectPath)
+                response.AddPrefix(gofakes3.URLEncode(objectPath))
                 continue
             }
             err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)

@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
         }
     } else {
         item := &gofakes3.Content{
-            Key:          objectPath,
+            Key:          gofakes3.URLEncode(objectPath),
             LastModified: gofakes3.NewContentTime(entry.ModTime()),
             ETag:         getFileHash(entry),
             Size:         entry.Size(),

@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
 use_multipart_uploads = false
 ```
 
-Note that setting `use_multipart_uploads = false` is to work around
+Note that setting `disable_multipart_uploads = true` is to work around
 [a bug](#bugs) which will be fixed in due course.
 
 ### Bugs
@@ -65,7 +65,7 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
     if s.proxy == nil {
         return s.vfs
     }
-    if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
+    if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
         fs.Infof(what, "SSH Permissions Extensions not found")
         return nil
     }
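The one-character change in getVFS flips the guard's short-circuit behaviour. With ||, a nil Permissions is caught before Extensions is read; with &&, the second operand is evaluated precisely when Permissions is nil, so the dereference panics. A minimal reproduction with a stand-in struct (the recover is only there to keep the demo running):

```go
package main

import "fmt"

type Permissions struct {
	Extensions map[string]string
}

type Conn struct {
	Permissions *Permissions
}

func main() {
	c := &Conn{} // Permissions is nil

	// Safe variant: || stops at the first true operand, so the nil
	// pointer is never dereferenced.
	if c.Permissions == nil || c.Permissions.Extensions == nil {
		fmt.Println("|| : extensions not found (no panic)")
	}

	// Buggy variant: && evaluates the second operand exactly when
	// Permissions is nil, dereferencing a nil pointer.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("&& : panicked:", r)
		}
	}()
	if c.Permissions == nil && c.Permissions.Extensions == nil {
		fmt.Println("unreachable")
	}
}
```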
@@ -143,14 +143,9 @@ func (s *server) serve() (err error) {
         authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
         authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
         // If user set the flag away from the default then report an error
-        if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
-            if err != nil {
-                return err
-            }
-            if len(authorizedKeysMap) == 0 {
-                return fmt.Errorf("failed to parse authorized keys")
-            }
-        }
+        if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
+            return err
+        }
         fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
     }
 

@@ -354,11 +349,12 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
     authorizedKeysMap = make(map[string]struct{})
     for len(authorizedKeysBytes) > 0 {
         pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
-        if err == nil {
-            authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
-            authorizedKeysBytes = bytes.TrimSpace(rest)
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
         }
-    }
+        authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
+        authorizedKeysBytes = bytes.TrimSpace(rest)
     }
     return authorizedKeysMap, nil
 }
 
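loadAuthorizedKeys walks the raw authorized_keys bytes with ssh.ParseAuthorizedKey, which hands back the unparsed remainder after each entry; the change above decides whether a malformed entry is silently skipped or turned into an error. A self-contained sketch of the strict variant, generating one valid key in memory rather than reading a file:

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// loadAuthorizedKeys parses authorized_keys data, failing on the first
// malformed entry, like the stricter variant in the diff above.
func loadAuthorizedKeys(data []byte) (map[string]struct{}, error) {
	keys := make(map[string]struct{})
	for len(data) > 0 {
		pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(data)
		if err != nil {
			return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
		}
		keys[string(pubKey.Marshal())] = struct{}{}
		data = bytes.TrimSpace(rest)
	}
	return keys, nil
}

func main() {
	// Build one valid authorized_keys line to feed the parser.
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}
	keys, err := loadAuthorizedKeys(ssh.MarshalAuthorizedKey(sshPub))
	fmt.Println(len(keys), err) // 1 <nil>
}
```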
@@ -4,6 +4,7 @@ package size
 import (
     "context"
     "encoding/json"
+    "fmt"
     "os"
     "strconv"
 

@@ -71,13 +72,13 @@ of the size command.
         count := strconv.FormatInt(results.Count, 10)
         countSuffix := fs.CountSuffix(results.Count).String()
         if count == countSuffix {
-            operations.SyncPrintf("Total objects: %s\n", count)
+            fmt.Printf("Total objects: %s\n", count)
         } else {
-            operations.SyncPrintf("Total objects: %s (%s)\n", countSuffix, count)
+            fmt.Printf("Total objects: %s (%s)\n", countSuffix, count)
         }
-        operations.SyncPrintf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
+        fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
         if results.Sizeless > 0 {
-            operations.SyncPrintf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
+            fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
         }
         return nil
     })
@@ -30,7 +30,6 @@ var (
     maxFileSize       = fs.SizeSuffix(100)
     minFileNameLength = 4
     maxFileNameLength = 12
-    flat              = false
     seed              = int64(1)
     zero              = false
     sparse            = false

@@ -56,7 +55,6 @@ func init() {
     flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
     flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
     flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
-    flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")
 
     test.Command.AddCommand(makefileCmd)
     makefileFlags := makefileCmd.Flags()

@@ -83,9 +81,6 @@ var makefilesCmd = &cobra.Command{
         commonInit()
         outputDirectory := args[0]
         directoriesToCreate = numberOfFiles / averageFilesPerDirectory
-        if flat {
-            directoriesToCreate = 0
-        }
         averageSize := (minFileSize + maxFileSize) / 2
         start := time.Now()
         fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)
@@ -3,13 +3,10 @@ package version
 
 import (
     "context"
-    "debug/buildinfo"
     "errors"
     "fmt"
     "io"
     "net/http"
-    "os"
-    "runtime/debug"
     "strings"
     "time"
 

@@ -23,14 +20,12 @@ import (
 
 var (
     check = false
-    deps  = false
 )
 
 func init() {
     cmd.Root.AddCommand(commandDefinition)
     cmdFlags := commandDefinition.Flags()
     flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version", "")
-    flags.BoolVarP(cmdFlags, &deps, "deps", "", false, "Show the Go dependencies", "")
 }
 
 var commandDefinition = &cobra.Command{

@@ -72,25 +67,18 @@ Or
     beta: 1.42.0.5 (released 2018-06-17)
       upgrade: https://beta.rclone.org/v1.42-005-g56e1e820
 
-If you supply the --deps flag then rclone will print a list of all the
-packages it depends on and their versions along with some other
-information about the build.
 `,
     Annotations: map[string]string{
         "versionIntroduced": "v1.33",
     },
-    RunE: func(command *cobra.Command, args []string) error {
+    Run: func(command *cobra.Command, args []string) {
         ctx := context.Background()
         cmd.CheckArgs(0, 0, command, args)
-        if deps {
-            return printDependencies()
-        }
         if check {
             CheckVersion(ctx)
         } else {
             cmd.ShowVersion()
         }
-        return nil
     },
 }
 
@@ -163,36 +151,3 @@ func CheckVersion(ctx context.Context) {
         fmt.Println("Your version is compiled from git so comparisons may be wrong.")
     }
 }
-
-// Print info about a build module
-func printModule(module *debug.Module) {
-    if module.Replace != nil {
-        fmt.Printf("- %s %s (replaced by %s %s)\n",
-            module.Path, module.Version, module.Replace.Path, module.Replace.Version)
-    } else {
-        fmt.Printf("- %s %s\n", module.Path, module.Version)
-    }
-}
-
-// printDependencies shows the packages we use in a format like go.mod
-func printDependencies() error {
-    info, err := buildinfo.ReadFile(os.Args[0])
-    if err != nil {
-        return fmt.Errorf("error reading build info: %w", err)
-    }
-    fmt.Println("Go Version:")
-    fmt.Printf("- %s\n", info.GoVersion)
-    fmt.Println("Main package:")
-    printModule(&info.Main)
-    fmt.Println("Binary path:")
-    fmt.Printf("- %s\n", info.Path)
-    fmt.Println("Settings:")
-    for _, setting := range info.Settings {
-        fmt.Printf("- %s: %s\n", setting.Key, setting.Value)
-    }
-    fmt.Println("Dependencies:")
-    for _, dep := range info.Deps {
-        printModule(dep)
-    }
-    return nil
-}
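The deleted printDependencies pulls module data back out of the compiled binary with debug/buildinfo.ReadFile(os.Args[0]). When a program only needs its own build info, runtime/debug exposes the same records without touching the filesystem — a sketch of that in-process variant, shaped after the deleted output format:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo returns the module information compiled into the
	// running binary; it is only populated in module-aware builds.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available")
		return
	}
	fmt.Println("Go Version:")
	fmt.Printf("- %s\n", info.GoVersion)
	fmt.Println("Main package:")
	fmt.Printf("- %s %s\n", info.Main.Path, info.Main.Version)
	fmt.Println("Dependencies:")
	for _, dep := range info.Deps {
		if dep.Replace != nil {
			fmt.Printf("- %s %s (replaced by %s %s)\n",
				dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
		} else {
			fmt.Printf("- %s %s\n", dep.Path, dep.Version)
		}
	}
}
```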
@@ -15,7 +15,7 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-// TestEnvironmentVariables demonstrates and verifies the test functions for end-to-end testing of rclone
+// TestCmdTest demonstrates and verifies the test functions for end-to-end testing of rclone
 func TestEnvironmentVariables(t *testing.T) {
 
     createTestEnvironment(t)

@@ -66,13 +66,12 @@ so it is easy to tweak stuff.
 └── static - static content for the website
     ├── css
     │   ├── bootstrap.css
-    │   └── custom.css - custom css goes here
-    ├── fontawesome
-    │   ├── css
-    │   └── webfonts
+    │   ├── custom.css - custom css goes here
+    │   └── font-awesome.css
     ├── img - images used
     ├── js
     │   ├── bootstrap.js
     │   ├── custom.js - custom javascript goes here
     │   └── jquery.js
+    └── webfonts
 ```
Some files were not shown because too many files have changed in this diff.