Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-oauth-... → jwt-v5-com... (154 commits)
Commits in this range (SHA1):

da307bcd9a 8c3ea2842c f4d7df1511 fa3a8161cf 2c72842c10 4a81f12c26 aabda1cda2 572fe20f8e 2fd4c45b34 ec5489e23f
6898375a2d d413443a6a 5039747f26 11ba4ac539 b4ed7fb7d7 719473565e bd7278d7e9 45ba81c726 530658e0cc b742705d0c
cd3b08d8cf 009660a489 4b6c7c6d84 a7db375f5d 101dcfe157 aec87b74d3 91c8f92ccb 965bf19065 15ef3b90fa f6efaf2a63
0e7c495395 ff0ded8f11 110bf468a4 d4e86f4d8b 6091a0362b 33d2747829 c9e5f45d73 2f66537514 a491312c7d 45b7690867
30ef1ddb23 424d8e3123 04dfa6d923 fdff1a54ee 42240f4b5d 7692ef289f bfb7b88371 5f70918e2c abf11271fe a36e89bb61
35614acf59 7e4b8e33f5 5151a663f0 b85a1b684b 8fa8f146fa 6cad0a013e aa743cbc60 a389a2979b d6f0d1d349 4ed6960d95
731af0c0ab 5499fd3b59 e0e697ca11 05f000b076 a34c839514 6a217c7dc1 e1748a3183 bc08e05a00 9218b69afe 0ce2e12d9f
7224b76801 d2398ccb59 0988fd9e9f 51cde23e82 caac95ff54 19f4580aca 27f448d14d 500698c5be 91af6da068 b8835fe7b4
48d9e88e8f 4e7ee9310e d629102fa6 db1ed69693 06657c49a0 f1d2f2b2c8 a5abe4b8b3 c0339327be 353bc3130e 126f00882b
44c3f5e1e8 c47c94e485 1f328fbcfd 7f1240516e f9946b37f9 96fe25cf0a a176d4cbda e704e33045 2f3e90f671 65012beea4
704217b698 6ade1055d5 6a983d601c eaafae95fa 5ca1436c24 c46e93cc42 66943d3d79 a78bc093de 2446c4928d e11e679e90
ba8e538173 40111ba5e1 ab58ae5b03 ca8860177e d65d1a44b3 c1763a3f95 964fcd5f59 c6281a1217 ff3f8f0b33 2d844a26c3
1b68492c85 acd5a893e2 0214a59a8c 6079cab090 bf57087a6e d8bc542ffc 01ccf204f4 84b64dcdf9 8cc1020a58 1e2b354456
f639cd9c78 e50f995d87 abe884e744 173b2ac956 1317fdb9b8 1072173d58 df19c6f7bf ee72554fb9 abb4f77568 ca2b27422f
740f6b318c f307d929a8 ceea6753ee 2bafbf3c04 3e14ba54b8 2f7a30cf61 0ad925278d e3053350f3 b9207e5727 40159e7a16
16baa24964 72f06bcc4b c527dd8c9c 29fd894189
.github/workflows/build.yml (vendored, 31 lines changed)

@@ -26,12 +26,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -42,14 +42,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,14 +58,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -75,23 +75,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.23.0-rc.1'
+            go: '>=1.24.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.21
-            os: ubuntu-latest
-            go: '1.21'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.22
             os: ubuntu-latest
             go: '1.22'
             quicktest: true
             racequicktest: true

+          - job_name: go1.23
+            os: ubuntu-latest
+            go: '1.23'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -123,7 +123,8 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get update
+          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -310,7 +311,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '>=1.24.0-rc.1'

       - name: Set global environment variables
         shell: bash
MANUAL.html (generated, 2431 lines changed): file diff suppressed because it is too large

MANUAL.txt (generated, 2523 lines changed): file diff suppressed because it is too large
RELEASE.md (21 lines changed)

@@ -47,13 +47,20 @@ Early in the next release cycle update the dependencies.
 * `git commit -a -v -m "build: update all dependencies"`

 If the `make updatedirect` upgrades the version of go in the `go.mod`
-then go to manual mode. `go1.20` here is the lowest supported version
+
+    go 1.22.0
+
+then go to manual mode. `go1.22` here is the lowest supported version
 in the `go.mod`.

+If `make updatedirect` added a `toolchain` directive then remove it.
+We don't want to force a toolchain on our users. Linux packagers are
+often using a version of Go that is a few versions out of date.
+
 ```
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
-go mod tidy -go=1.20 -compat=1.20
+go mod tidy -go=1.22 -compat=1.22
 ```

 If the `go mod tidy` fails use the output from it to remove the
@@ -86,6 +93,16 @@ build.
 Once it compiles locally, push it on a test branch and commit fixes
 until the tests pass.

+### Major versions
+
+The above procedure will not upgrade major versions, so v2 to v3.
+However this tool can show which major versions might need to be
+upgraded:
+
+    go run github.com/icholy/gomajor@latest list -major
+
+Expect API breakage when updating major versions.
+
 ## Tidy beta

 At some point after the release run
@@ -10,6 +10,7 @@ import (
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
+	_ "github.com/rclone/rclone/backend/cloudinary"
 	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
@@ -2162,6 +2162,9 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	if chunkNumber <= 8 {
 		return w.f.shouldRetry(ctx, err)
 	}
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// retry all chunks once have done the first few
 	return true, err
 }
@@ -393,8 +393,10 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
 	policyClientOptions := policy.ClientOptions{
 		Transport: newTransporter(ctx),
 	}
+	backup := service.ShareTokenIntentBackup
 	clientOpt := service.ClientOptions{
-		ClientOptions: policyClientOptions,
+		ClientOptions:     policyClientOptions,
+		FileRequestIntent: &backup,
 	}

 	// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -42,9 +42,10 @@ type Bucket struct {

 // LifecycleRule is a single lifecycle rule
 type LifecycleRule struct {
-	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
-	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-	FileNamePrefix            string `json:"fileNamePrefix"`
+	DaysFromHidingToDeleting                        *int   `json:"daysFromHidingToDeleting"`
+	DaysFromUploadingToHiding                       *int   `json:"daysFromUploadingToHiding"`
+	DaysFromStartingToCancelingUnfinishedLargeFiles *int   `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
+	FileNamePrefix                                  string `json:"fileNamePrefix"`
 }

 // Timestamp is a UTC time when this file was uploaded. It is a base
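Worth noting why these counts are `*int` rather than `int`: a nil pointer marshals to JSON `null`, so an unset rule stays distinguishable from an explicit `0` — the sample lifecycle output later in this diff shows exactly that `null`. A minimal, self-contained illustration (hypothetical local `rule` type, not the rclone one):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rule mirrors the *int pattern used by api.LifecycleRule above.
type rule struct {
	DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
}

func main() {
	one := 1
	set, _ := json.Marshal(rule{DaysFromHidingToDeleting: &one})
	unset, _ := json.Marshal(rule{})
	fmt.Println(string(set))   // {"daysFromHidingToDeleting":1}
	fmt.Println(string(unset)) // {"daysFromHidingToDeleting":null}
}
```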
@@ -2231,6 +2231,7 @@ This will dump something like this showing the lifecycle rules.
     {
         "daysFromHidingToDeleting": 1,
         "daysFromUploadingToHiding": null,
+        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
         "fileNamePrefix": ""
     }
 ]
@@ -2257,8 +2258,9 @@ overwrites will still cause versions to be made.
 See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 `,
 	Opts: map[string]string{
-		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
-		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
+		"daysFromHidingToDeleting":                        "After a file has been hidden for this many days it is deleted. 0 is off.",
+		"daysFromUploadingToHiding":                       "This many days after uploading a file is hidden",
+		"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
 	},
 }

@@ -2278,6 +2280,13 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 		}
 		newRule.DaysFromUploadingToHiding = &days
 	}
+	if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
+		days, err := strconv.Atoi(daysStr)
+		if err != nil {
+			return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
+		}
+		newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
+	}
 	bucketName, _ := f.split("")
 	if bucketName == "" {
 		return nil, errors.New("bucket required")
@@ -2285,7 +2294,7 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 	}

 	var bucket *api.Bucket
-	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
+	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
 		bucketID, err := f.getBucketID(ctx, bucketName)
 		if err != nil {
 			return nil, err
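Going by the `lifecycleCommand` handler and the `Opts` help text above, the new rule should be settable the same way as the existing ones, via rclone's `-o` backend-command options (bucket name hypothetical):

    rclone backend lifecycle b2:bucket -o daysFromStartingToCancelingUnfinishedLargeFiles=1

which would cancel any unfinished large file versions after one day, and can be combined with `-o daysFromHidingToDeleting=...` and `-o daysFromUploadingToHiding=...`.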
@@ -27,7 +27,6 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -46,7 +45,6 @@ import (
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
-	"golang.org/x/oauth2"
 )

 const (
@@ -65,12 +63,10 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauth2.Config{
-		Scopes: nil,
-		Endpoint: oauth2.Endpoint{
-			AuthURL:  "https://app.box.com/api/oauth2/authorize",
-			TokenURL: "https://app.box.com/api/oauth2/token",
-		},
+	oauthConfig = &oauthutil.Config{
+		Scopes:   nil,
+		AuthURL:  "https://app.box.com/api/oauth2/authorize",
+		TokenURL: "https://app.box.com/api/oauth2/token",
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -78,7 +74,7 @@ var (
 )

 type boxCustomClaims struct {
-	jwt.StandardClaims
+	jwtutil.LegacyStandardClaims
 	BoxSubType string `json:"box_sub_type,omitempty"`
 }

@@ -226,10 +222,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	}

 	claims = &boxCustomClaims{
-		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
-		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
-		StandardClaims: jwt.StandardClaims{
-			Id:       val,
+		LegacyStandardClaims: jwtutil.LegacyStandardClaims{
+			ID:       val,
 			Issuer:   boxConfig.BoxAppSettings.ClientID,
 			Subject:  boxConfig.EnterpriseID,
 			Audience: tokenURL,
@@ -258,6 +252,9 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {

 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
 	if block == nil {
 		return nil, errors.New("box: failed to PEM decode private key")
 	}
+	if len(rest) > 0 {
+		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
+	}
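The `oauth2.Config` → `oauthutil.Config` switch above repeats in the drive, dropbox, googlecloudstorage, googlephotos and hidrive hunks below. A minimal self-contained sketch of the shape change, inferred only from the fields these diffs touch (illustrative local types, not the real golang.org/x/oauth2 or rclone lib/oauthutil definitions):

```go
package main

import "fmt"

// oauth2Endpoint / oauth2Config mimic the old nested-endpoint shape.
type oauth2Endpoint struct {
	AuthURL  string
	TokenURL string
}

type oauth2Config struct {
	Scopes   []string
	Endpoint oauth2Endpoint
}

// oauthutilConfig mimics the new shape: the endpoint URLs are flattened
// to top-level fields, which is all these hunks change.
type oauthutilConfig struct {
	Scopes   []string
	AuthURL  string
	TokenURL string
}

func main() {
	oldCfg := oauth2Config{Endpoint: oauth2Endpoint{
		AuthURL:  "https://app.box.com/api/oauth2/authorize",
		TokenURL: "https://app.box.com/api/oauth2/token",
	}}
	// The migration lifts the nested endpoint fields up one level.
	newCfg := oauthutilConfig{
		Scopes:   oldCfg.Scopes,
		AuthURL:  oldCfg.Endpoint.AuthURL,
		TokenURL: oldCfg.Endpoint.TokenURL,
	}
	fmt.Println(newCfg.AuthURL, newCfg.TokenURL)
}
```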
backend/cloudinary/api/types.go (new file, 48 lines)

@@ -0,0 +1,48 @@
// Package api has type definitions for cloudinary
package api

import (
	"fmt"
)

// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
	// FromStandardPath takes a / separated path in Standard encoding
	// and converts it to a / separated path in this encoding.
	FromStandardPath(string) string
	// FromStandardName takes name in Standard encoding and converts
	// it in this encoding.
	FromStandardName(string) string
	// ToStandardPath takes a / separated path in this encoding
	// and converts it to a / separated path in Standard encoding.
	ToStandardPath(string) string
	// ToStandardName takes name in this encoding and converts
	// it in Standard encoding.
	ToStandardName(string) string
	// Encoded root of the remote (as passed into NewFs)
	FromStandardFullPath(string) string
}

// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
	PublicID     string
	ResourceType string
	DeliveryType string
	AssetFolder  string
	DisplayName  string
}

// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
	return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
	return false
}

// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
	return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
backend/cloudinary/cloudinary.go (new file, 711 lines)

@@ -0,0 +1,711 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/cloudinary/cloudinary-go/v2"
	SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
	"github.com/cloudinary/cloudinary-go/v2/api/admin"
	"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
	"github.com/cloudinary/cloudinary-go/v2/api/uploader"
	"github.com/rclone/rclone/backend/cloudinary/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"github.com/zeebo/blake3"
)

// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
	if somePath == "" || somePath == "." {
		return somePath
	}
	dir := path.Dir(somePath)
	if dir == "." {
		return ""
	}
	return dir
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "cloudinary",
		Description: "Cloudinary",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				Name:      "cloud_name",
				Help:      "Cloudinary Environment Name",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:      "api_key",
				Help:      "Cloudinary API Key",
				Required:  true,
				Sensitive: true,
			},
			{
				Name:      "api_secret",
				Help:      "Cloudinary API Secret",
				Required:  true,
				Sensitive: true,
			},
			{
				Name: "upload_prefix",
				Help: "Specify the API endpoint for environments out of the US",
			},
			{
				Name: "upload_preset",
				Help: "Upload Preset to select asset manipulation on upload",
			},
			{
				Name:     config.ConfigEncoding,
				Help:     config.ConfigEncodingHelp,
				Advanced: true,
				Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
					encoder.EncodeSlash |
					encoder.EncodeLtGt |
					encoder.EncodeDoubleQuote |
					encoder.EncodeQuestion |
					encoder.EncodeAsterisk |
					encoder.EncodePipe |
					encoder.EncodeHash |
					encoder.EncodePercent |
					encoder.EncodeBackSlash |
					encoder.EncodeDel |
					encoder.EncodeCtl |
					encoder.EncodeRightSpace |
					encoder.EncodeInvalidUtf8 |
					encoder.EncodeDot),
			},
			{
				Name:     "eventually_consistent_delay",
				Default:  fs.Duration(0),
				Advanced: true,
				Help:     "Wait N seconds for eventual consistency of the databases that support the backend operation",
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	CloudName                 string               `config:"cloud_name"`
	APIKey                    string               `config:"api_key"`
	APISecret                 string               `config:"api_secret"`
	UploadPrefix              string               `config:"upload_prefix"`
	UploadPreset              string               `config:"upload_preset"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
	EventuallyConsistentDelay fs.Duration          `config:"eventually_consistent_delay"`
}

// Fs represents a remote cloudinary server
type Fs struct {
	name     string
	root     string
	opt      Options
	features *fs.Features
	pacer    *fs.Pacer
	srv      *rest.Client           // For downloading assets via the Cloudinary CDN
	cld      *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
	lastCRUD time.Time
}

// Object describes a cloudinary object
type Object struct {
	fs           *Fs
	remote       string
	size         int64
	modTime      time.Time
	url          string
	md5sum       string
	publicID     string
	resourceType string
	deliveryType string
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Initialize the Cloudinary client
	cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
	if err != nil {
		return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
	}
	cld.Admin.Client = *fshttp.NewClient(ctx)
	cld.Upload.Client = *fshttp.NewClient(ctx)
	if opt.UploadPrefix != "" {
		cld.Config.API.UploadPrefix = opt.UploadPrefix
	}
	client := fshttp.NewClient(ctx)
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		cld:   cld,
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
		srv:   rest.NewClient(client),
	}

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	if root != "" {
		// Check to see if the root is actually an existing file
		remote := path.Base(root)
		f.root = cldPathDir(root)
		_, err := f.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
				// File doesn't exist so return the previous root
				f.root = root
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// ------------------------------------------------------------

// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
	return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}

// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
	return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}

// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
	return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}

// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
	return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}

// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
	return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}

// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
	return strings.ReplaceAll(dir, "%", "%25")
}

// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
	return strings.ReplaceAll(dir, "!", "\\!")
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
	if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
		return
	}
	delay := time.Duration(f.opt.EventuallyConsistentDelay)
	timeSinceLastCRUD := time.Since(f.lastCRUD)
	if timeSinceLastCRUD < delay {
		time.Sleep(delay - timeSinceLastCRUD)
	}
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Cloudinary root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	remotePrefix := f.FromStandardFullPath(dir)
	if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
		remotePrefix += "/"
	}

	var entries fs.DirEntries
	dirs := make(map[string]struct{})
	nextCursor := ""
	f.WaitEventuallyConsistent()
	for {
		// use the folders api to list folders.
		folderParams := admin.SubFoldersParams{
			Folder:     f.ToAssetFolderAPI(remotePrefix),
			MaxResults: 500,
		}
		if nextCursor != "" {
			folderParams.NextCursor = nextCursor
		}

		results, err := f.cld.Admin.SubFolders(ctx, folderParams)
		if err != nil {
			return nil, fmt.Errorf("failed to list sub-folders: %w", err)
		}
		if results.Error.Message != "" {
			if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
				return nil, fs.ErrorDirNotFound
			}

			return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
		}

		for _, folder := range results.Folders {
			relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
			parts := strings.Split(relativePath, "/")

			// It's a directory
			dirName := parts[len(parts)-1]
			if _, found := dirs[dirName]; !found {
				d := fs.NewDir(path.Join(dir, dirName), time.Time{})
				entries = append(entries, d)
				dirs[dirName] = struct{}{}
			}
		}
		// Break if there are no more results
		if results.NextCursor == "" {
			break
		}
		nextCursor = results.NextCursor
	}

	for {
		// Use the assets.AssetsByAssetFolder API to list assets
		assetsParams := admin.AssetsByAssetFolderParams{
			AssetFolder: remotePrefix,
			MaxResults:  500,
		}
		if nextCursor != "" {
			assetsParams.NextCursor = nextCursor
		}

		results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
		if err != nil {
			return nil, fmt.Errorf("failed to list assets: %w", err)
		}

		for _, asset := range results.Assets {
			remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
			if dir != "" {
				remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
			}
			o := &Object{
				fs:           f,
				remote:       remote,
				size:         int64(asset.Bytes),
				modTime:      asset.CreatedAt,
				url:          asset.SecureURL,
				publicID:     asset.PublicID,
				resourceType: asset.AssetType,
				deliveryType: asset.Type,
			}
			entries = append(entries, o)
		}

		// Break if there are no more results
		if results.NextCursor == "" {
			break
		}
		nextCursor = results.NextCursor
	}

	return entries, nil
}

// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	searchParams := search.Query{
		Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
			f.FromStandardFullPath(cldPathDir(remote)),
			f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
		SortBy:     []search.SortByField{{"uploaded_at": "desc"}},
		MaxResults: 2,
	}
	var results *admin.SearchResult
	f.WaitEventuallyConsistent()
	err := f.pacer.Call(func() (bool, error) {
		var err1 error
		results, err1 = f.cld.Admin.Search(ctx, searchParams)
		if err1 == nil && results.TotalCount != len(results.Assets) {
			err1 = errors.New("partial response so waiting for eventual consistency")
		}
		return shouldRetry(ctx, nil, err1)
	})
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}
	if results.TotalCount == 0 || len(results.Assets) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	asset := results.Assets[0]

	o := &Object{
		fs:           f,
		remote:       remote,
		size:         int64(asset.Bytes),
		modTime:      asset.UploadedAt,
		url:          asset.SecureURL,
		md5sum:       asset.Etag,
		publicID:     asset.PublicID,
		resourceType: asset.ResourceType,
		deliveryType: asset.Type,
	}

	return o, nil
}

func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
	payload := []byte(path.Join(assetFolder, displayName))
	hash := blake3.Sum256(payload)
	return hex.EncodeToString(hash[:])
}

// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if src.Size() == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	params := uploader.UploadParams{
		UploadPreset: f.opt.UploadPreset,
	}

	updateObject := false
	var modTime time.Time
	for _, option := range options {
		if updateOptions, ok := option.(*api.UpdateOptions); ok {
			if updateOptions.PublicID != "" {
				updateObject = true
				params.Overwrite = SDKApi.Bool(true)
				params.Invalidate = SDKApi.Bool(true)
				params.PublicID = updateOptions.PublicID
				params.ResourceType = updateOptions.ResourceType
				params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
				params.AssetFolder = updateOptions.AssetFolder
				params.DisplayName = updateOptions.DisplayName
				modTime = src.ModTime(ctx)
			}
		}
	}
	if !updateObject {
		params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
		params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
		// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
		// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
		// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
		params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
	}
	uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
	}
	if !updateObject {
		modTime = uploadResult.CreatedAt
	}
	if uploadResult.Error.Message != "" {
		return nil, errors.New(uploadResult.Error.Message)
	}

	o := &Object{
		fs:           f,
		remote:       src.Remote(),
		size:         int64(uploadResult.Bytes),
		modTime:      modTime,
		url:          uploadResult.SecureURL,
		md5sum:       uploadResult.Etag,
		publicID:     uploadResult.PublicID,
		resourceType: uploadResult.ResourceType,
		deliveryType: uploadResult.Type,
	}
	return o, nil
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
	res, err := f.cld.Admin.CreateFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	return nil
}

// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// Additional test because Cloudinary will delete folders without
	// assets, regardless of empty sub-folders
	folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
	folderParams := admin.SubFoldersParams{
		Folder:     folder,
		MaxResults: 1,
	}
	results, err := f.cld.Admin.SubFolders(ctx, folderParams)
	if err != nil {
		return err
	}
	if results.TotalCount > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	params := admin.DeleteFolderParams{Folder: folder}
	res, err := f.cld.Admin.DeleteFolder(ctx, params)
	f.lastCRUD = time.Now()
	if err != nil {
		return err
	}
	if res.Error.Message != "" {
		if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
			return fs.ErrorDirNotFound
		}

		return errors.New(res.Error.Message)
	}

	return nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	420, // Too Many Requests (legacy)
	429, // Too Many Requests
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if err != nil {
		tryAgain := "Try again on "
		if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
			layout := "2006-01-02 15:04:05 UTC"
			dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
			timestamp, err2 := time.Parse(layout, dateStr)
			if err2 == nil {
				return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
			}
		}

		fs.Debugf(nil, "Retrying API error %v", err)
		return true, err
	}

	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// ------------------------------------------------------------

// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Size of object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: o.url,
		Options: options,
	}
	var offset int64
	var count int64
	var key string
	var value string
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
			key, value = option.Header()
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size - offset
			key, value = option.Header()
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if key != "" && value != "" {
		opts.ExtraHeaders = make(map[string]string)
		opts.ExtraHeaders[key] = value
	}
	// Make sure that the asset is fully available
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		if err == nil {
			cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
			if clErr == nil && count == int64(cl) {
				return false, nil
			}
		}
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
	}
	return resp.Body, err
}

// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	options = append(options, &api.UpdateOptions{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		DeliveryType: o.deliveryType,
		DisplayName:  api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
		AssetFolder:  o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
	})
	updatedObj, err := o.fs.Put(ctx, in, src, options...)
	if err != nil {
		return err
	}
	if uo, ok := updatedObj.(*Object); ok {
		o.size = uo.size
		o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
		o.url = uo.url
		o.md5sum = uo.md5sum
		o.publicID = uo.publicID
		o.resourceType = uo.resourceType
		o.deliveryType = uo.deliveryType
	}
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	params := uploader.DestroyParams{
		PublicID:     o.publicID,
		ResourceType: o.resourceType,
		Type:         o.deliveryType,
	}
	res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
	o.fs.lastCRUD = time.Now()
	if dErr != nil {
		return dErr
	}

	if res.Error.Message != "" {
		return errors.New(res.Error.Message)
	}

	if res.Result != "ok" {
		return errors.New(res.Result)
	}

	return nil
}
backend/cloudinary/cloudinary_test.go (new file, 23 lines)

@@ -0,0 +1,23 @@
// Test Cloudinary filesystem interface

package cloudinary_test

import (
	"testing"

	"github.com/rclone/rclone/backend/cloudinary"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestCloudinary"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*cloudinary.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
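The test above injects `eventually_consistent_delay` through `ExtraConfig`; judging from the option names registered in the backend's `init()` above, an equivalent hand-written remote would be a normal rclone.conf section (credential values hypothetical):

    [TestCloudinary]
    type = cloudinary
    cloud_name = my-cloud
    api_key = 0123456789
    api_secret = secret
    eventually_consistent_delay = 7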
@@ -80,9 +80,10 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	driveConfig = &oauth2.Config{
+	driveConfig = &oauthutil.Config{
 		Scopes:   []string{scopePrefix + "drive"},
-		Endpoint: google.Endpoint,
+		AuthURL:  google.Endpoint.AuthURL,
+		TokenURL: google.Endpoint.TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -3524,14 +3525,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
 	return f.unTrash(ctx, dir, directoryID, true)
 }

-// copy file with id to dest
-func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
+// copy or move file with id to dest
+func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
 	info, err := f.getFile(ctx, id, f.getFileFields(ctx))
 	if err != nil {
 		return fmt.Errorf("couldn't find id: %w", err)
 	}
 	if info.MimeType == driveFolderType {
-		return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
+		return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
 	}
 	info.Name = f.opt.Enc.ToStandardName(info.Name)
 	o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3552,9 +3553,15 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
 	if err != nil {
 		return err
 	}
-	_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
-	if err != nil {
-		return fmt.Errorf("copy failed: %w", err)
+
+	var opErr error
+	if operation == "moveid" {
+		_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
+	} else {
+		_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+	}
+	if opErr != nil {
+		return fmt.Errorf("%s failed: %w", operation, opErr)
 	}
 	return nil
 }
@@ -3791,6 +3798,28 @@ attempted if possible.

 Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
 `,
+}, {
+	Name:  "moveid",
+	Short: "Move files by ID",
+	Long: `This command moves files by ID
+
+Usage:
+
+    rclone backend moveid drive: ID path
+    rclone backend moveid drive: ID1 path1 ID2 path2
+
+It moves the drive file with ID given to the path (an rclone path which
+will be passed internally to rclone moveto).
+
+The path should end with a / to indicate move the file as named to
+this directory. If it doesn't end with a / then the last path
+component will be used as the file name.
+
+If the destination is a drive backend then server-side moving will be
+attempted if possible.
+
+Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
+`,
 }, {
 	Name:  "exportformats",
 	Short: "Dump the export formats for debug purposes",
@@ -3969,16 +3998,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 			dir = arg[0]
 		}
 		return f.unTrashDir(ctx, dir, true)
-	case "copyid":
+	case "copyid", "moveid":
 		if len(arg)%2 != 0 {
 			return nil, errors.New("need an even number of arguments")
 		}
 		for len(arg) > 0 {
 			id, dest := arg[0], arg[1]
 			arg = arg[2:]
-			err = f.copyID(ctx, id, dest)
+			err = f.copyOrMoveID(ctx, name, id, dest)
 			if err != nil {
-				return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
+				return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
 			}
 		}
 		return nil, nil
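To make the trailing-slash rule in the help text concrete (file ID hypothetical): `rclone backend moveid drive: 1AbCdEfG backup/` keeps the file's own name inside `backup/`, while `rclone backend moveid drive: 1AbCdEfG backup/new.txt` renames it to `new.txt`. Both forms dispatch through `copyOrMoveID` and hence `operations.Move`, per the `Command` hunk above.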
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
 	require.NoError(t, f.Purge(ctx, "trashDir"))
 }

-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
-func (f *Fs) InternalTestCopyID(t *testing.T) {
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
+func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
 	ctx := context.Background()
 	obj, err := f.NewObject(ctx, existingFile)
 	require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	}

 	t.Run("BadID", func(t *testing.T) {
-		err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
+		err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
 		require.Error(t, err)
 		assert.Contains(t, err.Error(), "couldn't find id")
 	})
@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	t.Run("Directory", func(t *testing.T) {
 		rootID, err := f.dirCache.RootID(ctx, false)
 		require.NoError(t, err)
-		err = f.copyID(ctx, rootID, dir+"/")
+		err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "can't copy directory")
+		assert.Contains(t, err.Error(), "can't moveid directory")
 	})

-	t.Run("WithoutDestName", func(t *testing.T) {
-		err = f.copyID(ctx, o.id, dir+"/")
+	t.Run("MoveWithoutDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
 		require.NoError(t, err)
 		checkFile(path.Base(existingFile))
 	})

-	t.Run("WithDestName", func(t *testing.T) {
-		err = f.copyID(ctx, o.id, dir+"/potato.txt")
+	t.Run("CopyWithoutDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
+		require.NoError(t, err)
+		checkFile(path.Base(existingFile))
+	})
+
+	t.Run("MoveWithDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
+		require.NoError(t, err)
+		checkFile("potato.txt")
+	})
+
+	t.Run("CopyWithDestName", func(t *testing.T) {
+		err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
 		require.NoError(t, err)
 		checkFile("potato.txt")
 	})
@@ -647,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	})
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
-	t.Run("CopyID", f.InternalTestCopyID)
+	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
 	t.Run("Query", f.InternalTestQuery)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
@@ -11,7 +11,6 @@ import (
 	"fmt"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fs/fserrors"
 )

 // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
+		if retry, err := shouldRetryExclude(ctx, err); !retry {
+			return retry, err
 		}
-		// after the first chunk is uploaded, we retry everything
+		// after the first chunk is uploaded, we retry everything except the excluded errors
 		return err != nil, err
 	})
 	if err != nil {
@@ -47,6 +47,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
@@ -93,7 +94,7 @@ const (

 var (
 	// Description of how to auth for this app
-	dropboxConfig = &oauth2.Config{
+	dropboxConfig = &oauthutil.Config{
 		Scopes: []string{
 			"files.metadata.write",
 			"files.content.write",
@@ -108,7 +109,8 @@ var (
 		// AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
 		// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
 		// },
-		Endpoint:     dropbox.OAuthEndpoint(""),
+		AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
+		TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -133,7 +135,7 @@ var (
 )

 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauth2.Config {
+func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
 	// If not impersonating, use standard scopes
 	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
 		return dropboxConfig
@@ -316,32 +318,46 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
+// Some specific errors which should be excluded from retries
+func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
 	if err == nil {
 		return false, err
 	}
-	errString := err.Error()
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	// First check for specific errors
+	//
+	// These come back from the SDK in a whole host of different
+	// error types, but there doesn't seem to be a consistent way
+	// of reading the error cause, so here we just check using the
+	// error string which isn't perfect but does the job.
+	errString := err.Error()
 	if strings.Contains(errString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
 	} else if strings.Contains(errString, "malformed_path") {
 		return false, fserrors.NoRetryError(err)
 	}
+	return true, err
+}
+
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
+	if retry, err := shouldRetryExclude(ctx, err); !retry {
+		return retry, err
+	}
 	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
-			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+			fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
 			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
 	// Keep old behavior for backward compatibility
+	errString := err.Error()
 	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 		return true, err
 	}
@@ -1020,13 +1036,20 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}

+	// Find and remove existing object
+	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
+	if err != nil {
+		return nil, err
+	}
+	defer cleanup(&err)
+
 	// Temporary Object under construction
 	dstObj := &Object{
 		fs: f,
@@ -1040,7 +1063,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
-	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.CopyV2(&arg)
@@ -1692,14 +1714,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f

 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
+		if retry, err := shouldRetryExclude(ctx, err); !retry {
+			return retry, err
 		}
-		// after the first chunk is uploaded, we retry everything
+		// after the first chunk is uploaded, we retry everything except the excluded errors
 		return err != nil, err
 	})
 	if err != nil {
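The `defer cleanup(&err)` line in the Copy hunk above is doing the heavy lifting: `operations.RemoveExisting` hands back a cleanup function that inspects the caller's final error through a pointer, which is why the Copy signature gains named return values. The exact semantics of `operations.RemoveExisting` aren't shown in this diff; the self-contained sketch below only illustrates the defer-with-error-pointer idiom it relies on:

```go
package main

import (
	"errors"
	"fmt"
)

// prepare stands in for operations.RemoveExisting: it returns a cleanup
// func that decides what to do based on the caller's eventual error.
func prepare() (cleanup func(*error), err error) {
	fmt.Println("existing object moved out of the way")
	return func(perr *error) {
		if *perr == nil {
			fmt.Println("success: discard the old object")
		} else {
			fmt.Println("failure: put the old object back")
		}
	}, nil
}

func copyOp(fail bool) (err error) { // named return: the defer can see it
	cleanup, err := prepare()
	if err != nil {
		return err
	}
	defer cleanup(&err) // reads err as it stands when copyOp returns
	if fail {
		return errors.New("copy failed")
	}
	return nil
}

func main() {
	fmt.Println(copyOp(false)) // <nil>
	fmt.Println(copyOp(true))  // copy failed
}
```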
@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -1228,6 +1228,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
 	}

+	// Find existing object
+	existingObj, err := f.NewObject(ctx, remote)
+	if err == nil {
+		defer func() {
+			// Don't remove existing object if returning an error
+			if err != nil {
+				return
+			}
+			fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
+			err = existingObj.Remove(ctx)
+		}()
+	}
+
 	// Create temporary object
 	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
@@ -60,14 +60,17 @@ const (
     minSleep = 10 * time.Millisecond
 )

-// Description of how to auth for this app
-var storageConfig = &oauth2.Config{
-    Scopes:       []string{storage.DevstorageReadWriteScope},
-    Endpoint:     google.Endpoint,
-    ClientID:     rcloneClientID,
-    ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-    RedirectURL:  oauthutil.RedirectURL,
-}
+var (
+    // Description of how to auth for this app
+    storageConfig = &oauthutil.Config{
+        Scopes:       []string{storage.DevstorageReadWriteScope},
+        AuthURL:      google.Endpoint.AuthURL,
+        TokenURL:     google.Endpoint.TokenURL,
+        ClientID:     rcloneClientID,
+        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+        RedirectURL:  oauthutil.RedirectURL,
+    }
+)

 // Register with Fs
 func init() {

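The mechanical part of this migration repeats across every backend below: oauth2.Config nests its endpoints in an oauth2.Endpoint, while the oauthutil.Config these commits introduce flattens them into top-level AuthURL/TokenURL fields. A self-contained sketch of the shape change, using illustrative local types rather than the real ones:

package main

import "fmt"

// Illustrative stand-ins for the two config shapes: the real types live in
// golang.org/x/oauth2 and rclone's lib/oauthutil.
type endpoint struct{ AuthURL, TokenURL string }

type oauth2Config struct { // nested style (oauth2.Config)
	Scopes   []string
	Endpoint endpoint
}

type oauthutilConfig struct { // flattened style (oauthutil.Config)
	Scopes            []string
	AuthURL, TokenURL string
}

// flatten captures the whole migration: hoist Endpoint.AuthURL/TokenURL to
// the top level and copy everything else across unchanged.
func flatten(in oauth2Config) oauthutilConfig {
	return oauthutilConfig{
		Scopes:   in.Scopes,
		AuthURL:  in.Endpoint.AuthURL,
		TokenURL: in.Endpoint.TokenURL,
	}
}

func main() {
	old := oauth2Config{Scopes: []string{"s"}, Endpoint: endpoint{"https://a", "https://t"}}
	fmt.Printf("%+v\n", flatten(old))
}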
@@ -33,7 +33,6 @@ import (
     "github.com/rclone/rclone/lib/oauthutil"
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/rest"
-    "golang.org/x/oauth2"
     "golang.org/x/oauth2/google"
 )

@@ -60,13 +59,14 @@ const (

 var (
     // Description of how to auth for this app
-    oauthConfig = &oauth2.Config{
+    oauthConfig = &oauthutil.Config{
         Scopes: []string{
             "openid",
             "profile",
             scopeReadWrite, // this must be at position scopeAccess
         },
-        Endpoint:     google.Endpoint,
+        AuthURL:      google.Endpoint.AuthURL,
+        TokenURL:     google.Endpoint.TokenURL,
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectURL,
@@ -1168,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     errors := make([]error, 1)
     results := make([]*api.MediaItem, 1)
     err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-    if err != nil {
+    if err == nil {
         err = errors[0]
         info = results[0]
     }

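The one-token change in the last hunk is a real bug fix: commitBatch reports failure of the batch call through its return value, while per-item failures come back in the errors slice. The old code only consulted errors[0] when the batch call itself had failed; inverting the test surfaces the per-item error when the batch call succeeded. A sketch of the corrected flow, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// commitOne sketches the fixed logic: the returned error covers the batch
// call itself; the per-item error only becomes the result once the batch
// call is known to have gone through.
func commitOne(commitBatch func(results []string, errs []error) error) (result string, err error) {
	itemErrs := make([]error, 1)
	results := make([]string, 1)
	err = commitBatch(results, itemErrs)
	if err == nil { // batch call worked: surface the per-item outcome
		err = itemErrs[0]
		result = results[0]
	}
	return result, err
}

func main() {
	_, err := commitOne(func(results []string, errs []error) error {
		errs[0] = errors.New("item rejected") // per-item failure, batch OK
		return nil
	})
	fmt.Println(err) // "item rejected" - silently dropped before this fix
}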
@@ -31,7 +31,6 @@ import (
     "github.com/rclone/rclone/lib/oauthutil"
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/rest"
-    "golang.org/x/oauth2"
 )

 const (
@@ -48,11 +47,9 @@ const (
 // Globals
 var (
     // Description of how to auth for this app.
-    oauthConfig = &oauth2.Config{
-        Endpoint: oauth2.Endpoint{
-            AuthURL:  "https://my.hidrive.com/client/authorize",
-            TokenURL: "https://my.hidrive.com/oauth2/token",
-        },
+    oauthConfig = &oauthutil.Config{
+        AuthURL:      "https://my.hidrive.com/client/authorize",
+        TokenURL:     "https://my.hidrive.com/oauth2/token",
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.TitleBarRedirectURL,

@@ -331,12 +331,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
+    trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
     if f.opt.NoEscape {
         // Directly concatenate without escaping, no_escape behavior
-        return f.endpointURL + remote
+        return f.endpointURL + trimmedRemote
     }
     // Default behavior
-    return f.endpointURL + rest.URLPathEscape(remote)
+    return f.endpointURL + rest.URLPathEscape(trimmedRemote)
 }

 // Errors returned by parseName

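The trim matters because f.endpointURL is stored with a trailing slash: joining it with a remote that also starts with "/" used to produce a double slash (or an escaped %2F on the escaping path). A quick standalone illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	endpointURL := "https://example.com/base/" // assumed to end in "/"
	remote := "/four/under four.txt"           // may arrive with a leading "/"

	trimmed := strings.TrimLeft(remote, "/")
	fmt.Println(endpointURL + remote)  // https://example.com/base//four/under four.txt
	fmt.Println(endpointURL + trimmed) // https://example.com/base/four/under four.txt
}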
@@ -191,6 +191,33 @@ func TestNewObject(t *testing.T) {
     assert.Equal(t, fs.ErrorObjectNotFound, err)
 }

+func TestNewObjectWithLeadingSlash(t *testing.T) {
+    f := prepare(t)
+
+    o, err := f.NewObject(context.Background(), "/four/under four.txt")
+    require.NoError(t, err)
+
+    assert.Equal(t, "/four/under four.txt", o.Remote())
+    assert.Equal(t, int64(8+lineEndSize), o.Size())
+    _, ok := o.(*Object)
+    assert.True(t, ok)
+
+    // Test the time is correct on the object
+
+    tObj := o.ModTime(context.Background())
+
+    fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
+    require.NoError(t, err)
+    tFile := fi.ModTime()
+
+    fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
+
+    // check object not found
+    o, err = f.NewObject(context.Background(), "/not found.txt")
+    assert.Nil(t, o)
+    assert.Equal(t, fs.ErrorObjectNotFound, err)
+}
+
 func TestOpen(t *testing.T) {
     m := prepareServer(t)

@@ -277,11 +277,9 @@ machines.`)
         m.Set(configClientID, teliaseCloudClientID)
         m.Set(configTokenURL, teliaseCloudTokenURL)
         return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-            OAuth2Config: &oauth2.Config{
-                Endpoint: oauth2.Endpoint{
-                    AuthURL:  teliaseCloudAuthURL,
-                    TokenURL: teliaseCloudTokenURL,
-                },
+            OAuth2Config: &oauthutil.Config{
+                AuthURL:     teliaseCloudAuthURL,
+                TokenURL:    teliaseCloudTokenURL,
                 ClientID:    teliaseCloudClientID,
                 Scopes:      []string{"openid", "jotta-default", "offline_access"},
                 RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -292,11 +290,9 @@ machines.`)
         m.Set(configClientID, telianoCloudClientID)
         m.Set(configTokenURL, telianoCloudTokenURL)
         return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-            OAuth2Config: &oauth2.Config{
-                Endpoint: oauth2.Endpoint{
-                    AuthURL:  telianoCloudAuthURL,
-                    TokenURL: telianoCloudTokenURL,
-                },
+            OAuth2Config: &oauthutil.Config{
+                AuthURL:     telianoCloudAuthURL,
+                TokenURL:    telianoCloudTokenURL,
                 ClientID:    telianoCloudClientID,
                 Scopes:      []string{"openid", "jotta-default", "offline_access"},
                 RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -307,11 +303,9 @@ machines.`)
         m.Set(configClientID, tele2CloudClientID)
         m.Set(configTokenURL, tele2CloudTokenURL)
         return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-            OAuth2Config: &oauth2.Config{
-                Endpoint: oauth2.Endpoint{
-                    AuthURL:  tele2CloudAuthURL,
-                    TokenURL: tele2CloudTokenURL,
-                },
+            OAuth2Config: &oauthutil.Config{
+                AuthURL:     tele2CloudAuthURL,
+                TokenURL:    tele2CloudTokenURL,
                 ClientID:    tele2CloudClientID,
                 Scopes:      []string{"openid", "jotta-default", "offline_access"},
                 RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -322,11 +316,9 @@ machines.`)
         m.Set(configClientID, onlimeCloudClientID)
         m.Set(configTokenURL, onlimeCloudTokenURL)
         return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-            OAuth2Config: &oauth2.Config{
-                Endpoint: oauth2.Endpoint{
-                    AuthURL:  onlimeCloudAuthURL,
-                    TokenURL: onlimeCloudTokenURL,
-                },
+            OAuth2Config: &oauthutil.Config{
+                AuthURL:     onlimeCloudAuthURL,
+                TokenURL:    onlimeCloudTokenURL,
                 ClientID:    onlimeCloudClientID,
                 Scopes:      []string{"openid", "jotta-default", "offline_access"},
                 RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -924,19 +916,17 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
     }

     baseClient := fshttp.NewClient(ctx)
-    oauthConfig := &oauth2.Config{
-        Endpoint: oauth2.Endpoint{
-            AuthURL:  defaultTokenURL,
-            TokenURL: defaultTokenURL,
-        },
+    oauthConfig := &oauthutil.Config{
+        AuthURL:  defaultTokenURL,
+        TokenURL: defaultTokenURL,
     }
     if ver == configVersion {
         oauthConfig.ClientID = defaultClientID
         // if custom endpoints are set use them else stick with defaults
         if tokenURL, ok := m.Get(configTokenURL); ok {
-            oauthConfig.Endpoint.TokenURL = tokenURL
+            oauthConfig.TokenURL = tokenURL
             // jottacloud is weird. we need to use the tokenURL as authURL
-            oauthConfig.Endpoint.AuthURL = tokenURL
+            oauthConfig.AuthURL = tokenURL
         }
     } else if ver == legacyConfigVersion {
         clientID, ok := m.Get(configClientID)
@@ -950,8 +940,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
         oauthConfig.ClientID = clientID
         oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-        oauthConfig.Endpoint.TokenURL = legacyTokenURL
-        oauthConfig.Endpoint.AuthURL = legacyTokenURL
+        oauthConfig.TokenURL = legacyTokenURL
+        oauthConfig.AuthURL = legacyTokenURL

         // add the request filter to fix token refresh
         if do, ok := baseClient.Transport.(interface {

@@ -5,18 +5,18 @@ package local
 import (
     "context"
     "fmt"
-    "syscall"
     "unsafe"

     "github.com/rclone/rclone/fs"
+    "golang.org/x/sys/windows"
 )

-var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     var available, total, free int64
-    root, e := syscall.UTF16PtrFromString(f.root)
+    root, e := windows.UTF16PtrFromString(f.root)
     if e != nil {
         return nil, fmt.Errorf("failed to read disk usage: %w", e)
     }
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
         uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
         uintptr(unsafe.Pointer(&free)),  // lpTotalNumberOfFreeBytes
     )
-    if e1 != syscall.Errno(0) {
+    if e1 != windows.Errno(0) {
         return nil, fmt.Errorf("failed to read disk usage: %w", e1)
     }
     usage := &fs.Usage{

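Swapping syscall.NewLazyDLL for windows.NewLazySystemDLL is more than cosmetic: the x/sys variant resolves the DLL from the Windows system directory only, which defends against DLL-preloading from the current working directory. A minimal Windows-only sketch of the same call pattern (error handling simplified; a production version would also check the call's boolean result):

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Loaded strictly from the system directory, unlike syscall.NewLazyDLL.
	proc := windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

	var available, total, free int64
	root, err := windows.UTF16PtrFromString(`C:\`)
	if err != nil {
		panic(err)
	}
	_, _, e1 := proc.Call(
		uintptr(unsafe.Pointer(root)),       // lpDirectoryName
		uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable
		uintptr(unsafe.Pointer(&total)),     // lpTotalNumberOfBytes
		uintptr(unsafe.Pointer(&free)),      // lpTotalNumberOfFreeBytes
	)
	if e1 != windows.Errno(0) {
		panic(e1)
	}
	fmt.Println("free bytes:", free)
}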
backend/local/lchmod.go (new file, 16 lines)
@@ -0,0 +1,16 @@
+//go:build windows || plan9 || js || linux
+
+package local
+
+import "os"
+
+const haveLChmod = false
+
+// lChmod changes the mode of the named file to mode. If the file is a symbolic
+// link, it changes the link, not the target. If there is an error,
+// it will be of type *PathError.
+func lChmod(name string, mode os.FileMode) error {
+    // Can't do this safely on this OS - chmoding a symlink always
+    // changes the destination.
+    return nil
+}
backend/local/lchmod_unix.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+//go:build !windows && !plan9 && !js && !linux
+
+package local
+
+import (
+    "os"
+    "syscall"
+
+    "golang.org/x/sys/unix"
+)
+
+const haveLChmod = true
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+//
+// Borrowed from the syscall source since it isn't public.
+func syscallMode(i os.FileMode) (o uint32) {
+    o |= uint32(i.Perm())
+    if i&os.ModeSetuid != 0 {
+        o |= syscall.S_ISUID
+    }
+    if i&os.ModeSetgid != 0 {
+        o |= syscall.S_ISGID
+    }
+    if i&os.ModeSticky != 0 {
+        o |= syscall.S_ISVTX
+    }
+    return o
+}
+
+// lChmod changes the mode of the named file to mode. If the file is a symbolic
+// link, it changes the link, not the target. If there is an error,
+// it will be of type *PathError.
+func lChmod(name string, mode os.FileMode) error {
+    // NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
+    // and returns ENOTSUP if you try, so we don't support this on linux
+    if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
+        return &os.PathError{Op: "lChmod", Path: name, Err: e}
+    }
+    return nil
+}
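The key call in the new file is unix.Fchmodat with AT_SYMLINK_NOFOLLOW, which changes the link itself rather than its target; Linux is excluded by the build tags because its fchmodat(2) rejects that flag with ENOTSUP. A hedged usage sketch for the platforms the file covers:

//go:build darwin || freebsd || netbsd || openbsd

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	if err := os.Symlink("target.txt", "link.txt"); err != nil {
		panic(err)
	}
	// Sets 0o600 on the link itself; target.txt's mode is untouched.
	if err := unix.Fchmodat(unix.AT_FDCWD, "link.txt", 0o600, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		panic(err)
	}
	fi, err := os.Lstat("link.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Mode())
}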
@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js
+//go:build plan9 || js

 package local

backend/local/lchtimes_windows.go (new file, 19 lines)
@@ -0,0 +1,19 @@
+//go:build windows
+
+package local
+
+import (
+    "time"
+)
+
+const haveLChtimes = true
+
+// lChtimes changes the access and modification times of the named
+// link, similar to the Unix utime() or utimes() functions.
+//
+// The underlying filesystem may truncate or round the values to a
+// less precise time unit.
+// If there is an error, it will be of type *PathError.
+func lChtimes(name string, atime time.Time, mtime time.Time) error {
+    return setTimes(name, atime, mtime, time.Time{}, true)
+}
@@ -34,7 +34,6 @@ import (
 // Constants
 const (
     devUnset   = 0xdeadbeefcafebabe // a device id meaning it is unset
-    linkSuffix = ".rclonelink"      // The suffix added to a translated symbolic link
     useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
 )

@@ -101,10 +100,8 @@ Metadata is supported on files and directories.
     },
     {
         Name:     "links",
-        Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
+        Help:     "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
         Default:  false,
-        NoPrefix: true,
-        ShortOpt: "l",
         Advanced: true,
     },
     {
@@ -379,17 +376,22 @@ type Directory struct {

 var (
     errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-    errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
+    errLinksNeedsSuffix  = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
 )

 // NewFs constructs an Fs from the path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+    ci := fs.GetConfig(ctx)
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
     if err != nil {
         return nil, err
     }
+    // Override --local-links with --links if set
+    if ci.Links {
+        opt.TranslateSymlinks = true
+    }
     if opt.TranslateSymlinks && opt.FollowSymlinks {
         return nil, errLinksAndCopyLinks
     }
@@ -435,9 +437,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         f.dev = readDevice(fi, f.opt.OneFileSystem)
     }
     // Check to see if this is a .rclonelink if not found
-    hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
+    hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
     if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-        fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
+        fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
     }
     if err == nil && f.isRegular(fi.Mode()) {
         // Handle the odd case, that a symlink was specified by name without the link suffix
@@ -508,8 +510,8 @@ func (f *Fs) caseInsensitive() bool {
 //
 // for regular files, localPath is returned unchanged
 func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-    isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
-    newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
+    isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
+    newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
     return newLocalPath, isTranslatedLink
 }

@@ -692,7 +694,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     } else {
         // Check whether this link should be translated
         if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-            newRemote += linkSuffix
+            newRemote += fs.LinkSuffix
         }
         // Don't include non directory if not included
         // we leave directory filtering to the layer above

@@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
     require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

     // Object viewed as symlink
-    file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
+    file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)

     // Object viewed as destination
     file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {

     // Create a symlink
     modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-    file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+    file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
     fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
     if haveLChtimes {
         r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
     assert.Equal(t, "file.txt", linkText)

     // Check that NewObject gets the correct object
-    o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
+    o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
     require.NoError(t, err)
-    assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
+    assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
     assert.Equal(t, int64(8), o.Size())

     // Check that NewObject doesn't see the non suffixed version
@@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
     require.Equal(t, fs.ErrorObjectNotFound, err)

     // Check that NewFs works with the suffixed version and --links
-    f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
+    f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
         "links": "true",
     })
     require.Equal(t, fs.ErrorIsFile, err)

@@ -268,22 +268,66 @@ func TestMetadata(t *testing.T) {
     r := fstest.NewRun(t)
     const filePath = "metafile.txt"
     when := time.Now()
-    const dayLength = len("2001-01-01")
-    whenRFC := when.Format(time.RFC3339Nano)
     r.WriteFile(filePath, "metadata file contents", when)
     f := r.Flocal.(*Fs)

+    // Set fs into "-l" / "--links" mode
+    f.opt.TranslateSymlinks = true
+
+    // Write a symlink to the file
+    symlinkPath := "metafile-link.txt"
+    osSymlinkPath := filepath.Join(f.root, symlinkPath)
+    symlinkPath += fs.LinkSuffix
+    require.NoError(t, os.Symlink(filePath, osSymlinkPath))
+    symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
+    require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
+
+    // Get the object
+    obj, err := f.NewObject(ctx, filePath)
+    require.NoError(t, err)
+    o := obj.(*Object)
+
+    // Get the symlink object
+    symlinkObj, err := f.NewObject(ctx, symlinkPath)
+    require.NoError(t, err)
+    symlinkO := symlinkObj.(*Object)
+
+    // Record metadata for o
+    oMeta, err := o.Metadata(ctx)
+    require.NoError(t, err)
+
+    // Test symlink first to check it doesn't mess up file
+    t.Run("Symlink", func(t *testing.T) {
+        testMetadata(t, r, symlinkO, symlinkModTime)
+    })
+
+    // Read it again
+    oMetaNew, err := o.Metadata(ctx)
+    require.NoError(t, err)
+
+    // Check that operating on the symlink didn't change the file it was pointing to
+    // See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
+    assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
+
+    // Now run the same tests on the file
+    t.Run("File", func(t *testing.T) {
+        testMetadata(t, r, o, when)
+    })
+}
+
+func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
+    ctx := context.Background()
+    whenRFC := when.Format(time.RFC3339Nano)
+    const dayLength = len("2001-01-01")
+
+    f := r.Flocal.(*Fs)
     features := f.Features()

-    var hasXID, hasAtime, hasBtime bool
+    var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
     switch runtime.GOOS {
     case "darwin", "freebsd", "netbsd", "linux":
         hasXID, hasAtime, hasBtime = true, true, true
+        canSetXattrOnLinks = runtime.GOOS != "linux"
     case "openbsd", "solaris":
         hasXID, hasAtime = true, true
     case "windows":
@@ -306,6 +350,10 @@ func TestMetadata(t *testing.T) {
     require.NoError(t, err)
     assert.Nil(t, m)

+    if !canSetXattrOnLinks && o.translatedLink {
+        t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
+    }
+
     inM := fs.Metadata{
         "potato":  "chips",
         "cabbage": "soup",
@@ -320,18 +368,21 @@ func TestMetadata(t *testing.T) {
     })

     checkTime := func(m fs.Metadata, key string, when time.Time) {
+        t.Helper()
         mt, ok := o.parseMetadataTime(m, key)
         assert.True(t, ok)
         dt := mt.Sub(when)
         precision := time.Second
-        assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
+        assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
     }

     checkInt := func(m fs.Metadata, key string, base int) int {
+        t.Helper()
         value, ok := o.parseMetadataInt(m, key, base)
         assert.True(t, ok)
         return value
     }

     t.Run("Read", func(t *testing.T) {
         m, err := o.Metadata(ctx)
         require.NoError(t, err)
@@ -341,13 +392,12 @@ func TestMetadata(t *testing.T) {
         checkInt(m, "mode", 8)
         checkTime(m, "mtime", when)

-        assert.Equal(t, len(whenRFC), len(m["mtime"]))
         assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])

-        if hasAtime {
+        if hasAtime && !o.translatedLink { // symlinks generally don't record atime
             checkTime(m, "atime", when)
         }
-        if hasBtime {
+        if hasBtime && !o.translatedLink { // symlinks generally don't record btime
             checkTime(m, "btime", when)
         }
         if hasXID {
@@ -371,6 +421,10 @@ func TestMetadata(t *testing.T) {
         "mode":   "0767",
         "potato": "wedges",
     }
+    if !canSetXattrOnLinks && o.translatedLink {
+        // Don't change xattr if not supported on symlinks
+        delete(newM, "potato")
+    }
     err := o.writeMetadata(newM)
     require.NoError(t, err)
@@ -380,7 +434,11 @@ func TestMetadata(t *testing.T) {

     mode := checkInt(m, "mode", 8)
     if runtime.GOOS != "windows" {
-        assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
+        expectedMode := 0767
+        if o.translatedLink && runtime.GOOS == "linux" {
+            expectedMode = 0777 // perms of symlinks always read as 0777 on linux
+        }
+        assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
     }

     checkTime(m, "mtime", newMtime)
@@ -390,7 +448,7 @@ func TestMetadata(t *testing.T) {
     if haveSetBTime {
         checkTime(m, "btime", newBtime)
     }
-    if xattrSupported {
+    if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
         assert.Equal(t, "wedges", m["potato"])
     }
 })

@@ -105,7 +105,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
     }
     if haveSetBTime {
         if btimeOK {
-            err = setBTime(o.path, btime)
+            if o.translatedLink {
+                err = lsetBTime(o.path, btime)
+            } else {
+                err = setBTime(o.path, btime)
+            }
             if err != nil {
                 outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
             }
@@ -121,7 +125,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
     if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
         fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
     } else {
-        err = os.Chown(o.path, uid, gid)
+        if o.translatedLink {
+            err = os.Lchown(o.path, uid, gid)
+        } else {
+            err = os.Chown(o.path, uid, gid)
+        }
         if err != nil {
             outErr = fmt.Errorf("failed to change ownership: %w", err)
         }
@@ -132,7 +140,16 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
     if mode >= 0 {
         umode := uint(mode)
         if umode <= math.MaxUint32 {
-            err = os.Chmod(o.path, os.FileMode(umode))
+            if o.translatedLink {
+                if haveLChmod {
+                    err = lChmod(o.path, os.FileMode(umode))
+                } else {
+                    fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
+                    err = nil
+                }
+            } else {
+                err = os.Chmod(o.path, os.FileMode(umode))
+            }
            if err != nil {
                 outErr = fmt.Errorf("failed to change permissions: %w", err)
             }
@@ -13,3 +13,9 @@ func setBTime(name string, btime time.Time) error {
     // Does nothing
     return nil
 }
+
+// lsetBTime changes the birth time of the link passed in
+func lsetBTime(name string, btime time.Time) error {
+    // Does nothing
+    return nil
+}

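The pattern repeated through writeMetadataToFile is a three-way dispatch: use the l-variant when the object is a translated symlink, fall back to the plain call otherwise, and degrade to a debug log where the OS has no safe l-variant. Condensed into one hypothetical helper in package local, assuming the haveLChmod/lChmod shims from the new files above are in scope:

import "os"

// chmodMaybeLink condenses the dispatch used in writeMetadataToFile:
// l-variant for translated symlinks, plain call otherwise, and a silent
// skip where no safe l-variant exists.
func chmodMaybeLink(path string, mode os.FileMode, translatedLink bool) error {
	if !translatedLink {
		return os.Chmod(path, mode)
	}
	if haveLChmod {
		return lChmod(path, mode)
	}
	// chmod on a symlink would follow it on this OS - skip instead
	return nil
}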
@@ -9,15 +9,20 @@ import (

 const haveSetBTime = true

-// setBTime sets the birth time of the file passed in
-func setBTime(name string, btime time.Time) (err error) {
+// setTimes sets any of atime, mtime or btime
+// if link is set it sets a link rather than the target
+func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
     pathp, err := syscall.UTF16PtrFromString(name)
     if err != nil {
         return err
     }
+    fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+    if link {
+        fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
+    }
     h, err := syscall.CreateFile(pathp,
         syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
-        syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+        syscall.OPEN_EXISTING, fileFlag, 0)
     if err != nil {
         return err
     }
@@ -27,6 +32,28 @@ func setBTime(name string, btime time.Time) (err error) {
             err = closeErr
         }
     }()
-    bFileTime := syscall.NsecToFiletime(btime.UnixNano())
-    return syscall.SetFileTime(h, &bFileTime, nil, nil)
+    var patime, pmtime, pbtime *syscall.Filetime
+    if !atime.IsZero() {
+        t := syscall.NsecToFiletime(atime.UnixNano())
+        patime = &t
+    }
+    if !mtime.IsZero() {
+        t := syscall.NsecToFiletime(mtime.UnixNano())
+        pmtime = &t
+    }
+    if !btime.IsZero() {
+        t := syscall.NsecToFiletime(btime.UnixNano())
+        pbtime = &t
+    }
+    return syscall.SetFileTime(h, pbtime, patime, pmtime)
+}
+
+// setBTime sets the birth time of the file passed in
+func setBTime(name string, btime time.Time) (err error) {
+    return setTimes(name, time.Time{}, time.Time{}, btime, false)
+}
+
+// lsetBTime changes the birth time of the link passed in
+func lsetBTime(name string, btime time.Time) error {
+    return setTimes(name, time.Time{}, time.Time{}, btime, true)
+}

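Two details of setTimes are worth noting: time.Time{} serves as a "leave unchanged" sentinel (distinguished by IsZero), and Windows' SetFileTime takes its times in the order creation, last-access, last-write, which is why the call reads SetFileTime(h, pbtime, patime, pmtime). A portable sketch of the sentinel part only:

package main

import (
	"fmt"
	"time"
)

// pick returns a pointer only for times that were actually supplied,
// mirroring how setTimes builds *syscall.Filetime values and passes nil
// for the timestamps that should not be modified.
func pick(t time.Time) *time.Time {
	if t.IsZero() {
		return nil // nil means "do not modify this timestamp"
	}
	return &t
}

func main() {
	btime := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC)
	fmt.Println(pick(time.Time{}), pick(btime)) // <nil> 2002-02-03 04:05:10 +0000 UTC
}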
@@ -68,14 +68,12 @@ var (
 )

 // Description of how to authorize
-var oauthConfig = &oauth2.Config{
+var oauthConfig = &oauthutil.Config{
     ClientID:     api.OAuthClientID,
     ClientSecret: "",
-    Endpoint: oauth2.Endpoint{
-        AuthURL:   api.OAuthURL,
-        TokenURL:  api.OAuthURL,
-        AuthStyle: oauth2.AuthStyleInParams,
-    },
+    AuthURL:   api.OAuthURL,
+    TokenURL:  api.OAuthURL,
+    AuthStyle: oauth2.AuthStyleInParams,
 }

 // Register with Fs
@@ -438,7 +436,9 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
     if err != nil || !tokenIsValid(t) {
         fs.Infof(f, "Valid token not found, authorizing.")
         ctx := oauthutil.Context(ctx, f.cli)
-        t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
+
+        oauth2Conf := oauthConfig.MakeOauth2Config()
+        t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
     }
     if err == nil && !tokenIsValid(t) {
         err = errors.New("invalid token")

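This backend is the one place here that still needs a real *oauth2.Config back, because the resource-owner password flow lives on that type; the hunk relies on oauthutil.Config exposing a MakeOauth2Config conversion. A hedged sketch of the resulting call-site shape (the MakeOauth2Config signature is inferred from the hunk, not checked against any oauthutil version):

// Inside authorize, assuming oauthConfig is the *oauthutil.Config above:
ctx := oauthutil.Context(ctx, f.cli)

// Convert back to *oauth2.Config just for the password-credentials call.
oauth2Conf := oauthConfig.MakeOauth2Config()
t, err := oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
if err == nil && (t == nil || !t.Valid()) {
	err = errors.New("invalid token")
}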
@@ -202,9 +202,14 @@ type SharingLinkType struct {
 type LinkType string

 const (
-    ViewLinkType  LinkType = "view"  // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
-    EditLinkType  LinkType = "edit"  // EditLinkType (role: write) An edit sharing link, allowing read-write access.
-    EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
+    // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
+    ViewLinkType LinkType = "view"
+    // EditLinkType (role: write) An edit sharing link, allowing read-write access.
+    EditLinkType LinkType = "edit"
+    // EmbedLinkType (role: read) A view-only sharing link that can be used to embed
+    // content into a host webpage. Embed links are not available for OneDrive for
+    // Business or SharePoint.
+    EmbedLinkType LinkType = "embed"
 )

 // LinkScope represents the scope of the link represented by this permission.
@@ -212,9 +217,12 @@ const (
 type LinkScope string

 const (
-    AnonymousScope    LinkScope = "anonymous"    // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
-    OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
-
+    // AnonymousScope = Anyone with the link has access, without needing to sign in.
+    // This may include people outside of your organization.
+    AnonymousScope LinkScope = "anonymous"
+    // OrganizationScope = Anyone signed into your organization (tenant) can use the
+    // link to get access. Only available in OneDrive for Business and SharePoint.
+    OrganizationScope LinkScope = "organization"
 )

 // PermissionsType provides information about a sharing permission granted for a DriveItem resource.
@@ -236,10 +244,14 @@ type PermissionsType struct {
 type Role string

 const (
-    ReadRole   Role = "read"   // ReadRole provides the ability to read the metadata and contents of the item.
-    WriteRole  Role = "write"  // WriteRole provides the ability to read and modify the metadata and contents of the item.
-    OwnerRole  Role = "owner"  // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
-    MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
+    // ReadRole provides the ability to read the metadata and contents of the item.
+    ReadRole Role = "read"
+    // WriteRole provides the ability to read and modify the metadata and contents of the item.
+    WriteRole Role = "write"
+    // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
+    OwnerRole Role = "owner"
+    // MemberRole represents the member role for SharePoint and OneDrive for Business.
+    MemberRole Role = "member"
 )

 // PermissionsResponse is the response to the list permissions method

@@ -40,7 +40,6 @@ import (
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/readers"
     "github.com/rclone/rclone/lib/rest"
-    "golang.org/x/oauth2"
 )

 const (
@@ -65,14 +64,21 @@ const (

 // Globals
 var (
-    authPath  = "/common/oauth2/v2.0/authorize"
-    tokenPath = "/common/oauth2/v2.0/token"
+    // Define the paths used for token operations
+    commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
+    authPath         = "/oauth2/v2.0/authorize"
+    tokenPath        = "/oauth2/v2.0/token"

     scopeAccess             = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
     scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

-    // Description of how to auth for this app for a business account
-    oauthConfig = &oauth2.Config{
+    // When using client credential OAuth flow, scope of .default is required in order
+    // to use the permissions configured for the application within the tenant
+    scopeAccessClientCred = fs.SpaceSepList{".default"}
+
+    // Base config for how to auth
+    oauthConfig = &oauthutil.Config{
         Scopes:       scopeAccess,
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -183,6 +189,14 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
             Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
         },
     },
+}, {
+    Name: "tenant",
+    Help: `ID of the service principal's tenant. Also called its directory ID.
+
+Set this if using
+- Client Credential flow
+`,
+    Sensitive: true,
 }, {
     Name: "disable_site_permission",
     Help: `Disable the request for Sites.Read.All permission.
@@ -527,28 +541,54 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
     })
 }

-// Config the backend
-func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-    region, graphURL := getRegionURL(m)
-
-    if config.State == "" {
-        var accessScopes fs.SpaceSepList
-        accessScopesString, _ := m.Get("access_scopes")
-        err := accessScopes.Set(accessScopesString)
-        if err != nil {
-            return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
-        }
-        oauthConfig.Scopes = []string(accessScopes)
-        disableSitePermission, _ := m.Get("disable_site_permission")
-        if disableSitePermission == "true" {
-            oauthConfig.Scopes = scopeAccessWithoutSites
-        }
-        oauthConfig.Endpoint = oauth2.Endpoint{
-            AuthURL:  authEndpoint[region] + authPath,
-            TokenURL: authEndpoint[region] + tokenPath,
-        }
+// Make the oauth config for the backend
+func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
+    // Copy the default oauthConfig
+    oauthConfig := *oauthConfig
+
+    // Set the scopes
+    oauthConfig.Scopes = opt.AccessScopes
+    if opt.DisableSitePermission {
+        oauthConfig.Scopes = scopeAccessWithoutSites
+    }
+
+    // Construct the auth URLs
+    prefix := commonPathPrefix
+    if opt.Tenant != "" {
+        prefix = "/" + opt.Tenant
+    }
+    oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
+    oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
+
+    // Check to see if we are using client credentials flow
+    if opt.ClientCredentials {
+        // Override scope to .default
+        oauthConfig.Scopes = scopeAccessClientCred
+        if opt.Tenant == "" {
+            return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
+        }
+    }
+
+    return &oauthConfig, nil
+}
+
+// Config the backend
+func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
+    opt := new(Options)
+    err := configstruct.Set(m, opt)
+    if err != nil {
+        return nil, err
+    }
+    _, graphURL := getRegionURL(m)
+
+    // Check to see if this is the start of the state machine execution
+    if conf.State == "" {
+        conf, err := makeOauthConfig(ctx, opt)
+        if err != nil {
+            return nil, err
+        }
         return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
-            OAuth2Config: oauthConfig,
+            OAuth2Config: conf,
         })
     }

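Two things happen in that reorganisation. Renaming the fs.ConfigIn parameter from config to conf appears to free the config identifier for the fs/config package (used in config.ConfigClientCredentials). And the endpoint construction itself is plain string assembly: with no tenant the paths keep the /common prefix, with a tenant (required for the client-credentials flow) the tenant ID replaces it. A standalone sketch of that assembly, using the paths from the diff:

package main

import "fmt"

// authURLs rebuilds the prefix logic from makeOauthConfig: /common when the
// tenant isn't known, /<tenant> otherwise.
func authURLs(base, tenant string) (authURL, tokenURL string) {
	prefix := "/common" // used when the tenant isn't known
	if tenant != "" {
		prefix = "/" + tenant
	}
	return base + prefix + "/oauth2/v2.0/authorize",
		base + prefix + "/oauth2/v2.0/token"
}

func main() {
	// base stands in for authEndpoint[opt.Region].
	fmt.Println(authURLs("https://login.microsoftonline.com", ""))
	fmt.Println(authURLs("https://login.microsoftonline.com", "my-tenant-id"))
}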
@@ -556,9 +596,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
     if err != nil {
         return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
     }
+
+    // Create a REST client, build on the OAuth client created above
     srv := rest.NewClient(oAuthClient)

-    switch config.State {
+    switch conf.State {
     case "choose_type":
         return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
             Value: "onedrive",
@@ -584,7 +626,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
     }})
 case "choose_type_done":
     // Jump to next state according to config chosen
-    return fs.ConfigGoto(config.Result)
+    return fs.ConfigGoto(conf.Result)
 case "onedrive":
     return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
         opts: rest.Opts{
@@ -602,16 +644,22 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
         },
     })
 case "driveid":
-    return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
+    out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
+    if err != nil {
+        return out, err
+    }
+    // Default the drive_id to the previous version in the config
+    out.Option.Default, _ = m.Get("drive_id")
+    return out, nil
 case "driveid_end":
     return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-        finalDriveID: config.Result,
+        finalDriveID: conf.Result,
     })
 case "siteid":
     return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
 case "siteid_end":
     return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-        siteID: config.Result,
+        siteID: conf.Result,
     })
 case "url":
     return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -622,7 +670,7 @@ Examples:
 - "https://XXX.sharepoint.com/teams/ID"
 `)
 case "url_end":
-    siteURL := config.Result
+    siteURL := conf.Result
     re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
     match := re.FindStringSubmatch(siteURL)
     if len(match) == 2 {
@@ -637,12 +685,12 @@ Examples:
     return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
 case "path_end":
     return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-        relativePath: config.Result,
+        relativePath: conf.Result,
     })
 case "search":
     return fs.ConfigInput("search_end", "config_search_term", `Search term`)
 case "search_end":
-    searchTerm := config.Result
+    searchTerm := conf.Result
     opts := rest.Opts{
         Method:  "GET",
         RootURL: graphURL,
@@ -664,10 +712,10 @@ Examples:
     })
 case "search_sites":
     return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-        siteID: config.Result,
+        siteID: conf.Result,
     })
 case "driveid_final":
-    finalDriveID := config.Result
+    finalDriveID := conf.Result

     // Test the driveID and get drive type
     opts := rest.Opts{
@@ -686,12 +734,12 @@ Examples:

     return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
 case "driveid_final_end":
-    if config.Result == "true" {
+    if conf.Result == "true" {
         return nil, nil
     }
     return fs.ConfigGoto("choose_type")
 }
-return nil, fmt.Errorf("unknown state %q", config.State)
+return nil, fmt.Errorf("unknown state %q", conf.State)
 }

 // Options defines the configuration for this backend
@@ -702,7 +750,9 @@ type Options struct {
     DriveType               string          `config:"drive_type"`
     RootFolderID            string          `config:"root_folder_id"`
     DisableSitePermission   bool            `config:"disable_site_permission"`
+    ClientCredentials       bool            `config:"client_credentials"`
     AccessScopes            fs.SpaceSepList `config:"access_scopes"`
+    Tenant                  string          `config:"tenant"`
     ExposeOneNoteFiles      bool            `config:"expose_onenote_files"`
     ServerSideAcrossConfigs bool            `config:"server_side_across_configs"`
     ListChunk               int64           `config:"list_chunk"`
@@ -990,13 +1040,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }

     rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
-    oauthConfig.Scopes = opt.AccessScopes
-    if opt.DisableSitePermission {
-        oauthConfig.Scopes = scopeAccessWithoutSites
-    }
-    oauthConfig.Endpoint = oauth2.Endpoint{
-        AuthURL:  authEndpoint[opt.Region] + authPath,
-        TokenURL: authEndpoint[opt.Region] + tokenPath,
+
+    oauthConfig, err := makeOauthConfig(ctx, opt)
+    if err != nil {
+        return nil, err
     }

     client := fshttp.NewClient(ctx)
@@ -1609,7 +1656,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
     srcObj, ok := src.(*Object)
     if !ok {
         fs.Debugf(src, "Can't copy - not same remote type")
@@ -1624,11 +1671,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
         return nil, fs.ErrorCantCopy
     }

-    err := srcObj.readMetaData(ctx)
+    err = srcObj.readMetaData(ctx)
     if err != nil {
         return nil, err
     }

+    // Find and remove existing object
+    cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
+    if err != nil {
+        return nil, err
+    }
+    defer cleanup(&err)
+
     // Check we aren't overwriting a file on the same remote
     if srcObj.fs == f {
         srcPath := srcObj.rootPath()
@@ -2556,8 +2610,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return errors.New("can't upload content to a OneNote file")
     }

-    o.fs.tokenRenewer.Start()
-    defer o.fs.tokenRenewer.Stop()
+    // Only start the renewer if we have a valid one
+    if o.fs.tokenRenewer != nil {
+        o.fs.tokenRenewer.Start()
+        defer o.fs.tokenRenewer.Stop()
+    }

     size := src.Size()

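The Update guard exists because the client-credentials flow added above plausibly creates no token renewer, so o.fs.tokenRenewer can now legitimately be nil. The nil-guarded Start/Stop pairing is a general idiom, sketched here with a stand-in type:

package main

import "fmt"

// Renewer stands in for rclone's oauth token renewer; with some auth flows
// there may be no renewer at all, hence the nil check.
type Renewer struct{}

func (r *Renewer) Start() { fmt.Println("renewer started") }
func (r *Renewer) Stop()  { fmt.Println("renewer stopped") }

// withRenewer brackets fn with Start/Stop only when a renewer exists.
func withRenewer(r *Renewer, fn func() error) error {
	if r != nil {
		r.Start()
		defer r.Stop()
	}
	return fn()
}

func main() {
	_ = withRenewer(nil, func() error { return nil })        // no renewal needed
	_ = withRenewer(&Renewer{}, func() error { return nil }) // started and stopped
}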
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
     compareDirMeta(expectedMeta, actualMeta, false)

     // modtime
-    assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
+    fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
     // try changing it and re-check it
     newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
     assert.NoError(t, err)
-    assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
+    fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
     // ensure that f.DirSetModTime also works
     err = f.DirSetModTime(ctx, "subdir", t3)
     assert.NoError(t, err)
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
     assert.NoError(t, err)
     entries.ForDir(func(dir fs.Directory) {
         if dir.Remote() == "subdir" {
-            assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
+            fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
         }
     })

@@ -22,6 +22,7 @@ import (
     "github.com/oracle/oci-go-sdk/v65/objectstorage"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/chunksize"
+    "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/hash"
 )

@@ -183,6 +184,9 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
         if ossPartNumber <= 8 {
             return shouldRetry(ctx, resp.HTTPResponse(), err)
         }
+        if fserrors.ContextError(ctx, &err) {
+            return false, err
+        }
         // retry all chunks once have done the first few
         return true, err
     }

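The added check closes a subtle hole in "retry everything" loops: once the context is cancelled, returning true just spins until the pacer gives up. fserrors.ContextError reports a context cancellation and folds it into the error so the loop can bail out immediately. The general shape, with a simplified stand-in for the rclone helper:

package main

import (
	"context"
	"errors"
	"fmt"
)

// contextErr is a simplified stand-in for fserrors.ContextError: it reports
// whether the context is done, folding the context's error into *errp if no
// other error is already set.
func contextErr(ctx context.Context, errp *error) bool {
	if ctxErr := ctx.Err(); ctxErr != nil {
		if *errp == nil {
			*errp = ctxErr
		}
		return true
	}
	return false
}

func writeChunk(ctx context.Context, attempt func() error) error {
	for {
		err := attempt()
		if err == nil {
			return nil
		}
		if contextErr(ctx, &err) {
			return err // cancelled: don't keep retrying
		}
		// otherwise retry
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(writeChunk(ctx, func() error { return errors.New("boom") }))
}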
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
     Sensitive: true,
 }, {
     Name:      "compartment",
-    Help:      "Object storage compartment OCID",
+    Help:      "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
     Provider:  "!no_auth",
-    Required:  true,
+    Required:  false,
     Sensitive: true,
 }, {
     Name: "region",

@@ -48,12 +48,10 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauth2.Config{
-        Scopes: nil,
-        Endpoint: oauth2.Endpoint{
-            AuthURL: "https://my.pcloud.com/oauth2/authorize",
-            // TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
-        },
+    oauthConfig = &oauthutil.Config{
+        Scopes:  nil,
+        AuthURL: "https://my.pcloud.com/oauth2/authorize",
+        // TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -61,8 +59,8 @@ var (
 )

 // Update the TokenURL with the actual hostname
-func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
-    oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
+func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
+    oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
 }

 // Register with Fs
@@ -79,7 +77,7 @@ func init() {
         fs.Errorf(nil, "Failed to read config: %v", err)
     }
     updateTokenURL(oauthConfig, optc.Hostname)
-    checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
+    checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
         if auth == nil || auth.Form == nil {
             return errors.New("form not found in response")
         }
@@ -399,14 +397,15 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
     if err != nil {
         return nil, fmt.Errorf("open file: %w", err)
     }
+    if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
+        return nil, fmt.Errorf("close file: %w", err)
+    }

     writer := &writerAt{
-        ctx:    ctx,
-        client: client,
+        ctx:    ctx,
         fs:     f,
         size:   size,
         remote: remote,
-        fd:     openResult.FileDescriptor,
         fileID: openResult.Fileid,
     }
@@ -18,21 +18,14 @@ import (
 // writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
 type writerAt struct {
     ctx    context.Context
-    client *rest.Client
     fs     *Fs
     size   int64
     remote string
-    fd     int64
     fileID int64
 }

 // Close implements WriterAt.Close.
 func (c *writerAt) Close() error {
-    // close fd
-    if _, err := c.fileClose(c.ctx); err != nil {
-        return fmt.Errorf("close fd: %w", err)
-    }
-
     // Avoiding race conditions: Depending on the tcp connection, there might be
     // caching issues when checking the size immediately after write.
     // Hence we try avoiding them by checking the resulting size on a different connection.
@@ -72,8 +65,18 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
     inSHA1Bytes := sha1.Sum(buffer)
     inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

+    client, err := c.fs.newSingleConnClient(c.ctx)
+    if err != nil {
+        return 0, fmt.Errorf("create client: %w", err)
+    }
+
+    openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
+    if err != nil {
+        return 0, fmt.Errorf("open file: %w", err)
+    }
+
     // get target hash
-    outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
+    outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
     if err != nil {
         return 0, err
     }
@@ -89,10 +92,15 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
     }

     // upload buffer with offset if necessary
-    if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
+    if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
         return 0, err
     }

+    // close fd
+    if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
+        return contentLength, fmt.Errorf("close fd: %w", err)
+    }
+
     return contentLength, nil
 }

@@ -125,11 +133,40 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
     return result, nil
 }

+// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
+// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
+func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
+    opts := rest.Opts{
+        Method:           "PUT",
+        Path:             "/file_open",
+        Parameters:       url.Values{},
+        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
+        ExtraHeaders: map[string]string{
+            "Connection": "keep-alive",
+        },
+    }
+    opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
+    opts.Parameters.Set("flags", "0x0002") // O_WRITE
+
+    result := &api.FileOpenResponse{}
+    err := srcFs.pacer.CallNoRetry(func() (bool, error) {
+        resp, err := c.CallJSON(ctx, &opts, nil, result)
+        err = result.Error.Update(err)
+        return shouldRetry(ctx, resp, err)
+    })
+    if err != nil {
+        return nil, fmt.Errorf("open new file descriptor: %w", err)
+    }
+    return result, nil
+}
+
 // Call pcloud file_checksum, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
-func (c *writerAt) fileChecksum(
+func fileChecksum(
     ctx context.Context,
-    offset, count int64,
+    client *rest.Client,
+    pacer *fs.Pacer,
+    fd, offset, count int64,
 ) (*api.FileChecksumResponse, error) {
     opts := rest.Opts{
         Method: "PUT",
@@ -140,26 +177,29 @@ func (c *writerAt) fileChecksum(
             "Connection": "keep-alive",
         },
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
     opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
     opts.Parameters.Set("count", strconv.FormatInt(count, 10))

     result := &api.FileChecksumResponse{}
-    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
-        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
+    err := pacer.CallNoRetry(func() (bool, error) {
+        resp, err := client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
+        return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
     }
     return result, nil
 }

 // Call pcloud file_pwrite, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
-func (c *writerAt) filePWrite(
+func filePWrite(
     ctx context.Context,
+    client *rest.Client,
+    pacer *fs.Pacer,
+    fd int64,
     offset int64,
     buf []byte,
 ) (*api.FilePWriteResponse, error) {
@@ -176,24 +216,29 @@ func (c *writerAt) filePWrite(
             "Connection": "keep-alive",
         },
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
     opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))

     result := &api.FilePWriteResponse{}
-    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
-        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
+    err := pacer.CallNoRetry(func() (bool, error) {
+        resp, err := client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
+        return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
     }
     return result, nil
 }

 // Call pcloud file_close, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
-func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
+func fileClose(
+    ctx context.Context,
+    client *rest.Client,
+    pacer *fs.Pacer,
+    fd int64,
+) (*api.FileCloseResponse, error) {
     opts := rest.Opts{
         Method:           "PUT",
         Path:             "/file_close",
@@ -201,11 +246,11 @@ func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error
         TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
         Close:            true,
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))

     result := &api.FileCloseResponse{}
-    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
-        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
+    err := pacer.CallNoRetry(func() (bool, error) {
+        resp, err := client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })

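The net effect of the pcloud refactor is that each WriteAt now owns the full file-descriptor lifecycle on a dedicated connection (open by fileID, checksum, pwrite, close) instead of sharing one fd cached on the struct. A compressed sketch of that per-call sequence in package pcloud, reusing the free functions defined above (a sketch, not the shipped method):

// writeAtSketch condenses the per-call lifecycle after the refactor:
// everything needed (client, pacer, fd) is passed explicitly rather than
// being cached on the writerAt struct.
func (c *writerAt) writeAtSketch(buffer []byte, offset int64) (int, error) {
	client, err := c.fs.newSingleConnClient(c.ctx) // pcloud ties the fd to one connection
	if err != nil {
		return 0, err
	}
	openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
	if err != nil {
		return 0, err
	}
	if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
		return 0, err
	}
	_, err = fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor)
	return len(buffer), err
}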
@@ -82,13 +82,11 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauth2.Config{
-        Scopes: nil,
-        Endpoint: oauth2.Endpoint{
-            AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
-            TokenURL:  "https://user.mypikpak.com/v1/auth/token",
-            AuthStyle: oauth2.AuthStyleInParams,
-        },
+    oauthConfig = &oauthutil.Config{
+        Scopes:    nil,
+        AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
+        TokenURL:  "https://user.mypikpak.com/v1/auth/token",
+        AuthStyle: oauth2.AuthStyleInParams,
         ClientID:    clientID,
         RedirectURL: oauthutil.RedirectURL,
     }
@@ -215,6 +213,11 @@ Fill in for rclone to use a non root folder as its starting point.
     Default:  false,
     Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
     Advanced: true,
+}, {
+    Name:     "no_media_link",
+    Default:  false,
+    Help:     "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
+    Advanced: true,
 }, {
     Name: "hash_memory_limit",
     Help: "Files bigger than this will be cached on disk to calculate hash if required.",
@@ -288,6 +291,7 @@ type Options struct {
     RootFolderID        string        `config:"root_folder_id"`
     UseTrash            bool          `config:"use_trash"`
     TrashedOnly         bool          `config:"trashed_only"`
+    NoMediaLink         bool          `config:"no_media_link"`
     HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
     ChunkSize           fs.SizeSuffix `config:"chunk_size"`
     UploadConcurrency   int           `config:"upload_concurrency"`
@@ -561,6 +565,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
         if strings.Contains(err.Error(), "invalid_grant") {
             return f, f.reAuthorize(ctx)
         }
+        return nil, err
     }

     return f, nil
@@ -1576,15 +1581,14 @@ func (o *Object) setMetaData(info *api.File) (err error) {
     o.md5sum = info.Md5Checksum
     if info.Links.ApplicationOctetStream != nil {
         o.link = info.Links.ApplicationOctetStream
-        if fid := parseFileID(o.link.URL); fid != "" {
-            for mid, media := range info.Medias {
-                if media.Link == nil {
-                    continue
-                }
-                if mfid := parseFileID(media.Link.URL); fid == mfid {
-                    fs.Debugf(o, "Using a media link from Medias[%d]", mid)
-                    o.link = media.Link
-                    break
+        if !o.fs.opt.NoMediaLink {
+            if fid := parseFileID(o.link.URL); fid != "" {
+                for _, media := range info.Medias {
+                    if media.Link != nil && parseFileID(media.Link.URL) == fid {
+                        fs.Debugf(o, "Using a media link")
+                        o.link = media.Link
+                        break
+                    }
                 }
             }
         }

@@ -43,7 +43,6 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

const (
@@ -59,12 +58,10 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
},
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,

@@ -572,6 +572,17 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if err != nil {
return nil, err
}

// We have successfully copied the file to random name
// Check to see if file already exists first and delete it if so
existingObj, err := f.NewObject(ctx, remote)
if err == nil {
err = existingObj.Remove(ctx)
if err != nil {
return nil, fmt.Errorf("server side copy: failed to remove existing file: %w", err)
}
}

err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(resp.File.ID, 10))
@@ -13,7 +13,6 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)

/*
@@ -41,12 +40,10 @@ const (

var (
// Description of how to auth for this app
putioConfig = &oauth2.Config{
Scopes: []string{},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
},
putioConfig = &oauthutil.Config{
Scopes: []string{},
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -2056,7 +2056,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier storage class",
Help: "Glacier Flexible Retrieval storage class",
}, {
Value: "DEEP_ARCHIVE",
Help: "Glacier Deep Archive storage class",
@@ -5866,6 +5866,25 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return resp.Body, err
}

// middleware to stop the SDK adding `Accept-Encoding: identity`
func removeDisableGzip() func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
return err
}
}

// middleware to set Accept-Encoding to how we want it
//
// This make sure we download compressed files as-is from all platforms
func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
APIOptions = append(APIOptions, removeDisableGzip())
if f.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
return APIOptions
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
bucket, bucketPath := o.split()
@@ -5899,11 +5918,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

var APIOptions []func(*middleware.Stack) error

// Override the automatic decompression in the transport to
// download compressed files as-is
if o.fs.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
// Set the SDK to always download compressed files as-is
APIOptions = append(APIOptions, o.fs.acceptEncoding()...)

for _, option := range options {
switch option.(type) {
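A sketch of how such per-call middleware options are applied with the AWS SDK for Go v2 (the GetObject option-function shape is standard SDK API; the variable names here are assumptions):

resp, err := svc.GetObject(ctx, &req, func(opts *s3.Options) {
	// acceptEncoding() strips the SDK's DisableAcceptEncodingGzip
	// middleware and, when configured, re-adds Accept-Encoding: gzip
	opts.APIOptions = append(opts.APIOptions, f.acceptEncoding()...)
})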
@@ -6054,8 +6070,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: mOut.Bucket,
key: mOut.Key,
bucket: ui.req.Bucket,
key: ui.req.Key,
uploadID: mOut.UploadId,
multiPartUploadInput: &mReq,
completedParts: make([]types.CompletedPart, 0),
@@ -6159,6 +6175,9 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
if chunkNumber <= 8 {
return w.f.shouldRetry(ctx, err)
}
if fserrors.ContextError(ctx, &err) {
return false, err
}
// retry all chunks once have done the first few
return true, err
}

@@ -23,14 +23,20 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
opt := &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
TiersToTest: []string{"STANDARD"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
// Test wider range of tiers on AWS
if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
}
fstests.Run(t, opt)

}

func TestIntegration2(t *testing.T) {
@@ -99,6 +99,11 @@ Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
IsPassword: true,
Sensitive: true,
}, {
Name: "pubkey",
Help: `SSH public certificate for public certificate based authentication.
Set this if you have a signed certificate you want to use for authentication.
If specified will override pubkey_file.`,
}, {
Name: "pubkey_file",
Help: `Optional path to public key file.
@@ -511,6 +516,7 @@ type Options struct {
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKey string `config:"pubkey"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
@@ -997,13 +1003,21 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
if pubkeyFile != "" || opt.PubKey != "" {
pubKeyRaw := []byte(opt.PubKey)
// Use this error if public key is provided inline and is not a certificate
// if public key file is provided instead, use the err in the if block
notACertError := errors.New("public key provided is not a certificate: " + opt.PubKey)
if opt.PubKey == "" {
notACertError = errors.New("public key file is not a certificate file: " + pubkeyFile)
err := error(nil)
pubKeyRaw, err = os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
}

pk, _, _, _, err := ssh.ParseAuthorizedKey(certfile)
pk, _, _, _, err := ssh.ParseAuthorizedKey(pubKeyRaw)
if err != nil {
return nil, fmt.Errorf("unable to parse cert file: %w", err)
}
@@ -1017,7 +1031,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// knows everything it needs.
cert, ok := pk.(*ssh.Certificate)
if !ok {
return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
return nil, notACertError
}
pubsigner, err := ssh.NewCertSigner(cert, signer)
if err != nil {
@@ -2087,10 +2101,10 @@ func (file *objectReader) Read(p []byte) (n int, err error) {

// Close a reader of a remote sftp file
func (file *objectReader) Close() (err error) {
// Close the sftpFile - this will likely cause the WriteTo to error
err = file.sftpFile.Close()
// Close the pipeReader so writes to the pipeWriter fail
_ = file.pipeReader.Close()
// Close the sftpFile - this will likely cause the WriteTo to error
err = file.sftpFile.Close()
// Wait for the background process to finish
<-file.done
// Show connection no longer in use
@@ -97,7 +97,6 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

const (
@@ -115,13 +114,11 @@ const (
)

// Generate a new oauth2 config which we will update when we know the TokenURL
func newOauthConfig(tokenURL string) *oauth2.Config {
return &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
},
func newOauthConfig(tokenURL string) *oauthutil.Config {
return &oauthutil.Config{
Scopes: nil,
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectPublicSecureURL,
@@ -136,7 +133,7 @@ func init() {
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("endpoint not found in response")
}
@@ -147,7 +144,7 @@ func init() {
}
endpoint := "https://" + subdomain + "." + apicp
m.Set("endpoint", endpoint)
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
oauthConfig.TokenURL = endpoint + tokenPath
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -31,13 +31,29 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
}
}

d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := getKerberosClient()
if err != nil {
return nil, err
}

spn := f.opt.SPN
if spn == "" {
spn = "cifs/" + f.opt.Host
}

d.Initiator = &smb2.Krb5Initiator{
Client: cl,
TargetSPN: spn,
}
} else {
d.Initiator = &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
},
}
}

session, err := d.DialConn(ctx, tconn, addr)
backend/smb/kerberos.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package smb

import (
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"

"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)

var (
kerberosClient *client.Client
kerberosErr error
kerberosOnce sync.Once
)

// getKerberosClient returns a Kerberos client that can be used to authenticate.
func getKerberosClient() (*client.Client, error) {
if kerberosClient == nil || kerberosErr == nil {
kerberosOnce.Do(func() {
kerberosClient, kerberosErr = createKerberosClient()
})
}

return kerberosClient, kerberosErr
}

// createKerberosClient creates a new Kerberos client.
func createKerberosClient() (*client.Client, error) {
cfgPath := os.Getenv("KRB5_CONFIG")
if cfgPath == "" {
cfgPath = "/etc/krb5.conf"
}

cfg, err := config.Load(cfgPath)
if err != nil {
return nil, err
}

// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
switch {
case strings.Contains(ccachePath, ":"):
parts := strings.SplitN(ccachePath, ":", 2)
switch parts[0] {
case "FILE":
ccachePath = parts[1]
case "DIR":
primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
if err != nil {
return nil, err
}
ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
default:
return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
}
case ccachePath == "":
u, err := user.Current()
if err != nil {
return nil, err
}

ccachePath = "/tmp/krb5cc_" + u.Uid
}

ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}

return client.NewFromCCache(ccache, cfg)
}
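A minimal sketch (not part of the diff) of the environment createKerberosClient reads; the paths are illustrative:

os.Setenv("KRB5_CONFIG", "/etc/krb5.conf")       // realm configuration
os.Setenv("KRB5CCNAME", "FILE:/tmp/krb5cc_1000") // or DIR:/run/user/1000/krb5cc
cl, err := getKerberosClient() // result is cached after the first call via sync.Once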
@@ -76,6 +76,16 @@ authentication, and it often needs to be set for clusters. For example:
Leave blank if not sure.
`,
Sensitive: true,
}, {
Name: "use_kerberos",
Help: `Use Kerberos authentication.

If set, rclone will use Kerberos authentication instead of NTLM. This
requires a valid Kerberos configuration and credentials cache to be
available, either in the default locations or as specified by the
KRB5_CONFIG and KRB5CCNAME environment variables.
`,
Default: false,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -126,6 +136,7 @@ type Options struct {
Pass string `config:"pass"`
Domain string `config:"domain"`
SPN string `config:"spn"`
UseKerberos bool `config:"use_kerberos"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
@@ -601,9 +612,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
}

fi, err := cn.smbShare.Stat(reqDir)
if err == nil {
o.statResult = fi
if err != nil {
return fmt.Errorf("SetModTime: stat: %w", err)
}
o.statResult = fi
return err
}

@@ -685,7 +697,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
defer func() {
o.statResult, _ = cn.smbShare.Stat(filename)
o.fs.putConnection(&cn)
}()

@@ -723,7 +734,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update Close failed: %w", err)
}

// Set the modified time
// Set the modified time and also o.statResult
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return fmt.Errorf("Update SetModTime failed: %w", err)
@@ -2,6 +2,7 @@
package smb_test

import (
"path/filepath"
"testing"

"github.com/rclone/rclone/backend/smb"
@@ -15,3 +16,13 @@ func TestIntegration(t *testing.T) {
NilObject: (*smb.Object)(nil),
})
}

func TestIntegration2(t *testing.T) {
krb5Dir := t.TempDir()
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSMBKerberos:rclone",
NilObject: (*smb.Object)(nil),
})
}
@@ -35,6 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@@ -867,13 +868,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData(ctx)
err = srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@@ -890,6 +891,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

// Find and remove existing object
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)

// Copy the object
opts := rest.Opts{
Method: "POST",
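A rough sketch of the calling pattern operations.RemoveExisting enables (this is the call-site contract visible in the hunk, not the helper's actual implementation, which presumably also handles restoring the old object if the copy fails):

cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
	return nil, err
}
defer cleanup(&err) // cleanup inspects *err, hence the named return value on Copy
// ... perform the server-side copy, assigning its error to err ...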
@@ -161,7 +161,24 @@ Set to 0 to disable chunked uploading.
Default: false,
},
fshttp.UnixSocketConfig,
},
{
Name: "auth_redirect",
Help: `Preserve authentication on redirect.

If the server redirects rclone to a new domain when it is trying to
read a file then normally rclone will drop the Authorization: header
from the request.

This is standard security practice to avoid sending your credentials
to an unknown webserver.

However this is desirable in some circumstances. If you are getting
an error like "401 Unauthorized" when rclone is attempting to read
files from the webdav server then you can try this option.
`,
Advanced: true,
Default: false,
}},
})
}

@@ -180,6 +197,7 @@ type Options struct {
ExcludeShares bool `config:"owncloud_exclude_shares"`
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
UnixSocket string `config:"unix_socket"`
AuthRedirect bool `config:"auth_redirect"`
}

// Fs represents a remote webdav
@@ -1456,6 +1474,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
ExtraHeaders: map[string]string{
"Depth": "0",
},
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
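For context, the default behaviour auth_redirect overrides mirrors what Go's net/http does: sensitive headers such as Authorization are dropped when a redirect crosses to another domain. A simplified, illustrative sketch of re-attaching the header (not rclone's actual rest package internals):

client := &http.Client{
	CheckRedirect: func(req *http.Request, via []*http.Request) error {
		// re-attach the credentials that net/http would otherwise
		// drop on a cross-domain redirect (what auth_redirect opts into)
		req.Header.Set("Authorization", via[0].Header.Get("Authorization"))
		return nil
	},
}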
@@ -22,13 +22,13 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)

// oAuth
@@ -46,11 +46,9 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
},
oauthConfig = &oauthutil.Config{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -713,7 +711,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -721,12 +719,21 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

dstPath := f.filePath(remote)
err := f.mkParentDirs(ctx, dstPath)
err = f.mkParentDirs(ctx, dstPath)
if err != nil {
return nil, err
}
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)

// Find and remove existing object
//
// Note that the overwrite flag doesn't seem to work for server side copy
cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
if err != nil {
return nil, err
}
defer cleanup(&err)

err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
if err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
@@ -47,7 +47,7 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
oauthConfig = &oauthutil.Config{
Scopes: []string{
"aaaserver.profile.read",
"WorkDrive.team.READ",
@@ -55,11 +55,10 @@ var (
"WorkDrive.files.ALL",
"ZohoFiles.files.ALL",
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
},

AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -276,8 +275,8 @@ func setupRegion(m configmap.Mapper) error {
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
return nil
}

@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
continue
fi

commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
if [ "$commit" == "" ]; then
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
fi
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
echo $backend $version
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
done
@@ -7,6 +7,7 @@ conversion into man pages etc.
import os
import re
import time
import subprocess
from datetime import datetime

docpath = "docs/content"
@@ -35,6 +36,7 @@ docs = [
"box.md",
"cache.md",
"chunker.md",
"cloudinary.md",
"sharefile.md",
"crypt.md",
"compress.md",
@@ -191,13 +193,23 @@ def main():
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % build_date.strftime("%b %d, %Y"))
# NAME

rclone - manage files on cloud storage

# SYNOPSIS

```
%s
```
""" % (build_date.strftime("%b %d, %Y"), help_output))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md
@@ -7,15 +7,18 @@ Run with no arguments to test all backends or a supply a list of
backends to test.
"""

import os
import re
import sys
import subprocess

all_backends = "backend/all/all.go"

# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

import os
import re
import sys
import subprocess
# disable CGO as that makes a lot of difference to binary size
os.environ["CGO_ENABLED"]="0"

match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')

@@ -43,6 +46,9 @@ def write_all(orig_all, backend):
# Comment out line matching backend
if match and match.group(1) == backend:
line = "// " + line
# s3 and pikpak depend on each other
if backend == "s3" and "pikpak" in line:
line = "// " + line
fd.write(line+"\n")

def compile():
@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
exit 1
fi
VERSION="$1"
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')

cat > "/tmp/${VERSION}-release-notes" <<EOF
This is the ${VERSION} release of rclone.
@@ -5,20 +5,13 @@ import (
"bytes"
"log"

"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)

// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Writer()
defer func() {
err := logrusSave.Close()
if err != nil {
fs.Errorf(nil, "error closing logrusSave: %v", err)
}
}()
logrusSave := logrus.StandardLogger().Out
buf := &bytes.Buffer{}
log.SetOutput(buf)
logrus.SetOutput(buf)
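A short usage sketch of the simplified CaptureOutput (illustrative):

out := CaptureOutput(func() {
	log.Println("hello from the standard logger")
})
fmt.Printf("captured: %s", out) // both log and logrus output land in the returned buffer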
@@ -9,7 +9,7 @@ import (
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)

const configFile = "../../fstest/test_all/config.yaml"
@@ -63,40 +63,40 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
}

if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) //nolint:govet
fs.Log(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
ci.CheckSum = false
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
} else if b.opt.Compare.Checksum && !ci.CheckSum {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
}
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
}
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
b.opt.Compare.Modtime = true
b.opt.Compare.Size = true
ci.CheckSum = false
b.opt.Compare.Checksum = false
} else {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
// note: --checksum will still affect the internal sync calls
}
}
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
fs.Infoc(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
b.opt.IgnoreListingChecksum = true
}
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
}

notSupported := func(label string, value bool, opt *bool) {
if value {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
*opt = false
}
}
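The pattern in these bisync hunks, sketched: fs.Logf treats its second argument as a printf format string, so passing a pre-coloured message trips go vet's printf check and needed //nolint:govet. The non-formatting variants (fs.Log, fs.Infoc, fs.Debug, fs.Error) take a plain string, so the escapes can be dropped:

fs.Logf(nil, "%d files differ", n)                // format string: Logf is appropriate
fs.Log(nil, Color(terminal.Dim, "plain message")) // no format parsing, vet is happy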
@@ -123,13 +123,13 @@ func sizeDiffers(a, b int64) bool {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
}
return false
}
if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false
}
}
@@ -151,7 +151,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
return
}
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
fs.Log(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
b.opt.Compare.SlowHashSyncOnly = false
b.opt.Compare.NoSlowHash = true
ci.CheckSum = false
@@ -159,7 +159,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}

if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
fs.Log(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
b.opt.Compare.Modtime = true
@@ -167,25 +167,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
ci.CheckSum = false
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
if b.opt.Compare.HashType1 != hash.None {
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
}
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
if b.opt.Compare.HashType2 != hash.None {
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
}
}
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
b.opt.Compare.Checksum = false
ci.CheckSum = false
b.opt.IgnoreListingChecksum = true
@@ -232,7 +232,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
b.opt.Compare.Checksum = true
CompareFlag.Checksum = true
default:
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
}
}

@@ -284,14 +284,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
}
if o.Size() < 0 {
downloadHashWarn.Do(func() {
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}

firstDownloadHash.Do(func() {
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
defer func() {
|
||||
return escapePath(path, true)
|
||||
}
|
||||
|
||||
var Colors bool // Colors controls whether terminal colors are enabled
|
||||
// Colors controls whether terminal colors are enabled
|
||||
var Colors bool
|
||||
|
||||
// Color handles terminal colors for bisync
|
||||
func Color(style string, s string) string {
|
||||
@@ -77,6 +78,15 @@ func Color(style string, s string) string {
|
||||
return style + s + terminal.Reset
|
||||
}
|
||||
|
||||
// ColorX handles terminal colors for bisync
|
||||
func ColorX(style string, s string) string {
|
||||
if !Colors {
|
||||
return s
|
||||
}
|
||||
terminal.Start()
|
||||
return style + s + terminal.Reset
|
||||
}
|
||||
|
||||
func encode(s string) string {
|
||||
return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
|
||||
}
|
||||
|
||||
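Usage sketch for the Color helper above (illustrative; how Colors gets set is an assumption):

Colors = true // presumably driven by the terminal/colour settings
msg := Color(terminal.YellowFg, "WARNING: something noteworthy")
fs.Log(nil, msg) // styled only when colours are enabled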
@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
finaliseOnce.Do(func() {
if atexit.Signalled() {
if b.opt.Resync {
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
fs.Log(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
} else {
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
b.InGracefulShutdown = true
if b.SyncCI != nil {
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
b.SyncCI.MaxTransfer = 1
b.SyncCI.MaxDuration = 1 * time.Second
b.SyncCI.CutoffMode = fs.CutoffModeSoft
gracePeriod := 30 * time.Second // TODO: flag to customize this?
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
b.CancelSync()
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
}
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// we haven't started to sync yet, so we're good.
// no need to worry about the listing files, as we haven't overwritten them yet.
b.CleanupCompleted = true
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
}
}
if !b.CleanupCompleted {
if !b.opt.Resync {
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
fs.Log(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
fs.Log(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
}
markFailed(b.listing1)
markFailed(b.listing2)
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
b.critical = false
}
if err == nil {
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
}
}

if b.critical {
if b.retryable && b.opt.Resilient {
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Error(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
} else {
if bilib.FileExists(b.listing1) {
_ = os.Rename(b.listing1, b.listing1+"-err")
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
_ = os.Rename(b.listing2, b.listing2+"-err")
}
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
fs.Error(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
}
return ErrBisyncAborted
}
if b.abort && !b.InGracefulShutdown {
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
fs.Log(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
}
if err == nil {
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
fs.Infoc(nil, Color(terminal.GreenFg, "Bisync successful"))
}
return err
}
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
if opt.CheckSync != CheckSyncFalse {
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
b.retryable = true
return err
}
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
fs.Infoc(nil, Color(terminal.GreenFg, "Backup listing is valid."))
}
b.revertToOldListings()
} else {
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
fs.Infof(nil, "Building Path1 and Path2 listings")
ls1, ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() {
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
b.critical = true
b.retryable = true
return err
@@ -623,7 +623,7 @@ func (b *bisyncRun) checkSyntax() error {

func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
if b.DebugName != "" && b.DebugName == nametocheck {
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
fs.Infoc(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
}
}

@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
prettyprint(result, "writing result", fs.LogLevelDebug)
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
once.Do(func() {
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
})
}

@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
if winningPath > 0 {
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
} else {
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
fs.Infoc(file, Color(terminal.RedFg, "A winner could not be determined."))
}
}

@@ -15,7 +15,7 @@ import (
// and either flag is sufficient without the other.
func (b *bisyncRun) setResyncDefaults() {
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
fs.Debug(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
b.opt.ResyncMode = PreferPath1
}
if b.opt.ResyncMode != PreferNone {

@@ -80,6 +80,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : subdir: Making directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -121,19 +121,6 @@ func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, err
return leaf, dir, errc
}

// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*vfs.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}

// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
@@ -154,15 +141,9 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = uint32(Mode)
stat.Mode = getMode(node)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
@@ -509,14 +490,15 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {

// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
return translateError(fsys.VFS.Symlink(target, newpath))
}

// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath)
linkPath, err := fsys.VFS.Readlink(path)
return translateError(err), linkPath
}

// Chmod changes the permission bits of a file.
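A minimal sketch of the VFS round trip the new handlers enable (method names from the hunk; v is an assumed *vfs.VFS, error handling elided):

if err := v.Symlink("target.txt", "/link.txt"); err != nil {
	return err
}
target, _ := v.Readlink("/link.txt") // -> "target.txt"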
@@ -580,7 +562,7 @@ func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string
return errc, ""
}
normalisedPath = node.Path()
if !strings.HasPrefix("/", normalisedPath) {
if !strings.HasPrefix(normalisedPath, "/") {
normalisedPath = "/" + normalisedPath
}
return 0, normalisedPath
@@ -615,6 +597,8 @@ func translateError(err error) (errc int) {
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
case vfs.ELOOP:
return -fuse.ELOOP
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
@@ -646,6 +630,22 @@ func translateOpenFlags(inFlags int) (outFlags int) {
return outFlags
}

// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
return uint32(Mode)
}

// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)
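For reference, the os.FileMode to FUSE type-bit mapping getMode implements, sketched (the S_IF* constants come from the cgofuse fuse package):

m := getMode(node)                 // permission bits | exactly one S_IF* type bit
if m&fuse.S_IFMT == fuse.S_IFLNK { // os.ModeSymlink maps to S_IFLNK
	// present the node as a symbolic link
}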
@@ -10,7 +10,6 @@ import (
"fmt"
"os"
"runtime"
"strings"
"time"

"github.com/rclone/rclone/cmd/mountlib"
@@ -35,19 +34,6 @@ func init() {
buildinfo.Tags = append(buildinfo.Tags, "cmount")
}

// Find the option string in the current options
func findOption(name string, options []string) (found bool) {
for _, option := range options {
if option == "-o" {
continue
}
if strings.Contains(option, name) {
return true
}
}
return false
}

// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
@@ -93,9 +79,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
if VFS.Opt.ReadOnly {
options = append(options, "-o", "ro")
}
if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
//if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
//}
if runtime.GOOS == "darwin" {
if opt.VolumeName != "" {
options = append(options, "-o", "volname="+opt.VolumeName)
@@ -111,9 +97,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
for _, option := range opt.ExtraOptions {
options = append(options, "-o", option)
}
for _, option := range opt.ExtraFlags {
options = append(options, option)
}
options = append(options, opt.ExtraFlags...)
return options
}

@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"os"
"path"
"syscall"
"time"

@@ -33,7 +34,7 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
a.Mode = d.Mode()
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
@@ -140,11 +141,13 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
file, err := d.Dir.Create(req.Name, int(req.Flags))
// translate the fuse flags to os flags
osFlags := int(req.Flags) | os.O_CREATE
file, err := d.Dir.Create(req.Name, osFlags)
if err != nil {
return nil, nil, translateError(err)
}
fh, err := file.Open(int(req.Flags) | os.O_CREATE)
fh, err := file.Open(osFlags)
if err != nil {
return nil, nil, translateError(err)
}
@@ -200,7 +203,6 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
if !ok {
return fmt.Errorf("unknown Dir type %T", newDir)
}

err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
if err != nil {
return translateError(err)
@@ -239,6 +241,24 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
return nil, syscall.ENOSYS
}

var _ fusefs.NodeSymlinker = (*Dir)(nil)

// Symlink create a symbolic link.
func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)

newName := path.Join(d.Path(), req.NewName)
target := req.Target

n, err := d.VFS().CreateSymlink(target, newName)
if err != nil {
return nil, err
}

node = &File{n.(*vfs.File), d.fsys}
return node, nil
}

// Check interface satisfied
var _ fusefs.NodeMknoder = (*Dir)(nil)

@@ -32,7 +32,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
Blocks := (Size + 511) / 512
a.Gid = f.VFS().Opt.GID
a.Uid = f.VFS().Opt.UID
a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
a.Mode = f.File.Mode() &^ os.ModeAppend
a.Size = Size
a.Atime = modTime
a.Mtime = modTime
@@ -129,3 +129,11 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
}

var _ fusefs.NodeRemovexattrer = (*File)(nil)

var _ fusefs.NodeReadlinker = (*File)(nil)

// Readlink read symbolic link target.
func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err)
return f.VFS().Readlink(f.Path())
}

@@ -100,6 +100,8 @@ func translateError(err error) error {
return syscall.ENOSYS
case vfs.EINVAL:
return fuse.Errno(syscall.EINVAL)
case vfs.ELOOP:
return fuse.Errno(syscall.ELOOP)
}
fs.Errorf(nil, "IO error: %v", err)
return err

@@ -51,9 +51,14 @@ func (f *FS) SetDebug(debug bool) {

// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
Mode := node.Mode().Perm()
if node.IsDir() {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
@@ -128,6 +133,8 @@ func translateError(err error) syscall.Errno {
return syscall.ENOSYS
case vfs.EINVAL:
|
||||
return syscall.EINVAL
|
||||
case vfs.ELOOP:
|
||||
return syscall.ELOOP
|
||||
}
|
||||
fs.Errorf(nil, "IO error: %v", err)
|
||||
return syscall.EIO
|
||||
|
||||
@@ -227,7 +227,7 @@ type dirStream struct {
|
||||
// HasNext indicates if there are further entries. HasNext
|
||||
// might be called on already closed streams.
|
||||
func (ds *dirStream) HasNext() bool {
|
||||
return ds.i < len(ds.nodes)
|
||||
return ds.i < len(ds.nodes)+2
|
||||
}
|
||||
|
||||
// Next retrieves the next entry. It is only called if HasNext
|
||||
@@ -235,7 +235,22 @@ func (ds *dirStream) HasNext() bool {
|
||||
// indicate I/O errors
|
||||
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
|
||||
// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
|
||||
fi := ds.nodes[ds.i]
|
||||
if ds.i == 0 {
|
||||
ds.i++
|
||||
return fuse.DirEntry{
|
||||
Mode: fuse.S_IFDIR,
|
||||
Name: ".",
|
||||
Ino: 0, // FIXME
|
||||
}, 0
|
||||
} else if ds.i == 1 {
|
||||
ds.i++
|
||||
return fuse.DirEntry{
|
||||
Mode: fuse.S_IFDIR,
|
||||
Name: "..",
|
||||
Ino: 0, // FIXME
|
||||
}, 0
|
||||
}
|
||||
fi := ds.nodes[ds.i-2]
|
||||
de = fuse.DirEntry{
|
||||
// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
|
||||
// are considered.
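The rewritten Next above synthesises the "." and ".." entries that a POSIX directory listing is expected to contain: stream positions 0 and 1 are reserved for them and the real entries are shifted by two, which is also why HasNext now compares against len(ds.nodes)+2. A small sketch of just the indexing scheme, with hypothetical names:

```go
package main

import "fmt"

// entryName returns the name at position i of a directory stream that
// prepends "." and ".." before the real entries held in nodes.
func entryName(nodes []string, i int) string {
    switch i {
    case 0:
        return "."
    case 1:
        return ".."
    default:
        return nodes[i-2] // real entries are shifted by two
    }
}

func main() {
    nodes := []string{"a.txt", "b.txt"}
    for i := 0; i < len(nodes)+2; i++ { // mirrors HasNext: i < len(nodes)+2
        fmt.Println(entryName(nodes, i))
    }
}
```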
@@ -443,3 +458,31 @@ func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn
}

var _ fusefs.NodeListxattrer = (*Node)(nil)

var _ fusefs.NodeReadlinker = (*Node)(nil)

// Readlink reads the symbolic link target.
func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
    defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
    path := n.node.Path()
    s, serr := n.node.VFS().Readlink(path)
    return []byte(s), translateError(serr)
}

var _ fusefs.NodeSymlinker = (*Node)(nil)

// Symlink creates a symbolic link.
func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
    defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
    fullPath := path.Join(n.node.Path(), name)
    vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
    if serr != nil {
        return nil, translateError(serr)
    }

    n.fsys.setEntryOut(vfsNode, out)
    newNode := newNode(n.fsys, vfsNode)
    newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})

    return newInode, 0
}

@@ -373,6 +373,9 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {

    m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
    if err != nil {
        if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
            return nil, fmt.Errorf("mounting is not supported when running from snap")
        }
        return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
    }
    m.MountedOn = time.Now()

@@ -3,6 +3,9 @@
package nfsmount

import (
    "context"
    "errors"
    "os"
    "os/exec"
    "runtime"
    "testing"
@@ -30,7 +33,24 @@ func TestMount(t *testing.T) {
        }
        sudo = true
    }
    nfs.Opt.HandleCacheDir = t.TempDir()
    require.NoError(t, nfs.Opt.HandleCache.Set("disk"))
    vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
    for _, cacheType := range []string{"memory", "disk", "symlink"} {
        t.Run(cacheType, func(t *testing.T) {
            nfs.Opt.HandleCacheDir = t.TempDir()
            require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
            // Check we can create a handler
            _, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
            if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
                t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
            }
            require.NoError(t, err)
            // Configure rclone via environment var since the mount gets run in a subprocess
            _ = os.Setenv("RCLONE_NFS_CACHE_DIR", nfs.Opt.HandleCacheDir)
            _ = os.Setenv("RCLONE_NFS_CACHE_TYPE", cacheType)
            t.Cleanup(func() {
                _ = os.Unsetenv("RCLONE_NFS_CACHE_DIR")
                _ = os.Unsetenv("RCLONE_NFS_CACHE_TYPE")
            })
            vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
        })
    }
}

@@ -11,6 +11,7 @@ import (
    "path"
    "path/filepath"
    "regexp"
    "sort"
    "strings"

    "github.com/anacrolix/dms/dlna"
@@ -158,6 +159,18 @@ func (cds *contentDirectoryService) readContainer(o object, host string) (ret []
        }
    }

    // Sort the directory entries by directories first then alphabetically by name
    sort.Slice(dirEntries, func(i, j int) bool {
        iNode, jNode := dirEntries[i], dirEntries[j]
        iIsDir, jIsDir := iNode.IsDir(), jNode.IsDir()
        if iIsDir && !jIsDir {
            return true
        } else if !iIsDir && jIsDir {
            return false
        }
        return strings.ToLower(iNode.Name()) < strings.ToLower(jNode.Name())
    })

    dirEntries, mediaResources := mediaWithResources(dirEntries)
    for _, de := range dirEntries {
        child := object{

@@ -2,17 +2,15 @@ package docker

import (
    "fmt"
    "math"
    "strings"

    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/rc"
    "github.com/rclone/rclone/vfs/vfscommon"

    "github.com/spf13/pflag"
)

// applyOptions configures volume from request options.
@@ -112,11 +110,15 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
    for key, val := range vol.Options {
        opt[key] = val
    }
    mntMap := configmap.Simple{}
    vfsMap := configmap.Simple{}
    for key := range opt {
        var ok bool
        var err error
        normalKey := normalOptName(key)
        underscoreKey := strings.ReplaceAll(normalKey, "-", "_")

        switch normalOptName(key) {
        switch normalKey {
        case "persist":
            vol.persist, err = opt.GetBool(key)
            ok = true
@@ -129,25 +131,24 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
        }

        if !ok {
            // try to use as a mount option in mntOpt
            ok, err = getMountOption(mntOpt, opt, key)
            if ok && err != nil {
                return fmt.Errorf("cannot parse mount option %q: %w", key, err)
            // try to use as a mount option in mntMap
            if mountlib.OptionsInfo.Get(underscoreKey) != nil {
                mntMap[underscoreKey] = vol.Options[key]
                ok = true
            }
        }
        if !ok {
            // try as a vfs option in vfsOpt
            ok, err = getVFSOption(vfsOpt, opt, key)
            if ok && err != nil {
                return fmt.Errorf("cannot parse vfs option %q: %w", key, err)
            // try as a vfs option in vfsMap
            if vfscommon.OptionsInfo.Get(underscoreKey) != nil {
                vfsMap[underscoreKey] = vol.Options[key]
                ok = true
            }
        }

        if !ok {
            // try as a backend option in fsOpt (backends use "_" instead of "-")
            optWithPrefix := strings.ReplaceAll(normalOptName(key), "-", "_")
            fsOptName := strings.TrimPrefix(optWithPrefix, fsType+"_")
            hasFsPrefix := optWithPrefix != fsOptName
            fsOptName := strings.TrimPrefix(underscoreKey, fsType+"_")
            hasFsPrefix := underscoreKey != fsOptName
            if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
                fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
                return fmt.Errorf("unsupported backend option %q", key)
@@ -159,6 +160,18 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
        }
    }

    // Parse VFS options
    err = configstruct.Set(vfsMap, vfsOpt)
    if err != nil {
        return fmt.Errorf("cannot parse vfs options: %w", err)
    }

    // Parse Mount options
    err = configstruct.Set(mntMap, mntOpt)
    if err != nil {
        return fmt.Errorf("cannot parse mount options: %w", err)
    }

    // build remote string from fsName, fsType, fsOpt, fsPath
    colon := ":"
    comma := ","
@@ -178,150 +191,6 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
    return vol.validate()
}
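The reworked applyOptions no longer routes each key through the hand-written switches (the helpers deleted below); it batches candidate keys into configmap.Simple maps and lets configstruct.Set apply them to the option structs in one go. A rough sketch of that mechanism — the exampleOpt struct is a made-up stand-in for the real mountlib.Options / vfscommon.Options, and this assumes configstruct.Set's usual signature:

```go
package main

import (
    "fmt"

    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
)

// exampleOpt stands in for the real options structs; the config tags
// play the role of the registered option names looked up above.
type exampleOpt struct {
    NoModTime  bool `config:"no_modtime"`
    NoChecksum bool `config:"no_checksum"`
}

func main() {
    m := configmap.Simple{"no_modtime": "true"} // collected like vfsMap above
    var opt exampleOpt
    if err := configstruct.Set(m, &opt); err != nil {
        fmt.Println("cannot parse vfs options:", err)
        return
    }
    fmt.Printf("%+v\n", opt) // {NoModTime:true NoChecksum:false}
}
```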

func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok bool, err error) {
    ok = true
    switch normalOptName(key) {
    case "debug-fuse":
        mntOpt.DebugFUSE, err = opt.GetBool(key)
    case "attr-timeout":
        mntOpt.AttrTimeout, err = opt.GetFsDuration(key)
    case "option":
        mntOpt.ExtraOptions, err = getStringArray(opt, key)
    case "fuse-flag":
        mntOpt.ExtraFlags, err = getStringArray(opt, key)
    case "daemon":
        mntOpt.Daemon, err = opt.GetBool(key)
    case "daemon-timeout":
        mntOpt.DaemonTimeout, err = opt.GetFsDuration(key)
    case "default-permissions":
        mntOpt.DefaultPermissions, err = opt.GetBool(key)
    case "allow-non-empty":
        mntOpt.AllowNonEmpty, err = opt.GetBool(key)
    case "allow-root":
        mntOpt.AllowRoot, err = opt.GetBool(key)
    case "allow-other":
        mntOpt.AllowOther, err = opt.GetBool(key)
    case "async-read":
        mntOpt.AsyncRead, err = opt.GetBool(key)
    case "max-read-ahead":
        err = getFVarP(&mntOpt.MaxReadAhead, opt, key)
    case "write-back-cache":
        mntOpt.WritebackCache, err = opt.GetBool(key)
    case "volname":
        mntOpt.VolumeName, err = opt.GetString(key)
    case "noappledouble":
        mntOpt.NoAppleDouble, err = opt.GetBool(key)
    case "noapplexattr":
        mntOpt.NoAppleXattr, err = opt.GetBool(key)
    case "network-mode":
        mntOpt.NetworkMode, err = opt.GetBool(key)
    default:
        ok = false
    }
    return
}

func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool, err error) {
    var intVal int64
    ok = true
    switch normalOptName(key) {

    // options prefixed with "vfs-"
    case "vfs-cache-mode":
        err = getFVarP(&vfsOpt.CacheMode, opt, key)
    case "vfs-cache-poll-interval":
        vfsOpt.CachePollInterval, err = opt.GetFsDuration(key)
    case "vfs-cache-max-age":
        vfsOpt.CacheMaxAge, err = opt.GetFsDuration(key)
    case "vfs-cache-max-size":
        err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
    case "vfs-read-chunk-size":
        err = getFVarP(&vfsOpt.ChunkSize, opt, key)
    case "vfs-read-chunk-size-limit":
        err = getFVarP(&vfsOpt.ChunkSizeLimit, opt, key)
    case "vfs-case-insensitive":
        vfsOpt.CaseInsensitive, err = opt.GetBool(key)
    case "vfs-write-wait":
        vfsOpt.WriteWait, err = opt.GetFsDuration(key)
    case "vfs-read-wait":
        vfsOpt.ReadWait, err = opt.GetFsDuration(key)
    case "vfs-write-back":
        vfsOpt.WriteBack, err = opt.GetFsDuration(key)
    case "vfs-read-ahead":
        err = getFVarP(&vfsOpt.ReadAhead, opt, key)
    case "vfs-used-is-size":
        vfsOpt.UsedIsSize, err = opt.GetBool(key)
    case "vfs-read-chunk-streams":
        intVal, err = opt.GetInt64(key)
        if err == nil {
            if intVal >= 0 && intVal <= math.MaxInt {
                vfsOpt.ChunkStreams = int(intVal)
            } else {
                err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
            }
        }

    // unprefixed vfs options
    case "no-modtime":
        vfsOpt.NoModTime, err = opt.GetBool(key)
    case "no-checksum":
        vfsOpt.NoChecksum, err = opt.GetBool(key)
    case "dir-cache-time":
        vfsOpt.DirCacheTime, err = opt.GetFsDuration(key)
    case "poll-interval":
        vfsOpt.PollInterval, err = opt.GetFsDuration(key)
    case "read-only":
        vfsOpt.ReadOnly, err = opt.GetBool(key)
    case "dir-perms":
        err = getFVarP(&vfsOpt.DirPerms, opt, key)
    case "file-perms":
        err = getFVarP(&vfsOpt.FilePerms, opt, key)

    // unprefixed unix-only vfs options
    case "umask":
        err = getFVarP(&vfsOpt.Umask, opt, key)
    case "uid":
        intVal, err = opt.GetInt64(key)
        if err == nil {
            if intVal >= 0 && intVal <= math.MaxUint32 {
                vfsOpt.UID = uint32(intVal)
            } else {
                err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
            }
        }
case "gid":
|
||||
intVal, err = opt.GetInt64(key)
|
||||
if err == nil {
|
||||
if intVal >= 0 && intVal <= math.MaxUint32 {
|
||||
vfsOpt.UID = uint32(intVal)
|
||||
} else {
|
||||
err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
|
||||
}
|
||||
}

    // non-vfs options
    default:
        ok = false
    }
    return
}

func getFVarP(pvalue pflag.Value, opt rc.Params, key string) error {
    str, err := opt.GetString(key)
    if err != nil {
        return err
    }
    return pvalue.Set(str)
}

func getStringArray(opt rc.Params, key string) ([]string, error) {
    str, err := opt.GetString(key)
    if err != nil {
        return nil, err
    }
    return strings.Split(str, ","), nil
}

func normalOptName(key string) string {
    return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
}
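normalOptName above and the underscoreKey derived from it are the two spellings every incoming key is reduced to: dashes for the switch, underscores for the OptionsInfo lookups. A quick standalone check of the folding:

```go
package main

import (
    "fmt"
    "strings"
)

// Same folding as normalOptName above.
func normalOptName(key string) string {
    return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
}

func main() {
    for _, key := range []string{"--Attr_Timeout", "vfs_cache_mode", "debug-fuse"} {
        normalKey := normalOptName(key)
        underscoreKey := strings.ReplaceAll(normalKey, "-", "_")
        fmt.Printf("%q -> %q / %q\n", key, normalKey, underscoreKey)
    }
    // "--Attr_Timeout" -> "attr-timeout" / "attr_timeout"
}
```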

cmd/serve/docker/options_test.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package docker

import (
    "testing"
    "time"

    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    _ "github.com/rclone/rclone/backend/local"
)

func TestApplyOptions(t *testing.T) {
    vol := &Volume{
        Name:       "testName",
        MountPoint: "testPath",
        drv: &Driver{
            root: "testRoot",
        },
        mnt: &mountlib.MountPoint{
            MountPoint: "testPath",
        },
        mountReqs: make(map[string]interface{}),
    }

    // Happy path
    volOpt := VolOpts{
        "remote":     "/tmp/docker",
        "persist":    "FALSE",
        "mount_type": "potato",
        // backend options
        "--local-case-sensitive": "true",
        "local_no_check_updated": "1",
        // mount options
        "debug-fuse":   "true",
        "attr_timeout": "100s",
        "--async-read": "TRUE",
        // vfs options
        "no-modtime":  "1",
        "no_checksum": "true",
        "--no-seek":   "true",
    }
    err := vol.applyOptions(volOpt)
    require.NoError(t, err)
    // normal options
    assert.Equal(t, ":local,case_sensitive='true',no_check_updated='1':/tmp/docker", vol.fsString)
    assert.Equal(t, false, vol.persist)
    assert.Equal(t, "potato", vol.mountType)
    // mount options
    assert.Equal(t, true, vol.mnt.MountOpt.DebugFUSE)
    assert.Equal(t, fs.Duration(100*time.Second), vol.mnt.MountOpt.AttrTimeout)
    assert.Equal(t, true, vol.mnt.MountOpt.AsyncRead)
    // vfs options
    assert.Equal(t, true, vol.mnt.VFSOpt.NoModTime)
    assert.Equal(t, true, vol.mnt.VFSOpt.NoChecksum)
    assert.Equal(t, true, vol.mnt.VFSOpt.NoSeek)

    // Check errors
    err = vol.applyOptions(VolOpts{
        "debug-fuse": "POTATO",
    })
    require.ErrorContains(t, err, "cannot parse mount options")
    err = vol.applyOptions(VolOpts{
        "no-modtime": "POTATO",
    })
    require.ErrorContains(t, err, "cannot parse vfs options")
    err = vol.applyOptions(VolOpts{
        "remote":          "/tmp/docker",
        "local_not_found": "POTATO",
    })
    require.ErrorContains(t, err, "unsupported backend option")

}
@@ -24,6 +24,12 @@ import (
    nfshelper "github.com/willscott/go-nfs/helpers"
)

// Errors on cache initialisation
var (
    ErrorSymlinkCacheNotSupported = errors.New("symlink cache not supported on " + runtime.GOOS)
    ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
)

// Cache controls the file handle cache implementation
type Cache interface {
    // ToHandle takes a file and represents it with an opaque handle to reference it.
@@ -43,25 +49,35 @@ type Cache interface {

// Set the cache of the handler to the type required by the user
func (h *Handler) getCache() (c Cache, err error) {
    fs.Debugf("nfs", "Starting %v handle cache", h.opt.HandleCache)
    switch h.opt.HandleCache {
    case cacheMemory:
        return nfshelper.NewCachingHandler(h, h.opt.HandleLimit), nil
    case cacheDisk:
        return newDiskHandler(h)
    case cacheSymlink:
        if runtime.GOOS != "linux" {
            return nil, errors.New("can only use symlink cache on Linux")
        dh, err := newDiskHandler(h)
        if err != nil {
            return nil, err
        }
        return nil, errors.New("FIXME not implemented yet")
        err = dh.makeSymlinkCache()
        if err != nil {
            return nil, err
        }
        return dh, nil
    }
    return nil, errors.New("unknown handle cache type")
}

// diskHandler implements an on disk NFS file handle cache
type diskHandler struct {
    mu       sync.RWMutex
    cacheDir string
    billyFS  billy.Filesystem
    mu         sync.RWMutex
    cacheDir   string
    billyFS    billy.Filesystem
    write      func(fh []byte, cachePath string, fullPath string) ([]byte, error)
    read       func(fh []byte, cachePath string) ([]byte, error)
    remove     func(fh []byte, cachePath string) error
    handleType int32 //nolint:unused // used by the symlink cache
}

// Create a new disk handler
@@ -83,6 +99,9 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
    dh = &diskHandler{
        cacheDir: cacheDir,
        billyFS:  h.billyFS,
        write:    dh.diskCacheWrite,
        read:     dh.diskCacheRead,
        remove:   dh.diskCacheRemove,
    }
    fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
    return dh, nil
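The new write/read/remove fields turn diskHandler into a pluggable cache: it starts with the plain disk implementations, and makeSymlinkCache (further down) swaps the function values in only after its self-test passes. A toy sketch of the pattern, with hypothetical names:

```go
package main

import "fmt"

// cache starts with default implementations in its function fields;
// an upgrade can swap them without changing any call sites.
type cache struct {
    write func(path, contents string) error
    read  func(path string) (string, error)
}

func newCache() *cache {
    c := &cache{}
    c.write = func(path, contents string) error { // default: plain file write
        fmt.Println("disk write", path)
        return nil
    }
    c.read = func(path string) (string, error) { // default: plain file read
        return "contents of " + path, nil
    }
    return c
}

// upgrade plays the role of makeSymlinkCache: install the faster
// implementation only once it is known to work.
func (c *cache) upgrade() {
    c.write = func(path, contents string) error {
        fmt.Println("symlink write", path)
        return nil
    }
}

func main() {
    c := newCache()
    _ = c.write("a", "b") // disk write a
    c.upgrade()
    _ = c.write("a", "b") // symlink write a
}
```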
@@ -120,7 +139,7 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
        fs.Errorf("nfs", "Couldn't create cache file handle directory: %v", err)
        return fh
    }
    err = os.WriteFile(cachePath, []byte(fullPath), 0600)
    fh, err = dh.write(fh, cachePath, fullPath)
    if err != nil {
        fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
        return fh
@@ -128,6 +147,11 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
    return fh
}

// Write the fullPath into cachePath returning the possibly updated fh
func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath string) ([]byte, error) {
    return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
}

var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}

// FromHandle converts from an opaque handle to the file it represents
@@ -135,7 +159,7 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
    dh.mu.RLock()
    defer dh.mu.RUnlock()
    cachePath := dh.handleToPath(fh)
    fullPathBytes, err := os.ReadFile(cachePath)
    fullPathBytes, err := dh.read(fh, cachePath)
    if err != nil {
        fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
        return nil, nil, errStaleHandle
@@ -144,18 +168,28 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
    return dh.billyFS, splitPath, nil
}

// Read the contents of (fh, cachePath)
func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error) {
    return os.ReadFile(cachePath)
}

// Invalidate the handle passed - used on rename and delete
func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
    dh.mu.Lock()
    defer dh.mu.Unlock()
    cachePath := dh.handleToPath(fh)
    err := os.Remove(cachePath)
    err := dh.remove(fh, cachePath)
    if err != nil {
        fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
    }
    return nil
}

// Remove the (fh, cachePath) file
func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
    return os.Remove(cachePath)
}

// HandleLimit exports how many file handles can be safely stored by this cache.
func (dh *diskHandler) HandleLimit() int {
    return math.MaxInt

@@ -13,6 +13,9 @@ import (
    "github.com/stretchr/testify/require"
)

// NB to test the symlink cache, running with elevated permissions is needed
const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink"

// Check basic CRUD operations
func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
    // Check reading a non existent handle returns an error
@@ -101,11 +104,12 @@ func TestCache(t *testing.T) {
    ci := fs.GetConfig(context.Background())
    oldLogLevel := ci.LogLevel
    ci.LogLevel = fs.LogLevelEmergency
    //ci.LogLevel = fs.LogLevelDebug
    defer func() {
        ci.LogLevel = oldLogLevel
    }()
    billyFS := &FS{nil} // place holder billyFS
    for _, cacheType := range []handleCache{cacheMemory, cacheDisk} {
    for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} {
        cacheType := cacheType
        t.Run(cacheType.String(), func(t *testing.T) {
            h := &Handler{
@@ -115,8 +119,27 @@ func TestCache(t *testing.T) {
            h.opt.HandleCache = cacheType
            h.opt.HandleCacheDir = t.TempDir()
            c, err := h.getCache()
            if err == ErrorSymlinkCacheNotSupported {
                t.Skip(err.Error())
            }
            if err == ErrorSymlinkCacheNoPermission {
                t.Skip("Need more permissions to run symlink cache tests: " + testSymlinkCache)
            }
            require.NoError(t, err)

            t.Run("Empty", func(t *testing.T) {
                // Write a handle
                splitPath := []string{""}
                fh := c.ToHandle(h.billyFS, splitPath)
                assert.True(t, len(fh) > 0)

                // Read the handle back
                newFs, newSplitPath, err := c.FromHandle(fh)
                require.NoError(t, err)
                assert.Equal(t, h.billyFS, newFs)
                assert.Equal(t, splitPath, newSplitPath)
                testCacheCRUD(t, h, c, "file")
            })
            t.Run("CRUD", func(t *testing.T) {
                testCacheCRUD(t, h, c, "file")
            })

@@ -3,7 +3,6 @@
package nfs

import (
    "math"
    "os"
    "path"
    "strings"
@@ -37,7 +36,7 @@ func setSys(fi os.FileInfo) {
        Nlink:  1,
        UID:    vfs.Opt.UID,
        GID:    vfs.Opt.GID,
        Fileid: math.MaxUint64, // without this mounting doesn't work on Linux
        Fileid: node.Inode(), // without this mounting doesn't work on Linux
    }
    node.SetSys(&stat)
}
@@ -142,16 +141,16 @@ func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
    return fi, nil
}

// Symlink is not supported over NFS
// Symlink creates a link pointing to target
func (f *FS) Symlink(target, link string) (err error) {
    defer log.Trace(target, "link=%q", link)("err=%v", &err)
    return os.ErrInvalid
    return f.vfs.Symlink(target, link)
}

// Readlink is not supported
// Readlink reads the contents of link
func (f *FS) Readlink(link string) (result string, err error) {
    defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
    return "", os.ErrInvalid
    return f.vfs.Readlink(link)
}

// Chmod changes the file modes

@@ -145,7 +145,9 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only.
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
You can run rclone with this extra permission by doing this to the
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.

|--nfs-cache-handle-limit| controls the maximum number of cached NFS
handles stored by the caching handler. This should not be set too low

cmd/serve/nfs/symlink_cache_linux.go (new file, 177 lines)
@@ -0,0 +1,177 @@
//go:build unix && linux

/*
This implements an efficient disk cache for the NFS file handles for
Linux only.

1. The destination paths are stored as symlink destinations. These
can be stored in the directory for maximum efficiency.

2. The on disk handle of the cache file is returned to NFS with
name_to_handle_at(). This means that if the cache is deleted and
restored, the file handle mapping will be lost.

3. These handles are looked up with open_by_handle_at() so no
searching through directory trees is needed.

Note that open_by_handle_at requires CAP_DAC_READ_SEARCH so rclone
will need to be run as root or with elevated permissions.

Test with

go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink

*/

package nfs

import (
    "bytes"
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "syscall"

    "github.com/rclone/rclone/fs"
    "golang.org/x/sys/unix"
)

// emptyPath is written instead of "" as symlinks can't be empty
var (
    emptyPath      = "\x01"
    emptyPathBytes = []byte(emptyPath)
)

// Turn the diskHandler into a symlink cache
//
// This also tests that the cache works as it may not have enough
// permissions or may not be on the correct Linux version.
func (dh *diskHandler) makeSymlinkCache() error {
    path := filepath.Join(dh.cacheDir, "test")
    fullPath := "testpath"
    fh := []byte{1, 2, 3, 4, 5}

    // Create a symlink
    newFh, err := dh.symlinkCacheWrite(fh, path, fullPath)
    fs.Debugf(nil, "newFh = %q", newFh)
    if err != nil {
        return fmt.Errorf("symlink cache write test failed: %w", err)
    }
    defer func() {
        _ = os.Remove(path)
    }()

    // Read it back
    newFullPath, err := dh.symlinkCacheRead(newFh, path)
    fs.Debugf(nil, "newFullPath = %q", newFullPath)
    if err != nil {
        if errors.Is(err, syscall.EPERM) {
            return ErrorSymlinkCacheNoPermission
        }
        return fmt.Errorf("symlink cache read test failed: %w", err)
    }

    // Check result all OK
    if string(newFullPath) != fullPath {
        return fmt.Errorf("symlink cache read test failed: expecting %q read %q", fullPath, string(newFullPath))
    }

    // If OK install symlink cache
    dh.read = dh.symlinkCacheRead
    dh.write = dh.symlinkCacheWrite
    dh.remove = dh.symlinkCacheRemove

    return nil
}

// Write the fullPath into cachePath returning the possibly updated fh
//
// This writes the fullPath into the file with the cachePath given and
// returns the handle for that file so we can look it up later.
func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath string) (newFh []byte, err error) {
    //defer log.Trace(nil, "fh=%x, cachePath=%q, fullPath=%q", fh, cachePath)("newFh=%x, err=%v", &newFh, &err)

    // Can't write an empty symlink so write a substitution
    if fullPath == "" {
        fullPath = emptyPath
    }

    // Write the symlink
    err = os.Symlink(fullPath, cachePath)
    if err != nil && !errors.Is(err, syscall.EEXIST) {
        return nil, fmt.Errorf("symlink cache create symlink: %w", err)
    }

    // Read the newly created symlink's handle
    handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, cachePath, 0)
    if err != nil {
        return nil, fmt.Errorf("symlink cache name to handle at: %w", err)
    }

    // Store the handle type if it has changed
    // This should run once only when called by makeSymlinkCache
    if dh.handleType != handle.Type() {
        dh.handleType = handle.Type()
    }

    return handle.Bytes(), nil
}

// Read the contents of (fh, cachePath)
//
// This reads the symlink with the corresponding file handle and
// returns the contents. It ignores the cachePath which will be
// pointing in the wrong place.
//
// Note that the caller needs CAP_DAC_READ_SEARCH to use this.
func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) {
    //defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err)

    // Find the file with the handle passed in
    handle := unix.NewFileHandle(dh.handleType, fh)
    fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks
    if err != nil {
        return nil, fmt.Errorf("symlink cache open by handle at: %w", err)
    }

    // Close it on exit
    defer func() {
        newErr := unix.Close(fd)
        if err == nil {
            err = newErr
        }
    }()

    // Read the symlink which is the path required
    buf := make([]byte, 1024)              // Max path length
    n, err := unix.Readlinkat(fd, "", buf) // Readlinkat silently truncates the result if the buffer is too small
    if err != nil {
        return nil, fmt.Errorf("symlink cache read: %w", err)
    }
    fullPath = buf[:n:n]

    // Undo empty symlink substitution
    if bytes.Equal(fullPath, emptyPathBytes) {
        fullPath = buf[:0:0]
    }

    return fullPath, nil
}

// Remove the (fh, cachePath) file
func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error {
    // First read the path
    fullPath, err := dh.symlinkCacheRead(fh, cachePath)
    if err != nil {
        return err
    }

    // fh for the actual cache file
    fh = hashPath(string(fullPath))

    // cachePath for the actual cache file
    cachePath = dh.handleToPath(fh)

    return os.Remove(cachePath)
}

cmd/serve/nfs/symlink_cache_other.go (new file, 8 lines)
@@ -0,0 +1,8 @@
//go:build unix && !linux

package nfs

// Turn the diskHandler into a symlink cache
func (dh *diskHandler) makeSymlinkCache() error {
    return ErrorSymlinkCacheNotSupported
}
@@ -9,6 +9,7 @@ import (
    "path"
    "strings"
    "sync"
    "time"

    "github.com/ncw/swift/v2"
    "github.com/rclone/gofakes3"
@@ -18,7 +19,6 @@ import (

var (
    emptyPrefix = &gofakes3.Prefix{}
    timeFormat  = "Mon, 2 Jan 2006 15:04:05 GMT"
)

// s3Backend implements the gofakes3.Backend interface to make an S3
@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
    for _, entry := range dirEntries {
        if entry.IsDir() {
            response = append(response, gofakes3.BucketInfo{
                Name:         gofakes3.URLEncode(entry.Name()),
                Name:         entry.Name(),
                CreationDate: gofakes3.NewContentTime(entry.ModTime()),
            })
        }
@@ -98,6 +98,13 @@ func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofak
    return b.pager(response, page)
}

// formatHeaderTime makes a timestamp which is the same as that used by AWS.
//
// This is like RFC1123 always in UTC, but has GMT instead of UTC
func formatHeaderTime(t time.Time) string {
    return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
}
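A quick check of the output format — this just copies the function above and feeds it a fixed time:

```go
package main

import (
    "fmt"
    "time"
)

// Copy of formatHeaderTime above: RFC1123-style, always UTC, but
// spelled "GMT" as AWS does.
func formatHeaderTime(t time.Time) string {
    return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
}

func main() {
    t := time.Date(2024, time.January, 2, 3, 4, 5, 0, time.UTC)
    fmt.Println(formatHeaderTime(t)) // Tue, 02 Jan 2024 03:04:05 GMT
}
```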

// HeadObject returns the fileinfo for the given object name.
//
// Note that the metadata is not supported yet.
@@ -131,7 +138,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
    hash := getFileHashByte(fobj)

    meta := map[string]string{
        "Last-Modified": node.ModTime().Format(timeFormat),
        "Last-Modified": formatHeaderTime(node.ModTime()),
        "Content-Type":  fs.MimeType(context.Background(), fobj),
    }

@@ -208,7 +215,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
    }

    meta := map[string]string{
        "Last-Modified": node.ModTime().Format(timeFormat),
        "Last-Modified": formatHeaderTime(node.ModTime()),
        "Content-Type":  fs.MimeType(context.Background(), fobj),
    }

@@ -220,7 +227,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
    }

    return &gofakes3.Object{
        Name:     gofakes3.URLEncode(objectName),
        Name:     objectName,
        Hash:     hash,
        Metadata: meta,
        Size:     size,

@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr

    if entry.IsDir() {
        if addPrefix {
            response.AddPrefix(gofakes3.URLEncode(objectPath))
            response.AddPrefix(objectPath)
            continue
        }
        err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
        }
    } else {
        item := &gofakes3.Content{
            Key:          gofakes3.URLEncode(objectPath),
            Key:          objectPath,
            LastModified: gofakes3.NewContentTime(entry.ModTime()),
            ETag:         getFileHash(entry),
            Size:         entry.Size(),

@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```

Note that setting `disable_multipart_uploads = true` is to work around
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.

### Bugs

@@ -65,7 +65,7 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
    if s.proxy == nil {
        return s.vfs
    }
    if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
    if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
        fs.Infof(what, "SSH Permissions Extensions not found")
        return nil
    }
@@ -143,8 +143,13 @@ func (s *server) serve() (err error) {
        authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
        authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
        // If the user set the flag away from the default then report an error
        if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
            return err
        if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
            if err != nil {
                return err
            }
            if len(authorizedKeysMap) == 0 {
                return fmt.Errorf("failed to parse authorized keys")
            }
        }
        fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
    }
@@ -349,11 +354,10 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
    authorizedKeysMap = make(map[string]struct{})
    for len(authorizedKeysBytes) > 0 {
        pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
        if err != nil {
            return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
        if err == nil {
            authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
            authorizedKeysBytes = bytes.TrimSpace(rest)
        }
        authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
        authorizedKeysBytes = bytes.TrimSpace(rest)
    }
    return authorizedKeysMap, nil
}

@@ -4,7 +4,6 @@ package size
import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strconv"

@@ -72,13 +71,13 @@ of the size command.
        count := strconv.FormatInt(results.Count, 10)
        countSuffix := fs.CountSuffix(results.Count).String()
        if count == countSuffix {
            fmt.Printf("Total objects: %s\n", count)
            operations.SyncPrintf("Total objects: %s\n", count)
        } else {
            fmt.Printf("Total objects: %s (%s)\n", countSuffix, count)
            operations.SyncPrintf("Total objects: %s (%s)\n", countSuffix, count)
        }
        fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
        operations.SyncPrintf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
        if results.Sizeless > 0 {
            fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
            operations.SyncPrintf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
        }
        return nil
    })

@@ -30,6 +30,7 @@ var (
    maxFileSize       = fs.SizeSuffix(100)
    minFileNameLength = 4
    maxFileNameLength = 12
    flat              = false
    seed              = int64(1)
    zero              = false
    sparse            = false
@@ -55,6 +56,7 @@ func init() {
    flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
    flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
    flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
    flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")

    test.Command.AddCommand(makefileCmd)
    makefileFlags := makefileCmd.Flags()
@@ -81,6 +83,9 @@ var makefilesCmd = &cobra.Command{
        commonInit()
        outputDirectory := args[0]
        directoriesToCreate = numberOfFiles / averageFilesPerDirectory
        if flat {
            directoriesToCreate = 0
        }
        averageSize := (minFileSize + maxFileSize) / 2
        start := time.Now()
        fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)

@@ -3,10 +3,13 @@ package version

import (
    "context"
    "debug/buildinfo"
    "errors"
    "fmt"
    "io"
    "net/http"
    "os"
    "runtime/debug"
    "strings"
    "time"

@@ -20,12 +23,14 @@ import (

var (
    check = false
    deps  = false
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version", "")
    flags.BoolVarP(cmdFlags, &deps, "deps", "", false, "Show the Go dependencies", "")
}

var commandDefinition = &cobra.Command{
@@ -67,18 +72,25 @@ Or
beta: 1.42.0.5 (released 2018-06-17)
upgrade: https://beta.rclone.org/v1.42-005-g56e1e820

If you supply the --deps flag then rclone will print a list of all the
packages it depends on and their versions along with some other
information about the build.
`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.33",
    },
    Run: func(command *cobra.Command, args []string) {
    RunE: func(command *cobra.Command, args []string) error {
        ctx := context.Background()
        cmd.CheckArgs(0, 0, command, args)
        if deps {
            return printDependencies()
        }
        if check {
            CheckVersion(ctx)
        } else {
            cmd.ShowVersion()
        }
        return nil
    },
}

@@ -151,3 +163,36 @@ func CheckVersion(ctx context.Context) {
        fmt.Println("Your version is compiled from git so comparisons may be wrong.")
    }
}

// Print info about a build module
func printModule(module *debug.Module) {
    if module.Replace != nil {
        fmt.Printf("- %s %s (replaced by %s %s)\n",
            module.Path, module.Version, module.Replace.Path, module.Replace.Version)
    } else {
        fmt.Printf("- %s %s\n", module.Path, module.Version)
    }
}

// printDependencies shows the packages we use in a format like go.mod
func printDependencies() error {
    info, err := buildinfo.ReadFile(os.Args[0])
    if err != nil {
        return fmt.Errorf("error reading build info: %w", err)
    }
    fmt.Println("Go Version:")
    fmt.Printf("- %s\n", info.GoVersion)
    fmt.Println("Main package:")
    printModule(&info.Main)
    fmt.Println("Binary path:")
    fmt.Printf("- %s\n", info.Path)
    fmt.Println("Settings:")
    for _, setting := range info.Settings {
        fmt.Printf("- %s: %s\n", setting.Key, setting.Value)
    }
    fmt.Println("Dependencies:")
    for _, dep := range info.Deps {
        printModule(dep)
    }
    return nil
}

@@ -15,7 +15,7 @@ import (
    "github.com/stretchr/testify/require"
)

// TestCmdTest demonstrates and verifies the test functions for end-to-end testing of rclone
// TestEnvironmentVariables demonstrates and verifies the test functions for end-to-end testing of rclone
func TestEnvironmentVariables(t *testing.T) {

    createTestEnvironment(t)

@@ -66,12 +66,13 @@ so it is easy to tweak stuff.
└── static - static content for the website
    ├── css
    │   ├── bootstrap.css
    │   ├── custom.css - custom css goes here
    │   └── font-awesome.css
    │   └── custom.css - custom css goes here
    ├── fontawesome
    │   ├── css
    │   └── webfonts
    ├── img - images used
    ├── js
    │   ├── bootstrap.js
    │   ├── custom.js - custom javascript goes here
    │   └── jquery.js
    └── webfonts
```

Some files were not shown because too many files have changed in this diff.