mirror of
https://github.com/rclone/rclone.git
synced 2026-01-30 16:24:01 +00:00
Compare commits
1 Commits
master
...
fix-9115-f
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8216d66d18 |
@@ -63,7 +63,6 @@ directories to and from different cloud storage providers.
|
||||
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
|
||||
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
- Internxt [:page_facing_up:](https://rclone.org/internxt/)
|
||||
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
|
||||
|
||||
@@ -35,7 +35,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/iclouddrive"
|
||||
_ "github.com/rclone/rclone/backend/imagekit"
|
||||
_ "github.com/rclone/rclone/backend/internetarchive"
|
||||
_ "github.com/rclone/rclone/backend/internxt"
|
||||
_ "github.com/rclone/rclone/backend/jottacloud"
|
||||
_ "github.com/rclone/rclone/backend/koofr"
|
||||
_ "github.com/rclone/rclone/backend/linkbox"
|
||||
|
||||
@@ -733,17 +733,6 @@ two accounts.
|
||||
Advanced: true,
|
||||
Default: rwOff,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: "metadata_enforce_expansive_access",
|
||||
Help: `Whether the request should enforce expansive access rules.
|
||||
|
||||
From Feb 2026 this flag will be set by default so this flag can be used for
|
||||
testing before then.
|
||||
|
||||
See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -823,7 +812,6 @@ type Options struct {
|
||||
MetadataOwner rwChoice `config:"metadata_owner"`
|
||||
MetadataPermissions rwChoice `config:"metadata_permissions"`
|
||||
MetadataLabels rwChoice `config:"metadata_labels"`
|
||||
EnforceExpansiveAccess bool `config:"metadata_enforce_expansive_access"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
@@ -3104,7 +3092,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
_, err = f.svc.Permissions.Create(id, permission).
|
||||
Fields("").
|
||||
SupportsAllDrives(true).
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
@@ -149,7 +149,6 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
|
||||
_, err := f.svc.Permissions.Create(info.Id, perm).
|
||||
SupportsAllDrives(true).
|
||||
SendNotificationEmail(false).
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -484,7 +483,6 @@ func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err
|
||||
SupportsAllDrives(true).
|
||||
TransferOwnership(true).
|
||||
// SendNotificationEmail(false). - required apparently!
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
// Package internxt provides authentication handling
|
||||
package internxt
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
internxtauth "github.com/internxt/rclone-adapter/auth"
|
||||
internxtconfig "github.com/internxt/rclone-adapter/config"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
type userInfo struct {
|
||||
RootFolderID string
|
||||
Bucket string
|
||||
BridgeUser string
|
||||
UserID string
|
||||
}
|
||||
|
||||
type userInfoConfig struct {
|
||||
Token string
|
||||
}
|
||||
|
||||
// getUserInfo fetches user metadata from the refresh endpoint
|
||||
func getUserInfo(ctx context.Context, cfg *userInfoConfig) (*userInfo, error) {
|
||||
// Call the refresh endpoint to get all user metadata
|
||||
refreshCfg := internxtconfig.NewDefaultToken(cfg.Token)
|
||||
resp, err := internxtauth.RefreshToken(ctx, refreshCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user info: %w", err)
|
||||
}
|
||||
|
||||
if resp.User.Bucket == "" {
|
||||
return nil, errors.New("API response missing user.bucket")
|
||||
}
|
||||
if resp.User.RootFolderID == "" {
|
||||
return nil, errors.New("API response missing user.rootFolderId")
|
||||
}
|
||||
if resp.User.BridgeUser == "" {
|
||||
return nil, errors.New("API response missing user.bridgeUser")
|
||||
}
|
||||
if resp.User.UserID == "" {
|
||||
return nil, errors.New("API response missing user.userId")
|
||||
}
|
||||
|
||||
info := &userInfo{
|
||||
RootFolderID: resp.User.RootFolderID,
|
||||
Bucket: resp.User.Bucket,
|
||||
BridgeUser: resp.User.BridgeUser,
|
||||
UserID: resp.User.UserID,
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "User info: rootFolderId=%s, bucket=%s",
|
||||
info.RootFolderID, info.Bucket)
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// parseJWTExpiry extracts the expiry time from a JWT token string
|
||||
func parseJWTExpiry(tokenString string) (time.Time, error) {
|
||||
parser := jwt.NewParser(jwt.WithoutClaimsValidation())
|
||||
token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{})
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to parse token: %w", err)
|
||||
}
|
||||
|
||||
claims, ok := token.Claims.(jwt.MapClaims)
|
||||
if !ok {
|
||||
return time.Time{}, errors.New("invalid token claims")
|
||||
}
|
||||
|
||||
exp, ok := claims["exp"].(float64)
|
||||
if !ok {
|
||||
return time.Time{}, errors.New("token missing expiration")
|
||||
}
|
||||
|
||||
return time.Unix(int64(exp), 0), nil
|
||||
}
|
||||
|
||||
// jwtToOAuth2Token converts a JWT string to an oauth2.Token with expiry
|
||||
func jwtToOAuth2Token(jwtString string) (*oauth2.Token, error) {
|
||||
expiry, err := parseJWTExpiry(jwtString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &oauth2.Token{
|
||||
AccessToken: jwtString,
|
||||
TokenType: "Bearer",
|
||||
Expiry: expiry,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// computeBasicAuthHeader creates the BasicAuthHeader for bucket operations
|
||||
// Following the pattern from SDK's auth/access.go:96-102
|
||||
func computeBasicAuthHeader(bridgeUser, userID string) string {
|
||||
sum := sha256.Sum256([]byte(userID))
|
||||
hexPass := hex.EncodeToString(sum[:])
|
||||
creds := fmt.Sprintf("%s:%s", bridgeUser, hexPass)
|
||||
return "Basic " + base64.StdEncoding.EncodeToString([]byte(creds))
|
||||
}
|
||||
|
||||
// refreshJWTToken refreshes the token using Internxt's refresh endpoint
|
||||
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
currentToken, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current token: %w", err)
|
||||
}
|
||||
|
||||
cfg := internxtconfig.NewDefaultToken(currentToken.AccessToken)
|
||||
resp, err := internxtauth.RefreshToken(ctx, cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("refresh request failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.NewToken == "" {
|
||||
return errors.New("refresh response missing newToken")
|
||||
}
|
||||
|
||||
// Convert JWT to oauth2.Token format
|
||||
token, err := jwtToOAuth2Token(resp.NewToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse refreshed token: %w", err)
|
||||
}
|
||||
|
||||
err = oauthutil.PutToken(name, m, token, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save token: %w", err)
|
||||
}
|
||||
|
||||
if resp.User.Bucket != "" {
|
||||
m.Set("bucket", resp.User.Bucket)
|
||||
}
|
||||
|
||||
fs.Debugf(name, "Token refreshed successfully, new expiry: %v", token.Expiry)
|
||||
return nil
|
||||
}
|
||||
@@ -1,988 +0,0 @@
|
||||
// Package internxt provides an interface to Internxt's Drive API
|
||||
package internxt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/internxt/rclone-adapter/auth"
|
||||
"github.com/internxt/rclone-adapter/buckets"
|
||||
config "github.com/internxt/rclone-adapter/config"
|
||||
sdkerrors "github.com/internxt/rclone-adapter/errors"
|
||||
"github.com/internxt/rclone-adapter/files"
|
||||
"github.com/internxt/rclone-adapter/folders"
|
||||
"github.com/internxt/rclone-adapter/users"
|
||||
"github.com/rclone/rclone/fs"
|
||||
rclone_config "github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
// shouldRetry determines if an error should be retried
|
||||
func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
var httpErr *sdkerrors.HTTPError
|
||||
if errors.As(err, &httpErr) && httpErr.StatusCode() == 401 {
|
||||
return true, err
|
||||
}
|
||||
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "internxt",
|
||||
Description: "Internxt Drive",
|
||||
NewFs: NewFs,
|
||||
Config: Config,
|
||||
Options: []fs.Option{{
|
||||
Name: "email",
|
||||
Help: "Email of your Internxt account.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "mnemonic",
|
||||
Help: "Mnemonic (internal use only)",
|
||||
Required: false,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "skip_hash_validation",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
Help: "Skip hash validation when downloading files.\n\nBy default, hash validation is disabled. Set this to false to enable validation.",
|
||||
}, {
|
||||
Name: rclone_config.ConfigEncoding,
|
||||
Help: rclone_config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeDot |
|
||||
encoder.EncodeCrLf,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Config configures the Internxt remote by performing login
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
email, _ := m.Get("email")
|
||||
if email == "" {
|
||||
return nil, errors.New("email is required")
|
||||
}
|
||||
|
||||
pass, _ := m.Get("pass")
|
||||
if pass != "" {
|
||||
var err error
|
||||
pass, err = obscure.Reveal(pass)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt password: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg := config.NewDefaultToken("")
|
||||
|
||||
switch configIn.State {
|
||||
case "":
|
||||
// Check if 2FA is required
|
||||
loginResp, err := auth.Login(ctx, cfg, email)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check login requirements: %w", err)
|
||||
}
|
||||
|
||||
if loginResp.TFA {
|
||||
return fs.ConfigInput("2fa", "config_2fa", "Two-factor authentication code")
|
||||
}
|
||||
|
||||
// No 2FA required, do login directly
|
||||
return fs.ConfigGoto("login")
|
||||
|
||||
case "2fa":
|
||||
twoFA := configIn.Result
|
||||
if twoFA == "" {
|
||||
return fs.ConfigError("", "2FA code is required")
|
||||
}
|
||||
m.Set("2fa_code", twoFA)
|
||||
return fs.ConfigGoto("login")
|
||||
|
||||
case "login":
|
||||
twoFA, _ := m.Get("2fa_code")
|
||||
|
||||
loginResp, err := auth.DoLogin(ctx, cfg, email, pass, twoFA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("login failed: %w", err)
|
||||
}
|
||||
|
||||
// Store mnemonic (obscured)
|
||||
m.Set("mnemonic", obscure.MustObscure(loginResp.User.Mnemonic))
|
||||
|
||||
// Store token
|
||||
oauthToken, err := jwtToOAuth2Token(loginResp.NewToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse token: %w", err)
|
||||
}
|
||||
err = oauthutil.PutToken(name, m, oauthToken, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save token: %w", err)
|
||||
}
|
||||
|
||||
// Clear temporary 2FA code
|
||||
m.Set("2fa_code", "")
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown state %q", configIn.State)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Email string `config:"email"`
|
||||
Pass string `config:"pass"`
|
||||
TwoFA string `config:"2fa"`
|
||||
Mnemonic string `config:"mnemonic"`
|
||||
SkipHashValidation bool `config:"skip_hash_validation"`
|
||||
Encoding encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents an Internxt remote
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
dirCache *dircache.DirCache
|
||||
cfg *config.Config
|
||||
features *fs.Features
|
||||
pacer *fs.Pacer
|
||||
tokenRenewer *oauthutil.Renew
|
||||
bridgeUser string
|
||||
userID string
|
||||
}
|
||||
|
||||
// Object holds the data for a remote file object
|
||||
type Object struct {
|
||||
f *Fs
|
||||
remote string
|
||||
id string
|
||||
uuid string
|
||||
size int64
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string { return f.name }
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string { return f.root }
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string { return fmt.Sprintf("Internxt root '%s'", f.root) }
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Hashes returns type of hashes supported by Internxt
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet()
|
||||
}
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
if err := configstruct.Set(m, opt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.Mnemonic == "" {
|
||||
return nil, errors.New("mnemonic is required - please run: rclone config reconnect " + name + ":")
|
||||
}
|
||||
|
||||
var err error
|
||||
opt.Mnemonic, err = obscure.Reveal(opt.Mnemonic)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt mnemonic: %w", err)
|
||||
}
|
||||
|
||||
oauthToken, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get token - please run: rclone config reconnect %s: - %w", name, err)
|
||||
}
|
||||
|
||||
oauthConfig := &oauthutil.Config{
|
||||
TokenURL: "https://gateway.internxt.com/drive/users/refresh",
|
||||
}
|
||||
|
||||
_, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create oauth client: %w", err)
|
||||
}
|
||||
|
||||
cfg := config.NewDefaultToken(oauthToken.AccessToken)
|
||||
cfg.Mnemonic = opt.Mnemonic
|
||||
cfg.SkipHashValidation = opt.SkipHashValidation
|
||||
|
||||
userInfo, err := getUserInfo(ctx, &userInfoConfig{Token: cfg.Token})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user info: %w", err)
|
||||
}
|
||||
|
||||
cfg.RootFolderID = userInfo.RootFolderID
|
||||
cfg.Bucket = userInfo.Bucket
|
||||
cfg.BasicAuthHeader = computeBasicAuthHeader(userInfo.BridgeUser, userInfo.UserID)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: strings.Trim(root, "/"),
|
||||
opt: *opt,
|
||||
cfg: cfg,
|
||||
bridgeUser: userInfo.BridgeUser,
|
||||
userID: userInfo.UserID,
|
||||
}
|
||||
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if ts != nil {
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
err := refreshJWTToken(ctx, name, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newToken, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get refreshed token: %w", err)
|
||||
}
|
||||
f.cfg.Token = newToken.AccessToken
|
||||
f.cfg.BasicAuthHeader = computeBasicAuthHeader(f.bridgeUser, f.userID)
|
||||
|
||||
return nil
|
||||
})
|
||||
f.tokenRenewer.Start()
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(f.root, cfg.RootFolderID, f)
|
||||
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
// Assume it might be a file
|
||||
newRoot, remote := dircache.SplitPath(f.root)
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, f.cfg.RootFolderID, &tempF)
|
||||
tempF.root = newRoot
|
||||
|
||||
err = tempF.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
_, err := tempF.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Mkdir creates a new directory
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
id, err := f.dirCache.FindDir(ctx, dir, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.dirCache.Put(dir, id)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir removes a directory
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
root := path.Join(f.root, dir)
|
||||
if root == "" {
|
||||
return errors.New("cannot remove root directory")
|
||||
}
|
||||
|
||||
id, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
// Check if directory is empty
|
||||
var childFolders []folders.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
childFolders, err = folders.ListAllFolders(ctx, f.cfg, id)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(childFolders) > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
var childFiles []folders.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
childFiles, err = folders.ListAllFiles(ctx, f.cfg, id)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(childFiles) > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
|
||||
// Delete the directory
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err := folders.DeleteFolder(ctx, f.cfg, id)
|
||||
if err != nil && strings.Contains(err.Error(), "404") {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.dirCache.FlushDir(dir)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FindLeaf looks for a sub‑folder named `leaf` under the Internxt folder `pathID`.
|
||||
// If found, it returns its UUID and true. If not found, returns "", false.
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
|
||||
var entries []folders.Folder
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
entries, err = folders.ListAllFolders(ctx, f.cfg, pathID)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
for _, e := range entries {
|
||||
if f.opt.Encoding.ToStandardName(e.PlainName) == leaf {
|
||||
return e.UUID, true, nil
|
||||
}
|
||||
}
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
// CreateDir creates a new directory
|
||||
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
|
||||
request := folders.CreateFolderRequest{
|
||||
PlainName: f.opt.Encoding.FromStandardName(leaf),
|
||||
ParentFolderUUID: pathID,
|
||||
ModificationTime: time.Now().UTC().Format(time.RFC3339),
|
||||
}
|
||||
|
||||
var resp *folders.Folder
|
||||
err := f.pacer.CallNoRetry(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = folders.CreateFolder(ctx, f.cfg, request)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
// If folder already exists (409 conflict), try to find it
|
||||
if strings.Contains(err.Error(), "409") || strings.Contains(err.Error(), "Conflict") {
|
||||
existingID, found, findErr := f.FindLeaf(ctx, pathID, leaf)
|
||||
if findErr == nil && found {
|
||||
fs.Debugf(f, "Folder %q already exists in %q, using existing UUID: %s", leaf, pathID, existingID)
|
||||
return existingID, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("can't create folder, %w", err)
|
||||
}
|
||||
|
||||
return resp.UUID, nil
|
||||
}
|
||||
|
||||
// preUploadCheck checks if a file exists in the given directory
|
||||
// Returns the file metadata if it exists, nil if not
|
||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string) (*folders.File, error) {
|
||||
// Parse name and extension from the leaf
|
||||
baseName := f.opt.Encoding.FromStandardName(leaf)
|
||||
name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
|
||||
ext := strings.TrimPrefix(filepath.Ext(baseName), ".")
|
||||
|
||||
checkResult, err := files.CheckFilesExistence(ctx, f.cfg, directoryID, []files.FileExistenceCheck{
|
||||
{
|
||||
PlainName: name,
|
||||
Type: ext,
|
||||
OriginalFile: struct{}{},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
// If existence check fails, assume file doesn't exist to allow upload to proceed
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if len(checkResult.Files) > 0 && checkResult.Files[0].FileExists() {
|
||||
result := checkResult.Files[0]
|
||||
if result.Type != ext {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
existingUUID := result.UUID
|
||||
if existingUUID != "" {
|
||||
fileMeta, err := files.GetFileMeta(ctx, f.cfg, existingUUID)
|
||||
if err == nil && fileMeta != nil {
|
||||
return convertFileMetaToFile(fileMeta), nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// convertFileMetaToFile converts files.FileMeta to folders.File
|
||||
func convertFileMetaToFile(meta *files.FileMeta) *folders.File {
|
||||
// FileMeta and folders.File have compatible structures
|
||||
return &folders.File{
|
||||
ID: meta.ID,
|
||||
UUID: meta.UUID,
|
||||
FileID: meta.FileID,
|
||||
PlainName: meta.PlainName,
|
||||
Type: meta.Type,
|
||||
Size: meta.Size,
|
||||
Bucket: meta.Bucket,
|
||||
FolderUUID: meta.FolderUUID,
|
||||
EncryptVersion: meta.EncryptVersion,
|
||||
ModificationTime: meta.ModificationTime,
|
||||
}
|
||||
}
|
||||
|
||||
// List lists a directory
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
dirID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out fs.DirEntries
|
||||
|
||||
var foldersList []folders.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
foldersList, err = folders.ListAllFolders(ctx, f.cfg, dirID)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, e := range foldersList {
|
||||
remote := filepath.Join(dir, f.opt.Encoding.ToStandardName(e.PlainName))
|
||||
out = append(out, fs.NewDir(remote, e.ModificationTime))
|
||||
}
|
||||
var filesList []folders.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
filesList, err = folders.ListAllFiles(ctx, f.cfg, dirID)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, e := range filesList {
|
||||
remote := e.PlainName
|
||||
if len(e.Type) > 0 {
|
||||
remote += "." + e.Type
|
||||
}
|
||||
remote = filepath.Join(dir, f.opt.Encoding.ToStandardName(remote))
|
||||
out = append(out, newObjectWithFile(f, remote, &e))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Put uploads a file
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
o := &Object{
|
||||
f: f,
|
||||
remote: remote,
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if file already exists
|
||||
existingFile, err := f.preUploadCheck(ctx, leaf, directoryID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create object - if file exists, populate it with existing metadata
|
||||
o := &Object{
|
||||
f: f,
|
||||
remote: remote,
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}
|
||||
|
||||
if existingFile != nil {
|
||||
// File exists - populate object with existing metadata
|
||||
size, _ := existingFile.Size.Int64()
|
||||
o.id = existingFile.FileID
|
||||
o.uuid = existingFile.UUID
|
||||
o.size = size
|
||||
o.modTime = existingFile.ModificationTime
|
||||
}
|
||||
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Remove removes an object
|
||||
func (f *Fs) Remove(ctx context.Context, remote string) error {
|
||||
obj, err := f.NewObject(ctx, remote)
|
||||
if err == nil {
|
||||
if err := obj.Remove(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
parent := path.Dir(remote)
|
||||
f.dirCache.FlushDir(parent)
|
||||
return nil
|
||||
}
|
||||
|
||||
dirID, err := f.dirCache.FindDir(ctx, remote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err := folders.DeleteFolder(ctx, f.cfg, dirID)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dirCache.FlushDir(remote)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewObject creates a new object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
parentDir := filepath.Dir(remote)
|
||||
|
||||
if parentDir == "." {
|
||||
parentDir = ""
|
||||
}
|
||||
|
||||
dirID, err := f.dirCache.FindDir(ctx, parentDir, false)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
var files []folders.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
files, err = folders.ListAllFiles(ctx, f.cfg, dirID)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
targetName := filepath.Base(remote)
|
||||
for _, e := range files {
|
||||
name := e.PlainName
|
||||
if len(e.Type) > 0 {
|
||||
name += "." + e.Type
|
||||
}
|
||||
decodedName := f.opt.Encoding.ToStandardName(name)
|
||||
if decodedName == targetName {
|
||||
return newObjectWithFile(f, remote, &e), nil
|
||||
}
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// newObjectWithFile returns a new object by file info
|
||||
func newObjectWithFile(f *Fs, remote string, file *folders.File) fs.Object {
|
||||
size, _ := file.Size.Int64()
|
||||
return &Object{
|
||||
f: f,
|
||||
remote: remote,
|
||||
id: file.FileID,
|
||||
uuid: file.UUID,
|
||||
size: size,
|
||||
modTime: file.ModificationTime,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// String returns the remote path
|
||||
func (o *Object) String() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size is the file length
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime is the last modified time (read-only)
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Hash returns the hash value (not implemented)
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the modified time
|
||||
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
var internxtLimit *users.LimitResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
internxtLimit, err = users.GetLimit(ctx, f.cfg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var internxtUsage *users.UsageResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
internxtUsage, err = users.GetUsage(ctx, f.cfg)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage := &fs.Usage{
|
||||
Used: fs.NewUsageValue(internxtUsage.Drive),
|
||||
}
|
||||
|
||||
usage.Total = fs.NewUsageValue(internxtLimit.MaxSpaceBytes)
|
||||
usage.Free = fs.NewUsageValue(*usage.Total - *usage.Used)
|
||||
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any cached
|
||||
// connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
buckets.WaitForPendingThumbnails()
|
||||
|
||||
if f.tokenRenewer != nil {
|
||||
f.tokenRenewer.Shutdown()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens a file for streaming
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
fs.FixRangeOption(options, o.size)
|
||||
rangeValue := ""
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
case *fs.RangeOption, *fs.SeekOption:
|
||||
_, rangeValue = option.Header()
|
||||
}
|
||||
}
|
||||
|
||||
if o.size == 0 {
|
||||
return io.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
|
||||
var stream io.ReadCloser
|
||||
err := o.f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
stream, err = buckets.DownloadFileStream(ctx, o.f.cfg, o.id, rangeValue)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// Update updates an existing file or creates a new one.
//
// Because the Internxt API has no in-place overwrite, this uses a
// rename-based rollback pattern:
//
//  1. If the file already exists, rename it to a unique backup name so
//     the old data survives while the new upload is in flight.
//  2. Upload the new content under the original name.
//  3. On success, delete the backup; on failure, rename the backup back
//     to the original name so the old file is restored.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	remote := o.remote

	// Split the original name into stem and extension for a possible
	// rollback rename later.
	origBaseName := filepath.Base(remote)
	origName := strings.TrimSuffix(origBaseName, filepath.Ext(origBaseName))
	origType := strings.TrimPrefix(filepath.Ext(origBaseName), ".")

	// Create directory if it doesn't exist
	_, dirID, err := o.f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return err
	}

	// rename based rollback pattern
	// old file is preserved until new upload succeeds

	var backupUUID string
	var backupName, backupType string
	oldUUID := o.uuid

	// Step 1: If file exists, rename to backup (preserves old file during upload)
	if oldUUID != "" {
		// Generate unique backup name
		baseName := filepath.Base(remote)
		name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
		ext := strings.TrimPrefix(filepath.Ext(baseName), ".")

		// Random suffix avoids colliding with a leftover backup from a
		// previous interrupted run.
		backupSuffix := fmt.Sprintf(".rclone-backup-%s", random.String(8))
		backupName = o.f.opt.Encoding.FromStandardName(name + backupSuffix)
		backupType = ext

		// Rename existing file to backup name
		err = o.f.pacer.Call(func() (bool, error) {
			err := files.RenameFile(ctx, o.f.cfg, oldUUID, backupName, backupType)
			if err != nil {
				// Handle 409 Conflict: Treat as success.
				// NOTE(review): presumably this means the rename already
				// happened on a prior retry — confirm against the API docs.
				var httpErr *sdkerrors.HTTPError
				if errors.As(err, &httpErr) && httpErr.StatusCode() == 409 {
					return false, nil
				}
			}
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return fmt.Errorf("failed to rename existing file to backup: %w", err)
		}
		backupUUID = oldUUID

		fs.Debugf(o.f, "Renamed existing file %s to backup %s.%s (UUID: %s)", remote, backupName, backupType, backupUUID)
	}

	// Step 2: Upload the new content under the original (encoded) name.
	// CallNoRetry is used because the stream cannot be rewound for a retry.
	var meta *buckets.CreateMetaResponse
	err = o.f.pacer.CallNoRetry(func() (bool, error) {
		var err error
		meta, err = buckets.UploadFileStreamAuto(ctx,
			o.f.cfg,
			dirID,
			o.f.opt.Encoding.FromStandardName(filepath.Base(remote)),
			in,
			src.Size(),
			src.ModTime(ctx),
		)
		return shouldRetry(ctx, err)
	})

	// The service rejects empty files beyond a quota; restore the backup
	// and report the canonical rclone error.
	if err != nil && isEmptyFileLimitError(err) {
		o.restoreBackupFile(ctx, backupUUID, origName, origType)
		return fs.ErrorCantUploadEmptyFiles
	}

	// A timeout/conflict may mean the upload actually succeeded server-side;
	// try to adopt the existing file instead of failing outright.
	if err != nil {
		meta, err = o.recoverFromTimeoutConflict(ctx, err, remote, dirID)
	}

	if err != nil {
		o.restoreBackupFile(ctx, backupUUID, origName, origType)
		return err
	}

	// Update object metadata
	o.uuid = meta.UUID
	o.id = meta.FileID
	o.size = src.Size()
	o.remote = remote

	// Step 3: Upload succeeded - delete the backup file
	if backupUUID != "" {
		fs.Debugf(o.f, "Upload succeeded, deleting backup file %s.%s (UUID: %s)", backupName, backupType, backupUUID)
		err := o.f.pacer.Call(func() (bool, error) {
			err := files.DeleteFile(ctx, o.f.cfg, backupUUID)
			if err != nil {
				var httpErr *sdkerrors.HTTPError
				if errors.As(err, &httpErr) {
					// Treat 404 (Not Found) and 204 (No Content) as success
					switch httpErr.StatusCode() {
					case 404, 204:
						return false, nil
					}
				}
			}
			return shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(o.f, "Failed to delete backup file %s.%s (UUID: %s): %v. This may leave an orphaned backup file.",
				backupName, backupType, backupUUID, err)
			// Don't fail the upload just because backup deletion failed
		} else {
			fs.Debugf(o.f, "Successfully deleted backup file")
		}
	}

	return nil
}
|
||||
|
||||
// isTimeoutError checks if an error is a timeout using proper error type checking
|
||||
func isTimeoutError(err error) bool {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return true
|
||||
}
|
||||
var netErr net.Error
|
||||
if errors.As(err, &netErr) && netErr.Timeout() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isConflictError checks if an error indicates a file conflict (409)
|
||||
func isConflictError(err error) bool {
|
||||
errMsg := err.Error()
|
||||
return strings.Contains(errMsg, "409") ||
|
||||
strings.Contains(errMsg, "Conflict") ||
|
||||
strings.Contains(errMsg, "already exists")
|
||||
}
|
||||
|
||||
func isEmptyFileLimitError(err error) bool {
|
||||
errMsg := strings.ToLower(err.Error())
|
||||
return strings.Contains(errMsg, "can not have more empty files") ||
|
||||
strings.Contains(errMsg, "cannot have more empty files") ||
|
||||
strings.Contains(errMsg, "you can not have empty files")
|
||||
}
|
||||
|
||||
// recoverFromTimeoutConflict attempts to recover from a timeout or conflict error.
//
// A timeout or 409 during upload can mean the file actually landed on the
// server. This checks the destination directory for a file with the target
// name; if found, it synthesizes a metadata response from the existing file
// so the caller can treat the upload as successful. Any other error, or no
// matching file, returns the original uploadErr unchanged.
func (o *Object) recoverFromTimeoutConflict(ctx context.Context, uploadErr error, remote, dirID string) (*buckets.CreateMetaResponse, error) {
	// Only timeouts and conflicts are recoverable this way.
	if !isTimeoutError(uploadErr) && !isConflictError(uploadErr) {
		return nil, uploadErr
	}

	baseName := filepath.Base(remote)
	encodedName := o.f.opt.Encoding.FromStandardName(baseName)

	var meta *buckets.CreateMetaResponse
	checkErr := o.f.pacer.Call(func() (bool, error) {
		// Ask the server whether a file with this name already exists
		// in the target directory.
		existingFile, err := o.f.preUploadCheck(ctx, encodedName, dirID)
		if err != nil {
			return shouldRetry(ctx, err)
		}
		if existingFile != nil {
			name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
			ext := strings.TrimPrefix(filepath.Ext(baseName), ".")

			// Build a response equivalent to what a successful upload
			// would have returned.
			meta = &buckets.CreateMetaResponse{
				UUID:      existingFile.UUID,
				FileID:    existingFile.FileID,
				Name:      name,
				PlainName: name,
				Type:      ext,
				Size:      existingFile.Size,
			}
			o.id = existingFile.FileID
		}
		return false, nil
	})

	// If the existence check itself failed, report the original upload
	// error rather than the check error — it is the more useful one.
	if checkErr != nil {
		return nil, uploadErr
	}

	if meta != nil {
		return meta, nil
	}

	// No file found on the server: the upload genuinely failed.
	return nil, uploadErr
}
|
||||
|
||||
// restoreBackupFile restores a backup file after upload failure
|
||||
func (o *Object) restoreBackupFile(ctx context.Context, backupUUID, origName, origType string) {
|
||||
if backupUUID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
_ = o.f.pacer.Call(func() (bool, error) {
|
||||
err := files.RenameFile(ctx, o.f.cfg, backupUUID,
|
||||
o.f.opt.Encoding.FromStandardName(origName), origType)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Remove deletes a file
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return o.f.pacer.Call(func() (bool, error) {
|
||||
err := files.DeleteFile(ctx, o.f.cfg, o.uuid)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
// Package internxt_test runs the standard rclone integration test suite
// against an Internxt remote named "TestInternxt:".
package internxt_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestInternxt:",
	})
}
|
||||
@@ -17,10 +17,12 @@ Improvements:
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -254,7 +256,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
defer megaCacheMu.Unlock()
|
||||
srv := megaCache[opt.User]
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
// srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
|
||||
// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
|
||||
// Relevant issues:
|
||||
// https://github.com/rclone/rclone/issues/8565
|
||||
// https://github.com/meganz/webclient/issues/103
|
||||
clt := fshttp.NewClient(ctx)
|
||||
clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||
var ids []uint16
|
||||
// Read default ciphers
|
||||
for _, cs := range tls.CipherSuites() {
|
||||
ids = append(ids, cs.ID)
|
||||
}
|
||||
// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
|
||||
// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
|
||||
t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
|
||||
})
|
||||
srv = mega.New().SetClient(clt)
|
||||
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...any) {
|
||||
|
||||
@@ -133,7 +133,7 @@ type RemoteItemFacet struct {
|
||||
|
||||
// FolderFacet groups folder-related data on OneDrive into a single structure
|
||||
type FolderFacet struct {
|
||||
ChildCount int64 `json:"childCount,omitempty"` // Number of children contained immediately within this container.
|
||||
ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
|
||||
}
|
||||
|
||||
// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
|
||||
|
||||
@@ -60,7 +60,7 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
ReadOnly: true,
|
||||
},
|
||||
"description": {
|
||||
Help: "A short description of the file. Max 1024 characters. No longer supported by Microsoft.",
|
||||
Help: "A short description of the file. Max 1024 characters. Only supported for OneDrive Personal.",
|
||||
Type: "string",
|
||||
Example: "Contract for signing",
|
||||
},
|
||||
@@ -259,8 +259,12 @@ func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, e
|
||||
m.btime = t
|
||||
numSet++
|
||||
case "description":
|
||||
fs.Debugf(m.remote, "metadata description is no longer supported -- skipping: %s", v)
|
||||
continue
|
||||
if m.fs.driveType != driveTypePersonal {
|
||||
fs.Debugf(m.remote, "metadata description is only supported for OneDrive Personal -- skipping: %s", v)
|
||||
continue
|
||||
}
|
||||
m.description = v
|
||||
numSet++
|
||||
case "permissions":
|
||||
if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) {
|
||||
continue
|
||||
@@ -288,6 +292,9 @@ func (m *Metadata) toAPIMetadata() api.Metadata {
|
||||
update := api.Metadata{
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{},
|
||||
}
|
||||
if m.description != "" && m.fs.driveType == driveTypePersonal {
|
||||
update.Description = m.description
|
||||
}
|
||||
if !m.mtime.IsZero() {
|
||||
update.FileSystemInfo.LastModifiedDateTime = api.Timestamp(m.mtime)
|
||||
}
|
||||
@@ -596,10 +603,12 @@ func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (n
|
||||
|
||||
req := &api.AddPermissionsRequest{
|
||||
Recipients: fillRecipients(p, m.fs.driveType),
|
||||
RequireSignIn: true,
|
||||
RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements
|
||||
Roles: p.Roles,
|
||||
}
|
||||
req.RetainInheritedPermissions = false
|
||||
if m.fs.driveType != driveTypePersonal {
|
||||
req.RetainInheritedPermissions = false // not supported for personal
|
||||
}
|
||||
|
||||
if p.Link != nil && p.Link.Scope == api.AnonymousScope {
|
||||
link, err := m.fs.PublicLink(ctx, m.remote, fs.DurationOff, false)
|
||||
@@ -807,13 +816,15 @@ func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata)
|
||||
info, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// for some reason, OneDrive Business and Personal needs this extra step to set modtime. Seems like a bug...
|
||||
fs.Debugf(dir, "setting time %v", meta.mtime)
|
||||
info, err = meta.Write(ctx, false)
|
||||
if f.driveType != driveTypePersonal {
|
||||
// for some reason, OneDrive Business needs this extra step to set modtime, while Personal does not. Seems like a bug...
|
||||
fs.Debugf(dir, "setting time %v", meta.mtime)
|
||||
info, err = meta.Write(ctx, false)
|
||||
}
|
||||
} else if err == nil {
|
||||
// Directory exists and needs updating
|
||||
info, meta, err = f.updateDir(ctx, dirID, dir, metadata)
|
||||
|
||||
@@ -2554,7 +2554,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.unAuth.Call(ctx, &opts)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return
|
||||
|
||||
@@ -136,6 +136,11 @@ func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) {
|
||||
_, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here
|
||||
permissions := defaultPermissions(f.driveType)
|
||||
_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
|
||||
if f.driveType == driveTypePersonal {
|
||||
perms, ok := actualMeta["permissions"]
|
||||
assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms))
|
||||
return
|
||||
}
|
||||
assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"])
|
||||
}
|
||||
|
||||
@@ -154,7 +159,7 @@ func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) {
|
||||
if slices.Contains(optionals, k) {
|
||||
continue
|
||||
}
|
||||
if k == "description" {
|
||||
if k == "description" && f.driveType != driveTypePersonal {
|
||||
continue // not supported
|
||||
}
|
||||
gotV, ok := actualMeta[k]
|
||||
@@ -191,7 +196,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
|
||||
if slices.Contains(optionals, k) {
|
||||
continue
|
||||
}
|
||||
if k == "description" {
|
||||
if k == "description" && f.driveType != driveTypePersonal {
|
||||
continue // not supported
|
||||
}
|
||||
gotV, ok := actualMeta[k]
|
||||
@@ -412,7 +417,9 @@ func (f *Fs) compareMeta(t *testing.T, expectedMeta, actualMeta fs.Metadata, ign
|
||||
compareTimeStrings(t, k, v, gotV, time.Second)
|
||||
continue
|
||||
case "description":
|
||||
continue // not supported
|
||||
if f.driveType != driveTypePersonal {
|
||||
continue // not supported
|
||||
}
|
||||
}
|
||||
assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k))
|
||||
assert.Equal(t, v, gotV, actualMeta)
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
protonDriveAPI "github.com/rclone/Proton-API-Bridge"
|
||||
"github.com/rclone/go-proton-api"
|
||||
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
|
||||
"github.com/pquerna/otp/totp"
|
||||
|
||||
|
||||
@@ -61,7 +61,6 @@ docs = [
|
||||
"imagekit.md",
|
||||
"iclouddrive.md",
|
||||
"internetarchive.md",
|
||||
"internxt.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
"linkbox.md",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
|
||||
|
||||
package cmount
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -211,12 +210,6 @@ func (fsys *FS) Readdir(dirPath string,
|
||||
// We can't seek in directories and FUSE should know that so
|
||||
// return an error if ofst is ever set.
|
||||
if ofst > 0 {
|
||||
// However openbsd doesn't seem to know this - perhaps a bug in its
|
||||
// FUSE implementation or a bug in cgofuse?
|
||||
// See: https://github.com/billziss-gh/cgofuse/issues/49
|
||||
if runtime.GOOS == "openbsd" {
|
||||
return 0
|
||||
}
|
||||
return -fuse.ESPIPE
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
|
||||
|
||||
// Package cmount implements a FUSE mounting system for rclone remotes.
|
||||
//
|
||||
@@ -8,9 +8,9 @@ package cmount
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
@@ -59,14 +59,12 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
|
||||
} else {
|
||||
options = append(options, "-o", "fsname="+device)
|
||||
options = append(options, "-o", "subtype=rclone")
|
||||
if runtime.GOOS != "openbsd" {
|
||||
options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
|
||||
// This causes FUSE to supply O_TRUNC with the Open
|
||||
// call which is more efficient for cmount. However
|
||||
// it does not work with cgofuse on Windows with
|
||||
// WinFSP so cmount must work with or without it.
|
||||
options = append(options, "-o", "atomic_o_trunc")
|
||||
}
|
||||
options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
|
||||
// This causes FUSE to supply O_TRUNC with the Open
|
||||
// call which is more efficient for cmount. However
|
||||
// it does not work with cgofuse on Windows with
|
||||
// WinFSP so cmount must work with or without it.
|
||||
options = append(options, "-o", "atomic_o_trunc")
|
||||
if opt.DaemonTimeout != 0 {
|
||||
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows) && (!race || !windows)
|
||||
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)
|
||||
|
||||
// Package cmount implements a FUSE mounting system for rclone remotes.
|
||||
//
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (openbsd && cgo && cmount) || (windows && cmount))
|
||||
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
|
||||
|
||||
// Package cmount implements a FUSE mounting system for rclone remotes.
|
||||
//
|
||||
|
||||
@@ -66,7 +66,7 @@ at all, then 1 PiB is set as both the total and the free size.
|
||||
### Installing on Windows
|
||||
|
||||
To run `rclone` on Windows, you will need to
|
||||
download and install [WinFsp](https://winfsp.dev).
|
||||
download and install [WinFsp](http://www.secfs.net/winfsp/).
|
||||
|
||||
[WinFsp](https://github.com/winfsp/winfsp) is an open-source
|
||||
Windows File System Proxy which makes it easy to write user space file
|
||||
|
||||
@@ -153,7 +153,6 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
|
||||
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
|
||||
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
|
||||
{{< provider name="Internxt" home="https://internxt.com/" config="/internxt/" >}}
|
||||
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
|
||||
@@ -247,7 +246,7 @@ These backends adapt or modify other storage providers:
|
||||
|
||||
## Links
|
||||
|
||||
- {{< icon "fa fa-home fa-fw" >}} [Home page](https://rclone.org/)
|
||||
- {{< icon "fab fa-github fa-fw" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
|
||||
- {{< icon "fa fa-comments fa-fw" >}} [Rclone Forum](https://forum.rclone.org)
|
||||
- {{< icon "fas fa-cloud-download-alt fa-fw" >}}[Downloads](/downloads/)
|
||||
- {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)
|
||||
- {{< icon "fab fa-github" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
|
||||
- {{< icon "fa fa-comments" >}} [Rclone Forum](https://forum.rclone.org)
|
||||
- {{< icon "fas fa-cloud-download-alt" >}}[Downloads](/downloads/)
|
||||
|
||||
@@ -1067,9 +1067,3 @@ put them back in again. -->
|
||||
- Qingwei Li <332664203@qq.com>
|
||||
- yy <yhymmt37@gmail.com>
|
||||
- Marc-Philip <marc-philip.werner@sap.com>
|
||||
- Mikel Olasagasti Uranga <mikel@olasagasti.info>
|
||||
- Nick Owens <mischief@offblast.org>
|
||||
- hyusap <paulayush@gmail.com>
|
||||
- jzunigax2 <125698953+jzunigax2@users.noreply.github.com>
|
||||
- lullius <lullius@users.noreply.github.com>
|
||||
- StarHack <StarHack@users.noreply.github.com>
|
||||
|
||||
@@ -61,7 +61,6 @@ See the following for detailed instructions for
|
||||
- [HTTP](/http/)
|
||||
- [iCloud Drive](/iclouddrive/)
|
||||
- [Internet Archive](/internetarchive/)
|
||||
- [Internxt](/internxt/)
|
||||
- [Jottacloud](/jottacloud/)
|
||||
- [Koofr](/koofr/)
|
||||
- [Linkbox](/linkbox/)
|
||||
|
||||
@@ -659,14 +659,8 @@ second that each client_id can do set by Google.
|
||||
If there is a problem with this client_id (eg quota too low or the
|
||||
client_id stops working) then you can make your own.
|
||||
|
||||
Please follow the steps in [the google drive docs](https://rclone.org/drive/#making-your-own-client-id)
|
||||
with the following differences:
|
||||
|
||||
- At step 3, instead of enabling the "Google Drive API", search for and
|
||||
enable the "Photos Library API".
|
||||
|
||||
- At step 5, you will need to add different scopes. Use these scopes
|
||||
instead of the drive ones:
|
||||
Please follow the steps in [the google drive docs](https://rclone.org/drive/#making-your-own-client-id).
|
||||
You will need these scopes instead of the drive ones detailed:
|
||||
|
||||
```text
|
||||
https://www.googleapis.com/auth/photoslibrary.appendonly
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
title: "Internxt Drive"
|
||||
description: "Rclone docs for Internxt Drive"
|
||||
versionIntroduced: "v1.73"
|
||||
---
|
||||
|
||||
# {{< icon "fas fa-cloud" >}} Internxt Drive
|
||||
|
||||
[Internxt Drive](https://internxt.com) is a zero-knowledge encrypted cloud storage service.
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
|
||||
|
||||
## Limitations
|
||||
|
||||
**Note:** The Internxt backend may not work with all account types. Please refer to [Internxt plan details](https://internxt.com/pricing) or contact [Internxt support](https://help.internxt.com) to verify rclone compatibility with your subscription.
|
||||
|
||||
## Configuration
|
||||
|
||||
Here is an example of how to make a remote called `internxt`. Run `rclone config` and follow the prompts:
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
name> internxt
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
[snip]
|
||||
XX / Internxt Drive
|
||||
\ "internxt"
|
||||
[snip]
|
||||
Storage> internxt
|
||||
|
||||
Option email.
|
||||
Email of your Internxt account.
|
||||
Enter a value.
|
||||
email> user@example.com
|
||||
|
||||
Option pass.
|
||||
Password.
|
||||
Enter a value.
|
||||
password>
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: internxt
|
||||
- email: user@example.com
|
||||
- pass: *** ENCRYPTED ***
|
||||
Keep this "internxt" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
If you have two-factor authentication enabled on your Internxt account, you will be prompted to enter the code during login.
|
||||
|
||||
### Security Considerations
|
||||
|
||||
The authentication process stores your password and mnemonic in the rclone configuration file. It is **strongly recommended** to encrypt your rclone config to protect these sensitive credentials:
|
||||
|
||||
```
|
||||
rclone config password
|
||||
```
|
||||
|
||||
This will prompt you to set a password that encrypts your entire configuration file.
|
||||
|
||||
## Usage Examples
|
||||
|
||||
```
|
||||
# List files
|
||||
rclone ls internxt:
|
||||
|
||||
# Copy files to Internxt
|
||||
rclone copy /local/path internxt:remote/path
|
||||
|
||||
# Sync local directory to Internxt
|
||||
rclone sync /local/path internxt:remote/path
|
||||
|
||||
# Mount Internxt Drive as a local filesystem
|
||||
rclone mount internxt: /path/to/mountpoint
|
||||
|
||||
# Check storage usage
|
||||
rclone about internxt:
|
||||
```
|
||||
|
||||
### Modification times and hashes
|
||||
|
||||
The Internxt backend does not support hashes.
|
||||
|
||||
Modification times are read from the server but cannot be set. The backend reports `ModTimeNotSupported` precision, so modification times will not be used for sync comparisons.
|
||||
|
||||
### Restricted filename characters
|
||||
|
||||
The Internxt backend replaces the [default restricted characters
|
||||
set](/overview/#restricted-characters).
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/internxt/internxt.go then run make backenddocs" >}}
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to internxt (Internxt Drive).
|
||||
|
||||
#### --internxt-skip-hash-validation
|
||||
|
||||
Skip hash validation when downloading files.
|
||||
|
||||
By default, hash validation is disabled. Set this to false to enable validation.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: skip_hash_validation
|
||||
- Env Var: RCLONE_INTERNXT_SKIP_HASH_VALIDATION
|
||||
- Type: bool
|
||||
- Default: true
|
||||
@@ -39,7 +39,6 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| HTTP | - | R | No | No | R | R |
|
||||
| iCloud Drive | - | R | No | No | - | - |
|
||||
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
|
||||
| Internxt | - | - | No | No | - | - |
|
||||
| Jottacloud | MD5 | R/W | Yes | No | R | RW |
|
||||
| Koofr | MD5 | - | Yes | No | - | - |
|
||||
| Linkbox | - | R | No | No | - | - |
|
||||
|
||||
@@ -71,7 +71,7 @@
|
||||
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
|
||||
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>
|
||||
<a class="dropdown-item" href="/s3/#filelu-s5"><i class="fa fa-folder fa-fw"></i> FileLu S5 (S3-Compatible)</a>
|
||||
<a class="dropdown-item" href="/filen/"><i class="fa fa-solid fa-f fa-fw"></i> Filen</a>
|
||||
<a class="dropdown-item" href="/filen/"><i class="fa fa-solid fa-f"></i> Filen</a>
|
||||
<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
|
||||
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
|
||||
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
|
||||
|
||||
@@ -90,14 +90,14 @@ func validateHour(HHMM string) error {
|
||||
return fmt.Errorf("invalid hour in time specification %q: %v", HHMM, err)
|
||||
}
|
||||
if hh < 0 || hh > 23 {
|
||||
return fmt.Errorf("invalid hour (must be between 00 and 23): %d", hh)
|
||||
return fmt.Errorf("invalid hour (must be between 00 and 23): %q", hh)
|
||||
}
|
||||
mm, err := strconv.Atoi(HHMM[3:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
|
||||
}
|
||||
if mm < 0 || mm > 59 {
|
||||
return fmt.Errorf("invalid minute (must be between 00 and 59): %d", mm)
|
||||
return fmt.Errorf("invalid minute (must be between 00 and 59): %q", hh)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/errcount"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
@@ -626,6 +627,7 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
|
||||
remotes = make(chan string, checkers)
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
)
|
||||
ec := errcount.New()
|
||||
for range checkers {
|
||||
g.Go(func() (err error) {
|
||||
var entries = make(fs.DirEntries, 1)
|
||||
@@ -634,7 +636,8 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// Skip files that are not found
|
||||
} else if err != nil {
|
||||
return err
|
||||
fs.Errorf(remote, "--files-from failed to find file: %v", err)
|
||||
ec.Add(err)
|
||||
} else {
|
||||
err = callback(entries)
|
||||
if err != nil {
|
||||
@@ -654,7 +657,8 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
|
||||
}
|
||||
}
|
||||
close(remotes)
|
||||
return g.Wait()
|
||||
ec.Add(g.Wait())
|
||||
return ec.Err("failed to read --files-from files")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -394,7 +394,7 @@ func TestNewFilterMakeListR(t *testing.T) {
|
||||
// Now check an error is returned from NewObject
|
||||
require.NoError(t, f.AddFile("error"))
|
||||
err = listR(context.Background(), "", listRcallback)
|
||||
require.EqualError(t, err, assert.AnError.Error())
|
||||
require.EqualError(t, err, "failed to read --files-from files: assert.AnError general error for testing")
|
||||
|
||||
// The checker will exit by the error above
|
||||
ci := fs.GetConfig(context.Background())
|
||||
@@ -403,7 +403,7 @@ func TestNewFilterMakeListR(t *testing.T) {
|
||||
// Now check an error is returned from NewObject
|
||||
require.NoError(t, f.AddFile("error"))
|
||||
err = listR(context.Background(), "", listRcallback)
|
||||
require.EqualError(t, err, assert.AnError.Error())
|
||||
require.EqualError(t, err, "failed to read --files-from files: assert.AnError general error for testing")
|
||||
}
|
||||
|
||||
func TestNewFilterMinSize(t *testing.T) {
|
||||
|
||||
@@ -701,7 +701,6 @@ func Run(t *testing.T, opt *Opt) {
|
||||
if opt.SkipLeadingDot && test.name == "leading dot" {
|
||||
t.Skip("Skipping " + test.name)
|
||||
}
|
||||
|
||||
// turn raw strings into Standard encoding
|
||||
fileName := encoder.Standard.Encode(test.path)
|
||||
dirName := fileName
|
||||
|
||||
@@ -675,15 +675,9 @@ backends:
|
||||
# with the parent backend having a different precision.
|
||||
- TestServerSideCopyOverSelf
|
||||
- TestServerSideMoveOverSelf
|
||||
- backend: "internxt"
|
||||
remote: "TestInternxt:"
|
||||
fastlist: false
|
||||
ignore:
|
||||
- TestRWFileHandleWriteNoWrite
|
||||
- backend: "drime"
|
||||
remote: "TestDrime:"
|
||||
ignoretests:
|
||||
# The TestBisyncRemoteLocal/check_access_filters tests fail due to duplicated objects
|
||||
- cmd/bisync
|
||||
fastlist: false
|
||||
extratime: 2.0
|
||||
|
||||
18
go.mod
18
go.mod
@@ -42,10 +42,10 @@ require (
|
||||
github.com/go-chi/chi/v5 v5.2.3
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
|
||||
github.com/go-git/go-billy/v5 v5.6.2
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hanwen/go-fuse/v2 v2.9.0
|
||||
github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0
|
||||
github.com/henrybear327/go-proton-api v1.0.0
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4
|
||||
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
|
||||
github.com/josephspurrier/goversioninfo v1.5.0
|
||||
@@ -69,8 +69,6 @@ require (
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.23
|
||||
github.com/rclone/Proton-API-Bridge v1.0.1-0.20260127174007-77f974840d11
|
||||
github.com/rclone/go-proton-api v1.0.1-0.20260127173028-eb465cac3b18
|
||||
github.com/rclone/gofakes3 v0.0.4
|
||||
github.com/rfjakob/eme v1.1.2
|
||||
github.com/rivo/uniseg v0.4.7
|
||||
@@ -83,7 +81,7 @@ require (
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20251031123324-a804aaa87491
|
||||
github.com/unknwon/goconfig v1.0.0
|
||||
github.com/willscott/go-nfs v0.0.3
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20260126094232-f2c4fccdb286
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8
|
||||
github.com/xanzy/ssh-agent v0.3.3
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78
|
||||
@@ -157,7 +155,6 @@ require (
|
||||
github.com/creasty/defaults v1.8.0 // indirect
|
||||
github.com/cronokirby/saferith v0.33.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/disintegration/imaging v1.6.2 // indirect
|
||||
github.com/dromara/dongle v1.0.1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
@@ -181,6 +178,7 @@ require (
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/gofrs/flock v0.13.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
|
||||
@@ -245,7 +243,6 @@ require (
|
||||
github.com/tinylib/msgp v1.5.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.15 // indirect
|
||||
github.com/tklauser/numcpus v0.10.0 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
@@ -259,14 +256,13 @@ require (
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
|
||||
golang.org/x/image v0.32.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
|
||||
google.golang.org/grpc v1.76.0 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
moul.io/http2curl/v2 v2.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
storj.io/common v0.0.0-20251107171817-6221ae45072c // indirect
|
||||
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
|
||||
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
|
||||
@@ -275,8 +271,8 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/IBM/go-sdk-core/v5 v5.18.5
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/IBM/go-sdk-core/v5 v5.21.0
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||
github.com/pkg/xattr v0.4.12
|
||||
|
||||
35
go.sum
35
go.sum
@@ -65,15 +65,15 @@ github.com/FilenCloudDienste/filen-sdk-go v0.0.35 h1:geuYpD/1ZXSp1H3kdW7si+KRUIr
|
||||
github.com/FilenCloudDienste/filen-sdk-go v0.0.35/go.mod h1:0cBhKXQg49XbKZZfk5TCDa3sVLP+xMxZTWL+7KY0XR0=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE=
|
||||
github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARNW60c=
|
||||
github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU=
|
||||
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
|
||||
github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
||||
@@ -226,8 +226,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||
github.com/diskfs/go-diskfs v1.7.0 h1:vonWmt5CMowXwUc79jWyGrf2DIMeoOjkLlMnQYGVOs8=
|
||||
github.com/diskfs/go-diskfs v1.7.0/go.mod h1:LhQyXqOugWFRahYUSw47NyZJPezFzB9UELwhpszLP/k=
|
||||
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
|
||||
@@ -422,11 +420,13 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0LTKWQciUm8PMZb0=
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
|
||||
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
|
||||
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e h1:zfuAdtuqzhm5+iI3Mac5NcBWHlX44d1unGkaQHB5F3U=
|
||||
github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e/go.mod h1:jpF/MwuBg+opa4Q9izanNl8KzdtYhfBoZWyv70vqmgc=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
|
||||
@@ -539,8 +539,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.104.0 h1:l9awEvzWvxmYhy/97A0hZ87pa7BncYXmcO/S8+rvgK0=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.104.0/go.mod h1:oB8jFGVc/7/zJ+DbleE8MzGHjhs2ioCz5stRTdZdIcY=
|
||||
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
|
||||
@@ -590,10 +590,6 @@ github.com/quic-go/quic-go v0.53.0 h1:QHX46sISpG2S03dPeZBgVIZp8dGagIaiu2FiVYvpCZ
|
||||
github.com/quic-go/quic-go v0.53.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
|
||||
github.com/rclone/Proton-API-Bridge v1.0.1-0.20260127174007-77f974840d11 h1:4MI2alxM/Ye2gIRBlYf28JGWTipZ4Zz7yAziPKrttjs=
|
||||
github.com/rclone/Proton-API-Bridge v1.0.1-0.20260127174007-77f974840d11/go.mod h1:3HLX7dwZgvB7nt+Yl/xdzVPcargQ1yBmJEUg3n+jMKM=
|
||||
github.com/rclone/go-proton-api v1.0.1-0.20260127173028-eb465cac3b18 h1:Lc+d3ISfQaMJKWZOE7z4ZSY4RVmdzbn1B0IM8xN18qM=
|
||||
github.com/rclone/go-proton-api v1.0.1-0.20260127173028-eb465cac3b18/go.mod h1:LB2kCEaZMzNn3ocdz+qYfxXmuLxxN0ka62KJd2x53Bc=
|
||||
github.com/rclone/gofakes3 v0.0.4 h1:LswpC49VY/UJ1zucoL5ktnOEX6lq3qK7e1aFIAfqCbk=
|
||||
github.com/rclone/gofakes3 v0.0.4/go.mod h1:j/UoS+2/Mr7xAlfKhyVC58YyFQmh9uoQA5YZQXQUqmg=
|
||||
github.com/relvacode/iso8601 v1.7.0 h1:BXy+V60stMP6cpswc+a93Mq3e65PfXCgDFfhvNNGrdo=
|
||||
@@ -674,8 +670,6 @@ github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDH
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
@@ -687,8 +681,8 @@ github.com/willscott/go-nfs v0.0.3 h1:Z5fHVxMsppgEucdkKBN26Vou19MtEM875NmRwj156R
|
||||
github.com/willscott/go-nfs v0.0.3/go.mod h1:VhNccO67Oug787VNXcyx9JDI3ZoSpqoKMT/lWMhUIDg=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886 h1:DtrBtkgTJk2XGt4T7eKdKVkd9A5NCevN2e4inLXtsqA=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20260126094232-f2c4fccdb286 h1:tw5GqRXqExB/xghPoPLtVujBe9w9Pg1G78tvXCJNJAA=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20260126094232-f2c4fccdb286/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 h1:aSOo0k+aLWdhUQiUxzv4cZ7cUp3OLP+Qx7cjs6OUxME=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
@@ -782,9 +776,6 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2
|
||||
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ=
|
||||
golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -1135,6 +1126,8 @@ moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHc
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
storj.io/common v0.0.0-20251107171817-6221ae45072c h1:UDXSrdeLJe3QFouavSW10fYdpclK0YNu3KvQHzqq2+k=
|
||||
storj.io/common v0.0.0-20251107171817-6221ae45072c/go.mod h1:XNX7uykja6aco92y2y8RuqaXIDRPpt1YA2OQDKlKEUk=
|
||||
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro=
|
||||
|
||||
@@ -44,7 +44,7 @@ omit symbol table and debug information, reducing size by about 25% on Linux and
|
||||
|
||||
Note that on macOS and Windows the mount functions will not be available unless
|
||||
you add additional argument `-tags cmount`. On Windows this also requires you to
|
||||
first install the third party utility [WinFsp](https://winfsp.dev),
|
||||
first install the third party utility [WinFsp](http://www.secfs.net/winfsp/),
|
||||
with the "Developer" feature selected, and to set environment variable CPATH
|
||||
pointing to the fuse include directory within the WinFsp installation
|
||||
(typically `C:\Program Files (x86)\WinFsp\inc\fuse`). See also the
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -111,9 +110,6 @@ func TestWriteFileDup(t *testing.T) {
|
||||
|
||||
var dupFd uintptr
|
||||
dupFd, err = writeTestDup(fh.Fd())
|
||||
if err == vfs.ENOSYS {
|
||||
t.Skip("dup not supported on this platform")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
dupFile := os.NewFile(dupFd, fh.Name())
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build !linux && !darwin && !freebsd && !openbsd && !windows
|
||||
//go:build !linux && !darwin && !freebsd && !windows
|
||||
|
||||
package vfstest
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build linux || darwin || freebsd || openbsd
|
||||
//go:build linux || darwin || freebsd
|
||||
|
||||
package vfstest
|
||||
|
||||
|
||||
Reference in New Issue
Block a user