mirror of https://github.com/rclone/rclone.git
synced 2026-01-30 16:24:01 +00:00

Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | e2a079d73f |  |
|  | 7d7f82854f |  |
|  | b6113a505f |  |
|  | 2a754ef2fa |  |
|  | 85d13c4e34 |  |
|  | 49b4ca3412 |  |
@@ -63,6 +63,7 @@ directories to and from different cloud storage providers.
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Internxt [:page_facing_up:](https://rclone.org/internxt/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
@@ -35,6 +35,7 @@ import (
	_ "github.com/rclone/rclone/backend/iclouddrive"
	_ "github.com/rclone/rclone/backend/imagekit"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/internxt"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
	_ "github.com/rclone/rclone/backend/linkbox"
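The blank import above is all that is needed to activate the backend: importing the package for its side effects runs its `init()`, which registers the backend with rclone's registry (see `backend/internxt/internxt.go` below). A minimal sketch of that pattern, with an illustrative package name:

```go
// Hypothetical minimal backend registration. The package name "mybackend" is
// illustrative; the field set mirrors the internxt init() shown below.
package mybackend

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Example backend",
		NewFs:       NewFs,
	})
}

// NewFs has the constructor signature rclone expects for a backend.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented") // stub for the sketch
}
```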
@@ -733,6 +733,17 @@ two accounts.
			Advanced: true,
			Default:  rwOff,
			Examples: rwExamples,
		}, {
			Name: "metadata_enforce_expansive_access",
			Help: `Whether the request should enforce expansive access rules.

From Feb 2026 this flag will be set by default so this flag can be used for
testing before then.

See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access
`,
			Advanced: true,
			Default:  false,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -812,6 +823,7 @@ type Options struct {
	MetadataOwner          rwChoice             `config:"metadata_owner"`
	MetadataPermissions    rwChoice             `config:"metadata_permissions"`
	MetadataLabels         rwChoice             `config:"metadata_labels"`
	EnforceExpansiveAccess bool                 `config:"metadata_enforce_expansive_access"`
	Enc                    encoder.MultiEncoder `config:"encoding"`
	EnvAuth                bool                 `config:"env_auth"`
}
@@ -3092,6 +3104,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
		_, err = f.svc.Permissions.Create(id, permission).
			Fields("").
			SupportsAllDrives(true).
			EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
@@ -149,6 +149,7 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
		_, err := f.svc.Permissions.Create(info.Id, perm).
			SupportsAllDrives(true).
			SendNotificationEmail(false).
			EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
@@ -483,6 +484,7 @@ func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err
			SupportsAllDrives(true).
			TransferOwnership(true).
			// SendNotificationEmail(false). - required apparently!
			EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
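The Google Drive hunks above thread the new `metadata_enforce_expansive_access` option through each `Permissions.Create` call. Going by rclone's usual mapping of backend option names to flags (the flag spelling below is inferred from that convention, not shown in this diff), it could be exercised ahead of the Feb 2026 default like so:

```
rclone link --drive-metadata-enforce-expansive-access drive:path/to/file
```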
backend/internxt/auth.go (new file, 146 lines)

@@ -0,0 +1,146 @@
// Package internxt provides authentication handling
package internxt

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
	internxtauth "github.com/internxt/rclone-adapter/auth"
	internxtconfig "github.com/internxt/rclone-adapter/config"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

type userInfo struct {
	RootFolderID string
	Bucket       string
	BridgeUser   string
	UserID       string
}

type userInfoConfig struct {
	Token string
}

// getUserInfo fetches user metadata from the refresh endpoint
func getUserInfo(ctx context.Context, cfg *userInfoConfig) (*userInfo, error) {
	// Call the refresh endpoint to get all user metadata
	refreshCfg := internxtconfig.NewDefaultToken(cfg.Token)
	resp, err := internxtauth.RefreshToken(ctx, refreshCfg)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch user info: %w", err)
	}

	if resp.User.Bucket == "" {
		return nil, errors.New("API response missing user.bucket")
	}
	if resp.User.RootFolderID == "" {
		return nil, errors.New("API response missing user.rootFolderId")
	}
	if resp.User.BridgeUser == "" {
		return nil, errors.New("API response missing user.bridgeUser")
	}
	if resp.User.UserID == "" {
		return nil, errors.New("API response missing user.userId")
	}

	info := &userInfo{
		RootFolderID: resp.User.RootFolderID,
		Bucket:       resp.User.Bucket,
		BridgeUser:   resp.User.BridgeUser,
		UserID:       resp.User.UserID,
	}

	fs.Debugf(nil, "User info: rootFolderId=%s, bucket=%s",
		info.RootFolderID, info.Bucket)

	return info, nil
}

// parseJWTExpiry extracts the expiry time from a JWT token string
func parseJWTExpiry(tokenString string) (time.Time, error) {
	parser := jwt.NewParser(jwt.WithoutClaimsValidation())
	token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to parse token: %w", err)
	}

	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		return time.Time{}, errors.New("invalid token claims")
	}

	exp, ok := claims["exp"].(float64)
	if !ok {
		return time.Time{}, errors.New("token missing expiration")
	}

	return time.Unix(int64(exp), 0), nil
}

// jwtToOAuth2Token converts a JWT string to an oauth2.Token with expiry
func jwtToOAuth2Token(jwtString string) (*oauth2.Token, error) {
	expiry, err := parseJWTExpiry(jwtString)
	if err != nil {
		return nil, err
	}

	return &oauth2.Token{
		AccessToken: jwtString,
		TokenType:   "Bearer",
		Expiry:      expiry,
	}, nil
}

// computeBasicAuthHeader creates the BasicAuthHeader for bucket operations
// Following the pattern from SDK's auth/access.go:96-102
func computeBasicAuthHeader(bridgeUser, userID string) string {
	sum := sha256.Sum256([]byte(userID))
	hexPass := hex.EncodeToString(sum[:])
	creds := fmt.Sprintf("%s:%s", bridgeUser, hexPass)
	return "Basic " + base64.StdEncoding.EncodeToString([]byte(creds))
}

// refreshJWTToken refreshes the token using Internxt's refresh endpoint
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
	currentToken, err := oauthutil.GetToken(name, m)
	if err != nil {
		return fmt.Errorf("failed to get current token: %w", err)
	}

	cfg := internxtconfig.NewDefaultToken(currentToken.AccessToken)
	resp, err := internxtauth.RefreshToken(ctx, cfg)
	if err != nil {
		return fmt.Errorf("refresh request failed: %w", err)
	}

	if resp.NewToken == "" {
		return errors.New("refresh response missing newToken")
	}

	// Convert JWT to oauth2.Token format
	token, err := jwtToOAuth2Token(resp.NewToken)
	if err != nil {
		return fmt.Errorf("failed to parse refreshed token: %w", err)
	}

	err = oauthutil.PutToken(name, m, token, false)
	if err != nil {
		return fmt.Errorf("failed to save token: %w", err)
	}

	if resp.User.Bucket != "" {
		m.Set("bucket", resp.User.Bucket)
	}

	fs.Debugf(name, "Token refreshed successfully, new expiry: %v", token.Expiry)
	return nil
}
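A standalone sketch of the two derivations above, using the same `golang-jwt/jwt/v5` calls as `parseJWTExpiry` and the same hashing steps as `computeBasicAuthHeader` (the token and credentials are made up for illustration):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Build a throwaway JWT so the unverified expiry extraction can be shown.
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"exp": float64(time.Now().Add(time.Hour).Unix()),
	})
	signed, err := tok.SignedString([]byte("dummy-key"))
	if err != nil {
		panic(err)
	}

	// Same steps as parseJWTExpiry: decode claims without verifying the signature.
	parser := jwt.NewParser(jwt.WithoutClaimsValidation())
	parsed, _, err := parser.ParseUnverified(signed, jwt.MapClaims{})
	if err != nil {
		panic(err)
	}
	exp := parsed.Claims.(jwt.MapClaims)["exp"].(float64)
	fmt.Println("token expires:", time.Unix(int64(exp), 0))

	// Same derivation as computeBasicAuthHeader, with made-up credentials.
	sum := sha256.Sum256([]byte("user-id-123"))
	creds := "bridge@example.com:" + hex.EncodeToString(sum[:])
	fmt.Println("Basic " + base64.StdEncoding.EncodeToString([]byte(creds)))
}
```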
backend/internxt/internxt.go (new file, 988 lines)

@@ -0,0 +1,988 @@
// Package internxt provides an interface to Internxt's Drive API
package internxt

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/internxt/rclone-adapter/auth"
	"github.com/internxt/rclone-adapter/buckets"
	config "github.com/internxt/rclone-adapter/config"
	sdkerrors "github.com/internxt/rclone-adapter/errors"
	"github.com/internxt/rclone-adapter/files"
	"github.com/internxt/rclone-adapter/folders"
	"github.com/internxt/rclone-adapter/users"
	"github.com/rclone/rclone/fs"
	rclone_config "github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
)

const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

// shouldRetry determines if an error should be retried
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	var httpErr *sdkerrors.HTTPError
	if errors.As(err, &httpErr) && httpErr.StatusCode() == 401 {
		return true, err
	}

	return fserrors.ShouldRetry(err), err
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "internxt",
		Description: "Internxt Drive",
		NewFs:       NewFs,
		Config:      Config,
		Options: []fs.Option{{
			Name:      "email",
			Help:      "Email of your Internxt account.",
			Required:  true,
			Sensitive: true,
		}, {
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
			Name:      "mnemonic",
			Help:      "Mnemonic (internal use only)",
			Required:  false,
			Advanced:  true,
			Sensitive: true,
			Hide:      fs.OptionHideBoth,
		}, {
			Name:     "skip_hash_validation",
			Default:  true,
			Advanced: true,
			Help:     "Skip hash validation when downloading files.\n\nBy default, hash validation is disabled. Set this to false to enable validation.",
		}, {
			Name:     rclone_config.ConfigEncoding,
			Help:     rclone_config.ConfigEncodingHelp,
			Advanced: true,
			Default: encoder.EncodeInvalidUtf8 |
				encoder.EncodeSlash |
				encoder.EncodeBackSlash |
				encoder.EncodeRightPeriod |
				encoder.EncodeDot |
				encoder.EncodeCrLf,
		}},
	})
}

// Config configures the Internxt remote by performing login
func Config(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
	email, _ := m.Get("email")
	if email == "" {
		return nil, errors.New("email is required")
	}

	pass, _ := m.Get("pass")
	if pass != "" {
		var err error
		pass, err = obscure.Reveal(pass)
		if err != nil {
			return nil, fmt.Errorf("couldn't decrypt password: %w", err)
		}
	}

	cfg := config.NewDefaultToken("")

	switch configIn.State {
	case "":
		// Check if 2FA is required
		loginResp, err := auth.Login(ctx, cfg, email)
		if err != nil {
			return nil, fmt.Errorf("failed to check login requirements: %w", err)
		}

		if loginResp.TFA {
			return fs.ConfigInput("2fa", "config_2fa", "Two-factor authentication code")
		}

		// No 2FA required, do login directly
		return fs.ConfigGoto("login")

	case "2fa":
		twoFA := configIn.Result
		if twoFA == "" {
			return fs.ConfigError("", "2FA code is required")
		}
		m.Set("2fa_code", twoFA)
		return fs.ConfigGoto("login")

	case "login":
		twoFA, _ := m.Get("2fa_code")

		loginResp, err := auth.DoLogin(ctx, cfg, email, pass, twoFA)
		if err != nil {
			return nil, fmt.Errorf("login failed: %w", err)
		}

		// Store mnemonic (obscured)
		m.Set("mnemonic", obscure.MustObscure(loginResp.User.Mnemonic))

		// Store token
		oauthToken, err := jwtToOAuth2Token(loginResp.NewToken)
		if err != nil {
			return nil, fmt.Errorf("failed to parse token: %w", err)
		}
		err = oauthutil.PutToken(name, m, oauthToken, true)
		if err != nil {
			return nil, fmt.Errorf("failed to save token: %w", err)
		}

		// Clear temporary 2FA code
		m.Set("2fa_code", "")

		return nil, nil
	}

	return nil, fmt.Errorf("unknown state %q", configIn.State)
}

// Options defines the configuration for this backend
type Options struct {
	Email              string               `config:"email"`
	Pass               string               `config:"pass"`
	TwoFA              string               `config:"2fa"`
	Mnemonic           string               `config:"mnemonic"`
	SkipHashValidation bool                 `config:"skip_hash_validation"`
	Encoding           encoder.MultiEncoder `config:"encoding"`
}

// Fs represents an Internxt remote
type Fs struct {
	name         string
	root         string
	opt          Options
	dirCache     *dircache.DirCache
	cfg          *config.Config
	features     *fs.Features
	pacer        *fs.Pacer
	tokenRenewer *oauthutil.Renew
	bridgeUser   string
	userID       string
}

// Object holds the data for a remote file object
type Object struct {
	f       *Fs
	remote  string
	id      string
	uuid    string
	size    int64
	modTime time.Time
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// String converts this Fs to a string
func (f *Fs) String() string { return fmt.Sprintf("Internxt root '%s'", f.root) }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Hashes returns type of hashes supported by Internxt
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet()
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}

	if opt.Mnemonic == "" {
		return nil, errors.New("mnemonic is required - please run: rclone config reconnect " + name + ":")
	}

	var err error
	opt.Mnemonic, err = obscure.Reveal(opt.Mnemonic)
	if err != nil {
		return nil, fmt.Errorf("couldn't decrypt mnemonic: %w", err)
	}

	oauthToken, err := oauthutil.GetToken(name, m)
	if err != nil {
		return nil, fmt.Errorf("failed to get token - please run: rclone config reconnect %s: - %w", name, err)
	}

	oauthConfig := &oauthutil.Config{
		TokenURL: "https://gateway.internxt.com/drive/users/refresh",
	}

	_, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create oauth client: %w", err)
	}

	cfg := config.NewDefaultToken(oauthToken.AccessToken)
	cfg.Mnemonic = opt.Mnemonic
	cfg.SkipHashValidation = opt.SkipHashValidation

	userInfo, err := getUserInfo(ctx, &userInfoConfig{Token: cfg.Token})
	if err != nil {
		return nil, fmt.Errorf("failed to fetch user info: %w", err)
	}

	cfg.RootFolderID = userInfo.RootFolderID
	cfg.Bucket = userInfo.Bucket
	cfg.BasicAuthHeader = computeBasicAuthHeader(userInfo.BridgeUser, userInfo.UserID)

	f := &Fs{
		name:       name,
		root:       strings.Trim(root, "/"),
		opt:        *opt,
		cfg:        cfg,
		bridgeUser: userInfo.BridgeUser,
		userID:     userInfo.UserID,
	}

	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	if ts != nil {
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			err := refreshJWTToken(ctx, name, m)
			if err != nil {
				return err
			}

			newToken, err := oauthutil.GetToken(name, m)
			if err != nil {
				return fmt.Errorf("failed to get refreshed token: %w", err)
			}
			f.cfg.Token = newToken.AccessToken
			f.cfg.BasicAuthHeader = computeBasicAuthHeader(f.bridgeUser, f.userID)

			return nil
		})
		f.tokenRenewer.Start()
	}

	f.dirCache = dircache.New(f.root, cfg.RootFolderID, f)

	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it might be a file
		newRoot, remote := dircache.SplitPath(f.root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.cfg.RootFolderID, &tempF)
		tempF.root = newRoot

		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			return f, nil
		}

		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				return f, nil
			}
			return nil, err
		}

		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// Mkdir creates a new directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	id, err := f.dirCache.FindDir(ctx, dir, true)
	if err != nil {
		return err
	}

	f.dirCache.Put(dir, id)

	return nil
}

// Rmdir removes a directory
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("cannot remove root directory")
	}

	id, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return fs.ErrorDirNotFound
	}

	// Check if directory is empty
	var childFolders []folders.Folder
	err = f.pacer.Call(func() (bool, error) {
		var err error
		childFolders, err = folders.ListAllFolders(ctx, f.cfg, id)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	if len(childFolders) > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	var childFiles []folders.File
	err = f.pacer.Call(func() (bool, error) {
		var err error
		childFiles, err = folders.ListAllFiles(ctx, f.cfg, id)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	if len(childFiles) > 0 {
		return fs.ErrorDirectoryNotEmpty
	}

	// Delete the directory
	err = f.pacer.Call(func() (bool, error) {
		err := folders.DeleteFolder(ctx, f.cfg, id)
		if err != nil && strings.Contains(err.Error(), "404") {
			return false, fs.ErrorDirNotFound
		}
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}

	f.dirCache.FlushDir(dir)
	return nil
}

// FindLeaf looks for a sub-folder named `leaf` under the Internxt folder `pathID`.
// If found, it returns its UUID and true. If not found, returns "", false.
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
	var entries []folders.Folder
	err := f.pacer.Call(func() (bool, error) {
		var err error
		entries, err = folders.ListAllFolders(ctx, f.cfg, pathID)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return "", false, err
	}
	for _, e := range entries {
		if f.opt.Encoding.ToStandardName(e.PlainName) == leaf {
			return e.UUID, true, nil
		}
	}
	return "", false, nil
}

// CreateDir creates a new directory
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
	request := folders.CreateFolderRequest{
		PlainName:        f.opt.Encoding.FromStandardName(leaf),
		ParentFolderUUID: pathID,
		ModificationTime: time.Now().UTC().Format(time.RFC3339),
	}

	var resp *folders.Folder
	err := f.pacer.CallNoRetry(func() (bool, error) {
		var err error
		resp, err = folders.CreateFolder(ctx, f.cfg, request)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		// If folder already exists (409 conflict), try to find it
		if strings.Contains(err.Error(), "409") || strings.Contains(err.Error(), "Conflict") {
			existingID, found, findErr := f.FindLeaf(ctx, pathID, leaf)
			if findErr == nil && found {
				fs.Debugf(f, "Folder %q already exists in %q, using existing UUID: %s", leaf, pathID, existingID)
				return existingID, nil
			}
		}
		return "", fmt.Errorf("can't create folder, %w", err)
	}

	return resp.UUID, nil
}

// preUploadCheck checks if a file exists in the given directory
// Returns the file metadata if it exists, nil if not
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string) (*folders.File, error) {
	// Parse name and extension from the leaf
	baseName := f.opt.Encoding.FromStandardName(leaf)
	name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
	ext := strings.TrimPrefix(filepath.Ext(baseName), ".")

	checkResult, err := files.CheckFilesExistence(ctx, f.cfg, directoryID, []files.FileExistenceCheck{
		{
			PlainName:    name,
			Type:         ext,
			OriginalFile: struct{}{},
		},
	})

	if err != nil {
		// If existence check fails, assume file doesn't exist to allow upload to proceed
		return nil, nil
	}

	if len(checkResult.Files) > 0 && checkResult.Files[0].FileExists() {
		result := checkResult.Files[0]
		if result.Type != ext {
			return nil, nil
		}

		existingUUID := result.UUID
		if existingUUID != "" {
			fileMeta, err := files.GetFileMeta(ctx, f.cfg, existingUUID)
			if err == nil && fileMeta != nil {
				return convertFileMetaToFile(fileMeta), nil
			}

			if err != nil {
				return nil, err
			}
		}
	}
	return nil, nil
}

// convertFileMetaToFile converts files.FileMeta to folders.File
func convertFileMetaToFile(meta *files.FileMeta) *folders.File {
	// FileMeta and folders.File have compatible structures
	return &folders.File{
		ID:               meta.ID,
		UUID:             meta.UUID,
		FileID:           meta.FileID,
		PlainName:        meta.PlainName,
		Type:             meta.Type,
		Size:             meta.Size,
		Bucket:           meta.Bucket,
		FolderUUID:       meta.FolderUUID,
		EncryptVersion:   meta.EncryptVersion,
		ModificationTime: meta.ModificationTime,
	}
}

// List lists a directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	dirID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	var out fs.DirEntries

	var foldersList []folders.Folder
	err = f.pacer.Call(func() (bool, error) {
		var err error
		foldersList, err = folders.ListAllFolders(ctx, f.cfg, dirID)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	for _, e := range foldersList {
		remote := filepath.Join(dir, f.opt.Encoding.ToStandardName(e.PlainName))
		out = append(out, fs.NewDir(remote, e.ModificationTime))
	}
	var filesList []folders.File
	err = f.pacer.Call(func() (bool, error) {
		var err error
		filesList, err = folders.ListAllFiles(ctx, f.cfg, dirID)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	for _, e := range filesList {
		remote := e.PlainName
		if len(e.Type) > 0 {
			remote += "." + e.Type
		}
		remote = filepath.Join(dir, f.opt.Encoding.ToStandardName(remote))
		out = append(out, newObjectWithFile(f, remote, &e))
	}
	return out, nil
}

// Put uploads a file
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()

	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			o := &Object{
				f:       f,
				remote:  remote,
				size:    src.Size(),
				modTime: src.ModTime(ctx),
			}
			return o, o.Update(ctx, in, src, options...)
		}
		return nil, err
	}

	// Check if file already exists
	existingFile, err := f.preUploadCheck(ctx, leaf, directoryID)
	if err != nil {
		return nil, err
	}

	// Create object - if file exists, populate it with existing metadata
	o := &Object{
		f:       f,
		remote:  remote,
		size:    src.Size(),
		modTime: src.ModTime(ctx),
	}

	if existingFile != nil {
		// File exists - populate object with existing metadata
		size, _ := existingFile.Size.Int64()
		o.id = existingFile.FileID
		o.uuid = existingFile.UUID
		o.size = size
		o.modTime = existingFile.ModificationTime
	}

	return o, o.Update(ctx, in, src, options...)
}

// Remove removes an object
func (f *Fs) Remove(ctx context.Context, remote string) error {
	obj, err := f.NewObject(ctx, remote)
	if err == nil {
		if err := obj.Remove(ctx); err != nil {
			return err
		}
		parent := path.Dir(remote)
		f.dirCache.FlushDir(parent)
		return nil
	}

	dirID, err := f.dirCache.FindDir(ctx, remote, false)
	if err != nil {
		return err
	}
	err = f.pacer.Call(func() (bool, error) {
		err := folders.DeleteFolder(ctx, f.cfg, dirID)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	f.dirCache.FlushDir(remote)
	return nil
}

// NewObject creates a new object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	parentDir := filepath.Dir(remote)

	if parentDir == "." {
		parentDir = ""
	}

	dirID, err := f.dirCache.FindDir(ctx, parentDir, false)
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}

	var files []folders.File
	err = f.pacer.Call(func() (bool, error) {
		var err error
		files, err = folders.ListAllFiles(ctx, f.cfg, dirID)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	targetName := filepath.Base(remote)
	for _, e := range files {
		name := e.PlainName
		if len(e.Type) > 0 {
			name += "." + e.Type
		}
		decodedName := f.opt.Encoding.ToStandardName(name)
		if decodedName == targetName {
			return newObjectWithFile(f, remote, &e), nil
		}
	}
	return nil, fs.ErrorObjectNotFound
}

// newObjectWithFile returns a new object by file info
func newObjectWithFile(f *Fs, remote string, file *folders.File) fs.Object {
	size, _ := file.Size.Int64()
	return &Object{
		f:       f,
		remote:  remote,
		id:      file.FileID,
		uuid:    file.UUID,
		size:    size,
		modTime: file.ModificationTime,
	}
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.f
}

// String returns the remote path
func (o *Object) String() string {
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size is the file length
func (o *Object) Size() int64 {
	return o.size
}

// ModTime is the last modified time (read-only)
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// Hash returns the hash value (not implemented)
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the modified time
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	return fs.ErrorCantSetModTime
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var internxtLimit *users.LimitResponse
	err := f.pacer.Call(func() (bool, error) {
		var err error
		internxtLimit, err = users.GetLimit(ctx, f.cfg)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	var internxtUsage *users.UsageResponse
	err = f.pacer.Call(func() (bool, error) {
		var err error
		internxtUsage, err = users.GetUsage(ctx, f.cfg)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}

	usage := &fs.Usage{
		Used: fs.NewUsageValue(internxtUsage.Drive),
	}

	usage.Total = fs.NewUsageValue(internxtLimit.MaxSpaceBytes)
	usage.Free = fs.NewUsageValue(*usage.Total - *usage.Used)

	return usage, nil
}

// Shutdown the backend, closing any background tasks and any cached
// connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	buckets.WaitForPendingThumbnails()

	if f.tokenRenewer != nil {
		f.tokenRenewer.Shutdown()
	}
	return nil
}

// Open opens a file for streaming
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	fs.FixRangeOption(options, o.size)
	rangeValue := ""
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, rangeValue = option.Header()
		}
	}

	if o.size == 0 {
		return io.NopCloser(bytes.NewReader(nil)), nil
	}

	var stream io.ReadCloser
	err := o.f.pacer.Call(func() (bool, error) {
		var err error
		stream, err = buckets.DownloadFileStream(ctx, o.f.cfg, o.id, rangeValue)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	return stream, nil
}

// Update updates an existing file or creates a new one
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	remote := o.remote

	origBaseName := filepath.Base(remote)
	origName := strings.TrimSuffix(origBaseName, filepath.Ext(origBaseName))
	origType := strings.TrimPrefix(filepath.Ext(origBaseName), ".")

	// Create directory if it doesn't exist
	_, dirID, err := o.f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return err
	}

	// rename based rollback pattern
	// old file is preserved until new upload succeeds

	var backupUUID string
	var backupName, backupType string
	oldUUID := o.uuid

	// Step 1: If file exists, rename to backup (preserves old file during upload)
	if oldUUID != "" {
		// Generate unique backup name
		baseName := filepath.Base(remote)
		name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
		ext := strings.TrimPrefix(filepath.Ext(baseName), ".")

		backupSuffix := fmt.Sprintf(".rclone-backup-%s", random.String(8))
		backupName = o.f.opt.Encoding.FromStandardName(name + backupSuffix)
		backupType = ext

		// Rename existing file to backup name
		err = o.f.pacer.Call(func() (bool, error) {
			err := files.RenameFile(ctx, o.f.cfg, oldUUID, backupName, backupType)
			if err != nil {
				// Handle 409 Conflict: Treat as success.
				var httpErr *sdkerrors.HTTPError
				if errors.As(err, &httpErr) && httpErr.StatusCode() == 409 {
					return false, nil
				}
			}
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return fmt.Errorf("failed to rename existing file to backup: %w", err)
		}
		backupUUID = oldUUID

		fs.Debugf(o.f, "Renamed existing file %s to backup %s.%s (UUID: %s)", remote, backupName, backupType, backupUUID)
	}

	var meta *buckets.CreateMetaResponse
	err = o.f.pacer.CallNoRetry(func() (bool, error) {
		var err error
		meta, err = buckets.UploadFileStreamAuto(ctx,
			o.f.cfg,
			dirID,
			o.f.opt.Encoding.FromStandardName(filepath.Base(remote)),
			in,
			src.Size(),
			src.ModTime(ctx),
		)
		return shouldRetry(ctx, err)
	})

	if err != nil && isEmptyFileLimitError(err) {
		o.restoreBackupFile(ctx, backupUUID, origName, origType)
		return fs.ErrorCantUploadEmptyFiles
	}

	if err != nil {
		meta, err = o.recoverFromTimeoutConflict(ctx, err, remote, dirID)
	}

	if err != nil {
		o.restoreBackupFile(ctx, backupUUID, origName, origType)
		return err
	}

	// Update object metadata
	o.uuid = meta.UUID
	o.id = meta.FileID
	o.size = src.Size()
	o.remote = remote

	// Step 3: Upload succeeded - delete the backup file
	if backupUUID != "" {
		fs.Debugf(o.f, "Upload succeeded, deleting backup file %s.%s (UUID: %s)", backupName, backupType, backupUUID)
		err := o.f.pacer.Call(func() (bool, error) {
			err := files.DeleteFile(ctx, o.f.cfg, backupUUID)
			if err != nil {
				var httpErr *sdkerrors.HTTPError
				if errors.As(err, &httpErr) {
					// Treat 404 (Not Found) and 204 (No Content) as success
					switch httpErr.StatusCode() {
					case 404, 204:
						return false, nil
					}
				}
			}
			return shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(o.f, "Failed to delete backup file %s.%s (UUID: %s): %v. This may leave an orphaned backup file.",
				backupName, backupType, backupUUID, err)
			// Don't fail the upload just because backup deletion failed
		} else {
			fs.Debugf(o.f, "Successfully deleted backup file")
		}
	}

	return nil
}

// isTimeoutError checks if an error is a timeout using proper error type checking
func isTimeoutError(err error) bool {
	if errors.Is(err, context.DeadlineExceeded) {
		return true
	}
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		return true
	}
	return false
}

// isConflictError checks if an error indicates a file conflict (409)
func isConflictError(err error) bool {
	errMsg := err.Error()
	return strings.Contains(errMsg, "409") ||
		strings.Contains(errMsg, "Conflict") ||
		strings.Contains(errMsg, "already exists")
}

func isEmptyFileLimitError(err error) bool {
	errMsg := strings.ToLower(err.Error())
	return strings.Contains(errMsg, "can not have more empty files") ||
		strings.Contains(errMsg, "cannot have more empty files") ||
		strings.Contains(errMsg, "you can not have empty files")
}

// recoverFromTimeoutConflict attempts to recover from a timeout or conflict error
func (o *Object) recoverFromTimeoutConflict(ctx context.Context, uploadErr error, remote, dirID string) (*buckets.CreateMetaResponse, error) {
	if !isTimeoutError(uploadErr) && !isConflictError(uploadErr) {
		return nil, uploadErr
	}

	baseName := filepath.Base(remote)
	encodedName := o.f.opt.Encoding.FromStandardName(baseName)

	var meta *buckets.CreateMetaResponse
	checkErr := o.f.pacer.Call(func() (bool, error) {
		existingFile, err := o.f.preUploadCheck(ctx, encodedName, dirID)
		if err != nil {
			return shouldRetry(ctx, err)
		}
		if existingFile != nil {
			name := strings.TrimSuffix(baseName, filepath.Ext(baseName))
			ext := strings.TrimPrefix(filepath.Ext(baseName), ".")

			meta = &buckets.CreateMetaResponse{
				UUID:      existingFile.UUID,
				FileID:    existingFile.FileID,
				Name:      name,
				PlainName: name,
				Type:      ext,
				Size:      existingFile.Size,
			}
			o.id = existingFile.FileID
		}
		return false, nil
	})

	if checkErr != nil {
		return nil, uploadErr
	}

	if meta != nil {
		return meta, nil
	}

	return nil, uploadErr
}

// restoreBackupFile restores a backup file after upload failure
func (o *Object) restoreBackupFile(ctx context.Context, backupUUID, origName, origType string) {
	if backupUUID == "" {
		return
	}

	_ = o.f.pacer.Call(func() (bool, error) {
		err := files.RenameFile(ctx, o.f.cfg, backupUUID,
			o.f.opt.Encoding.FromStandardName(origName), origType)
		return shouldRetry(ctx, err)
	})
}

// Remove deletes a file
func (o *Object) Remove(ctx context.Context) error {
	return o.f.pacer.Call(func() (bool, error) {
		err := files.DeleteFile(ctx, o.f.cfg, o.uuid)
		return shouldRetry(ctx, err)
	})
}
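The most involved piece above is `Update`'s rename-based rollback: an existing file is first renamed to a unique backup name, the new content is uploaded, and the backup is then deleted on success or renamed back on failure. A distilled, self-contained sketch of that control flow (the in-memory `store` stands in for the remote API calls; the backup-suffix placement is simplified):

```go
package main

import (
	"errors"
	"fmt"
)

// store is an illustrative stand-in for the remote-file operations used in Update.
type store struct{ files map[string][]byte }

func (s *store) rename(from, to string) error {
	data, ok := s.files[from]
	if !ok {
		return errors.New("not found")
	}
	delete(s.files, from)
	s.files[to] = data
	return nil
}

func (s *store) upload(name string, data []byte) error {
	s.files[name] = data
	return nil
}

func (s *store) remove(name string) {
	delete(s.files, name)
}

// updateWithRollback mirrors the backup/upload/cleanup sequence from Update.
func updateWithRollback(s *store, name string, data []byte) error {
	backup := name + ".rclone-backup-XXXXXXXX" // unique random suffix in the real code
	hadOld := false
	if _, ok := s.files[name]; ok {
		// Step 1: preserve the old file under a backup name during the upload.
		if err := s.rename(name, backup); err != nil {
			return fmt.Errorf("failed to rename existing file to backup: %w", err)
		}
		hadOld = true
	}
	if err := s.upload(name, data); err != nil {
		if hadOld {
			_ = s.rename(backup, name) // restore the backup on failure
		}
		return err
	}
	if hadOld {
		s.remove(backup) // upload succeeded - delete the backup
	}
	return nil
}

func main() {
	s := &store{files: map[string][]byte{"a.txt": []byte("old")}}
	err := updateWithRollback(s, "a.txt", []byte("new"))
	fmt.Println(err, string(s.files["a.txt"]))
}
```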
backend/internxt/internxt_test.go (new file, 14 lines)

@@ -0,0 +1,14 @@
package internxt_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestInternxt:",
	})
}
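These tests follow rclone's standard integration-test harness; assuming a `TestInternxt:` remote is configured, they would typically be run from the repository root with:

```
go test -v ./backend/internxt -remote TestInternxt:
```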
@@ -61,6 +61,7 @@ docs = [
	"imagekit.md",
	"iclouddrive.md",
	"internetarchive.md",
	"internxt.md",
	"jottacloud.md",
	"koofr.md",
	"linkbox.md",
@@ -153,6 +153,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Internxt" home="https://internxt.com/" config="/internxt/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
@@ -1070,3 +1070,6 @@ put them back in again. -->
- Mikel Olasagasti Uranga <mikel@olasagasti.info>
- Nick Owens <mischief@offblast.org>
- hyusap <paulayush@gmail.com>
- jzunigax2 <125698953+jzunigax2@users.noreply.github.com>
- lullius <lullius@users.noreply.github.com>
- StarHack <StarHack@users.noreply.github.com>
@@ -61,6 +61,7 @@ See the following for detailed instructions for
- [HTTP](/http/)
- [iCloud Drive](/iclouddrive/)
- [Internet Archive](/internetarchive/)
- [Internxt](/internxt/)
- [Jottacloud](/jottacloud/)
- [Koofr](/koofr/)
- [Linkbox](/linkbox/)
docs/content/internxt.md (new file, 124 lines)

@@ -0,0 +1,124 @@
---
title: "Internxt Drive"
description: "Rclone docs for Internxt Drive"
versionIntroduced: "v1.73"
---

# {{< icon "fas fa-cloud" >}} Internxt Drive

[Internxt Drive](https://internxt.com) is a zero-knowledge encrypted cloud storage service.

Paths are specified as `remote:path`

Paths may be as deep as required, e.g. `remote:directory/subdirectory`.

## Limitations

**Note:** The Internxt backend may not work with all account types. Please refer to [Internxt plan details](https://internxt.com/pricing) or contact [Internxt support](https://help.internxt.com) to verify rclone compatibility with your subscription.

## Configuration

Here is an example of how to make a remote called `internxt`. Run `rclone config` and follow the prompts:

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> internxt
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
XX / Internxt Drive
   \ "internxt"
[snip]
Storage> internxt

Option email.
Email of your Internxt account.
Enter a value.
email> user@example.com

Option pass.
Password.
Enter a value.
password>

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: internxt
- email: user@example.com
- pass: *** ENCRYPTED ***
Keep this "internxt" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

If you have two-factor authentication enabled on your Internxt account, you will be prompted to enter the code during login.

### Security Considerations

The authentication process stores your password and mnemonic in the rclone configuration file. It is **strongly recommended** to encrypt your rclone config to protect these sensitive credentials:

```
rclone config password
```

This will prompt you to set a password that encrypts your entire configuration file.

## Usage Examples

```
# List files
rclone ls internxt:

# Copy files to Internxt
rclone copy /local/path internxt:remote/path

# Sync local directory to Internxt
rclone sync /local/path internxt:remote/path

# Mount Internxt Drive as a local filesystem
rclone mount internxt: /path/to/mountpoint

# Check storage usage
rclone about internxt:
```

### Modification times and hashes

The Internxt backend does not support hashes.

Modification times are read from the server but cannot be set. The backend reports `ModTimeNotSupported` precision, so modification times will not be used for sync comparisons.

### Restricted filename characters

The Internxt backend replaces the [default restricted characters
set](/overview/#restricted-characters).

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/internxt/internxt.go then run make backenddocs" >}}

### Advanced options

Here are the Advanced options specific to internxt (Internxt Drive).

#### --internxt-skip-hash-validation

Skip hash validation when downloading files.

By default, hash validation is disabled. Set this to false to enable validation.

Properties:

- Config: skip_hash_validation
- Env Var: RCLONE_INTERNXT_SKIP_HASH_VALIDATION
- Type: bool
- Default: true
@@ -39,6 +39,7 @@ Here is an overview of the major features of each cloud storage system.
| HTTP             | -                | R      | No  | No  | R | R   |
| iCloud Drive     | -                | R      | No  | No  | - | -   |
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No  | No  | - | RWU |
| Internxt         | -                | -      | No  | No  | - | -   |
| Jottacloud       | MD5              | R/W    | Yes | No  | R | RW  |
| Koofr            | MD5              | -      | Yes | No  | - | -   |
| Linkbox          | -                | R      | No  | No  | - | -   |
@@ -701,6 +701,7 @@ func Run(t *testing.T, opt *Opt) {
			if opt.SkipLeadingDot && test.name == "leading dot" {
				t.Skip("Skipping " + test.name)
			}

			// turn raw strings into Standard encoding
			fileName := encoder.Standard.Encode(test.path)
			dirName := fileName
@@ -675,6 +675,11 @@ backends:
    # with the parent backend having a different precision.
    - TestServerSideCopyOverSelf
    - TestServerSideMoveOverSelf
  - backend: "internxt"
    remote: "TestInternxt:"
    fastlist: false
    ignore:
      - TestRWFileHandleWriteNoWrite
  - backend: "drime"
    remote: "TestDrime:"
    ignoretests:
go.mod (12 changed lines)
@@ -42,8 +42,10 @@ require (
	github.com/go-chi/chi/v5 v5.2.3
	github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
	github.com/go-git/go-billy/v5 v5.6.2
	github.com/golang-jwt/jwt/v5 v5.3.0
	github.com/google/uuid v1.6.0
	github.com/hanwen/go-fuse/v2 v2.9.0
	github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e
	github.com/jcmturner/gokrb5/v8 v8.4.4
	github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3
	github.com/josephspurrier/goversioninfo v1.5.0
@@ -155,6 +157,7 @@ require (
	github.com/creasty/defaults v1.8.0 // indirect
	github.com/cronokirby/saferith v0.33.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/disintegration/imaging v1.6.2 // indirect
	github.com/dromara/dongle v1.0.1 // indirect
	github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
@@ -178,7 +181,6 @@ require (
	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
	github.com/gofrs/flock v0.13.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
	github.com/google/btree v1.1.3 // indirect
	github.com/google/s2a-go v0.1.9 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
@@ -243,6 +245,7 @@ require (
	github.com/tinylib/msgp v1.5.0 // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect
	github.com/tklauser/numcpus v0.10.0 // indirect
	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
	github.com/ulikunitz/xz v0.5.15 // indirect
	github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
@@ -256,13 +259,14 @@ require (
	go.yaml.in/yaml/v2 v2.4.3 // indirect
	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
	golang.org/x/image v0.32.0 // indirect
	golang.org/x/mod v0.29.0 // indirect
	golang.org/x/tools v0.38.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
	google.golang.org/grpc v1.76.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	moul.io/http2curl/v2 v2.3.0 // indirect
	sigs.k8s.io/yaml v1.6.0 // indirect
	storj.io/common v0.0.0-20251107171817-6221ae45072c // indirect
	storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
	storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
@@ -271,8 +275,8 @@
)

require (
	github.com/IBM/go-sdk-core/v5 v5.21.0
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/IBM/go-sdk-core/v5 v5.18.5
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/ProtonMail/go-crypto v1.3.0
	github.com/golang-jwt/jwt/v4 v4.5.2
	github.com/pkg/xattr v0.4.12
go.sum (25 changed lines)
@@ -65,15 +65,15 @@ github.com/FilenCloudDienste/filen-sdk-go v0.0.35 h1:geuYpD/1ZXSp1H3kdW7si+KRUIr
github.com/FilenCloudDienste/filen-sdk-go v0.0.35/go.mod h1:0cBhKXQg49XbKZZfk5TCDa3sVLP+xMxZTWL+7KY0XR0=
github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw=
github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE=
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
github.com/IBM/go-sdk-core/v5 v5.18.5 h1:g0JRl3sYXJczB/yuDlrN6x22LJ6jIxhp0Sa4ARNW60c=
github.com/IBM/go-sdk-core/v5 v5.18.5/go.mod h1:KonTFRR+8ZSgw5cxBSYo6E4WZoY1+7n1kfHM82VcjFU=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
@@ -226,6 +226,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/diskfs/go-diskfs v1.7.0 h1:vonWmt5CMowXwUc79jWyGrf2DIMeoOjkLlMnQYGVOs8=
github.com/diskfs/go-diskfs v1.7.0/go.mod h1:LhQyXqOugWFRahYUSw47NyZJPezFzB9UELwhpszLP/k=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
@@ -423,6 +425,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e h1:zfuAdtuqzhm5+iI3Mac5NcBWHlX44d1unGkaQHB5F3U=
github.com/internxt/rclone-adapter v0.0.0-20260127164739-694d3672176e/go.mod h1:jpF/MwuBg+opa4Q9izanNl8KzdtYhfBoZWyv70vqmgc=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -535,8 +539,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/oracle/oci-go-sdk/v65 v65.104.0 h1:l9awEvzWvxmYhy/97A0hZ87pa7BncYXmcO/S8+rvgK0=
github.com/oracle/oci-go-sdk/v65 v65.104.0/go.mod h1:oB8jFGVc/7/zJ+DbleE8MzGHjhs2ioCz5stRTdZdIcY=
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
@@ -670,6 +674,8 @@ github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDH
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -681,8 +687,6 @@ github.com/willscott/go-nfs v0.0.3 h1:Z5fHVxMsppgEucdkKBN26Vou19MtEM875NmRwj156R
github.com/willscott/go-nfs v0.0.3/go.mod h1:VhNccO67Oug787VNXcyx9JDI3ZoSpqoKMT/lWMhUIDg=
github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886 h1:DtrBtkgTJk2XGt4T7eKdKVkd9A5NCevN2e4inLXtsqA=
github.com/willscott/go-nfs-client v0.0.0-20251022144359-801f10d98886/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 h1:aSOo0k+aLWdhUQiUxzv4cZ7cUp3OLP+Qx7cjs6OUxME=
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/winfsp/cgofuse v1.6.1-0.20260126094232-f2c4fccdb286 h1:tw5GqRXqExB/xghPoPLtVujBe9w9Pg1G78tvXCJNJAA=
github.com/winfsp/cgofuse v1.6.1-0.20260126094232-f2c4fccdb286/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
@@ -778,6 +782,9 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.32.0 h1:6lZQWq75h7L5IWNk0r+SCpUJ6tUVd3v4ZHnbRKLkUDQ=
golang.org/x/image v0.32.0/go.mod h1:/R37rrQmKXtO6tYXAjtDLwQgFLHmhW+V6ayXlxzP2Pc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1128,8 +1135,6 @@ moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHc
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
storj.io/common v0.0.0-20251107171817-6221ae45072c h1:UDXSrdeLJe3QFouavSW10fYdpclK0YNu3KvQHzqq2+k=
storj.io/common v0.0.0-20251107171817-6221ae45072c/go.mod h1:XNX7uykja6aco92y2y8RuqaXIDRPpt1YA2OQDKlKEUk=
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro=