Mirror of https://github.com/rclone/rclone.git (synced 2026-01-06 10:33:34 +00:00)

Compare commits: v1.60.1...fix-6426-f (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | ba940d11ad | |
| | 7a34fe8d82 | |
14  .github/workflows/build.yml  vendored
@@ -97,12 +97,12 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Install Go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v2
        with:
          stable: 'false'
          go-version: ${{ matrix.go }}
@@ -162,7 +162,7 @@ jobs:
          env

      - name: Go module cache
        uses: actions/cache@v3
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -226,7 +226,7 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v2

      - name: Code quality test
        uses: golangci/golangci-lint-action@v3
@@ -242,18 +242,18 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Set up Go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v1
        with:
          go-version: 1.19.x

      - name: Go module cache
        uses: actions/cache@v3
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

@@ -12,7 +12,7 @@ jobs:
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Build and publish image

@@ -11,7 +11,7 @@ jobs:
    name: Build image job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
    name: Build docker plugin job
    steps:
      - name: Checkout master
        uses: actions/checkout@v3
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
2470  MANUAL.html  generated (file diff suppressed because it is too large)

2813  MANUAL.txt  generated (file diff suppressed because it is too large)
@@ -45,6 +45,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
@@ -62,20 +63,17 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -24,6 +24,7 @@ import (
	_ "github.com/rclone/rclone/backend/hdfs"
	_ "github.com/rclone/rclone/backend/hidrive"
	_ "github.com/rclone/rclone/backend/http"
	_ "github.com/rclone/rclone/backend/hubic"
	_ "github.com/rclone/rclone/backend/internetarchive"
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
@@ -34,7 +35,6 @@ import (
	_ "github.com/rclone/rclone/backend/netstorage"
	_ "github.com/rclone/rclone/backend/onedrive"
	_ "github.com/rclone/rclone/backend/opendrive"
	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
	_ "github.com/rclone/rclone/backend/pcloud"
	_ "github.com/rclone/rclone/backend/premiumizeme"
	_ "github.com/rclone/rclone/backend/putio"
@@ -44,7 +44,6 @@ import (
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/sia"
	_ "github.com/rclone/rclone/backend/smb"
	_ "github.com/rclone/rclone/backend/storj"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
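The only difference between the two sides of backend/all/all.go is which backends appear in this blank-import list (the hubic, oracleobjectstorage, and smb lines). Each `_` import exists solely so the backend package's init function runs and registers the backend with the core. Below is a minimal sketch of that side-effect registration pattern, collapsed into a single file with hypothetical names; in rclone the registry is fs.Register and each backend lives in its own package.

```go
package main

import "fmt"

// registry collects backends by name, playing the role fs.Register plays in rclone.
var registry = map[string]func() string{}

// register is a hypothetical stand-in for fs.Register.
func register(name string, factory func() string) {
	registry[name] = factory
}

// In rclone each backend calls fs.Register from its own init(); a blank import
// such as
//
//	_ "github.com/rclone/rclone/backend/hubic"
//
// is enough to pull that init() into the binary. The same idea is shown here
// inside one file.
func init() {
	register("hubic", func() string { return "hubic backend ready" })
}

func main() {
	for name, factory := range registry {
		fmt.Println(name+":", factory())
	}
}
```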
@@ -29,7 +29,6 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
)
@@ -368,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	if err != nil {
		return nil, err
	}
	meta, err := readMetadata(ctx, mo)
	if err != nil {
		return nil, fmt.Errorf("error decoding metadata: %w", err)
	meta := readMetadata(ctx, mo)
	if meta == nil {
		return nil, errors.New("error decoding metadata")
	}
	// Create our Object
	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
	if err != nil {
		return nil, err
	}
	return f.newObject(o, mo, meta), nil
	return f.newObject(o, mo, meta), err
}

// checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -681,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
		}
		return nil, err
	}
	return f.newObject(dataObject, mo, meta), nil
	return f.newObject(dataObject, mo, meta), err
}

// Put in to the remote path with the modTime given of the given size
@@ -1044,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}

// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
	// Open our meradata object
	rc, err := mo.Open(ctx)
	if err != nil {
		return nil, err
		return nil
	}
	defer fs.CheckClose(rc, &err)
	defer func() {
		err := rc.Close()
		if err != nil {
			fs.Errorf(mo, "Error closing object: %v", err)
		}
	}()
	jr := json.NewDecoder(rc)
	meta = new(ObjectMetadata)
	if err = jr.Decode(meta); err != nil {
		return nil, err
		return nil
	}
	return meta, nil
	return meta
}

// Remove removes this object
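In the hunk above, one side of the compare has readMetadata return (meta, err) and wrap the decode error, while the other returns only meta, so callers can report nothing more specific than "error decoding metadata". The following is a minimal sketch of that tradeoff with hypothetical types (not rclone's actual ObjectMetadata or fs.Object), showing what error context each style preserves.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// meta is a hypothetical stand-in for the compress backend's ObjectMetadata.
type meta struct {
	Mode int   `json:"mode"`
	Size int64 `json:"size"`
}

// readMetaErr mirrors the (meta, err) style: the decode error is wrapped and
// returned, so the caller can report why the metadata was unreadable.
func readMetaErr(r *strings.Reader) (*meta, error) {
	m := new(meta)
	if err := json.NewDecoder(r).Decode(m); err != nil {
		return nil, fmt.Errorf("error decoding metadata: %w", err)
	}
	return m, nil
}

// readMetaNil mirrors the meta-only style: any failure collapses to nil and the
// caller has to invent a generic error.
func readMetaNil(r *strings.Reader) *meta {
	m := new(meta)
	if json.NewDecoder(r).Decode(m) != nil {
		return nil
	}
	return m
}

func main() {
	bad := `{"mode": "not-a-number"}`

	if _, err := readMetaErr(strings.NewReader(bad)); err != nil {
		fmt.Println("with error return:", err) // detailed cause is preserved
	}
	if readMetaNil(strings.NewReader(bad)) == nil {
		fmt.Println("with nil return:", errors.New("error decoding metadata")) // cause is lost
	}
}
```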
@@ -1101,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	origName := o.Remote()
	if o.meta.Mode != Uncompressed || compressible {
		newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
		if err != nil {
			return err
		}
		if newObject.Object.Remote() != o.Object.Remote() {
			if removeErr := o.Object.Remove(ctx); removeErr != nil {
				return removeErr
@@ -1117,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		}
		// If we are, just update the object and metadata
		newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
		if err != nil {
			return err
		}
	}
	if err != nil {
		return err
	}
	// Update object metadata and return
	o.Object = newObject.Object
@@ -1130,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
	if o == nil {
		log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
	}
	return &Object{
		Object: o,
		f:      f,
@@ -1145,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object

// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
	if o == nil {
		log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
	}
	return &Object{
		Object: o,
		f:      f,
@@ -1175,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
		return err
	}
	if o.meta == nil {
		o.meta, err = readMetadata(ctx, o.mo)
		o.meta = readMetadata(ctx, o.mo)
	}
	return err
}
@@ -15,7 +15,7 @@ import (
	"sync"
	"time"

	"github.com/rclone/ftp"
	"github.com/jlaffaye/ftp"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
	const (
		fileSize    = 100000000        // 100 MiB
		idleTimeout = 1 * time.Second  // small because test server is local
		maxTime     = 10 * time.Second // prevent test hangup
		fileSize    = 100000000             // 100 MiB
		idleTimeout = 40 * time.Millisecond // small because test server is local
		maxTime     = 10 * time.Second      // prevent test hangup
	)

	if testing.Short() {
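The constants above bound the FTP upload test: a short idle timeout because the test server is local, and a hard maxTime so a stalled transfer cannot hang the run. A minimal, self-contained sketch of that bounding pattern is below; slowUpload is a hypothetical stand-in, not rclone's FTP code.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// slowUpload is a hypothetical stand-in for a large transfer.
func slowUpload(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the upload takes this long
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	const maxTime = 10 * time.Second // hard cap so a hung transfer cannot stall the test

	ctx, cancel := context.WithTimeout(context.Background(), maxTime)
	defer cancel()

	if err := slowUpload(ctx); err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Println("upload finished within the deadline")
}
```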
62  backend/hubic/auth.go  Normal file
@@ -0,0 +1,62 @@
package hubic

import (
	"context"
	"net/http"
	"time"

	"github.com/ncw/swift/v2"
	"github.com/rclone/rclone/fs"
)

// auth is an authenticator for swift
type auth struct {
	f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
	return &auth{
		f: f,
	}
}

// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
	const retries = 10
	for try := 1; try <= retries; try++ {
		err = a.f.getCredentials(context.TODO())
		if err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
		fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
	}
	return nil, err
}

// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
	return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
	return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
	return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
	return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
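The Request method above retries getCredentials up to 10 times, sleeping 100 ms between attempts, and returns the last error if every attempt fails. Below is a minimal generic sketch of that bounded-retry pattern; fetchCredentials here is a hypothetical stand-in, not the Hubic API call.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// fetchCredentials is a hypothetical stand-in for a call that may fail transiently.
func fetchCredentials(attempt int) error {
	if attempt < 3 {
		return errors.New("temporary failure")
	}
	return nil
}

// withRetries calls fn up to retries times, sleeping briefly between attempts,
// and returns the last error if every attempt failed.
func withRetries(retries int, delay time.Duration, fn func(attempt int) error) error {
	var err error
	for try := 1; try <= retries; try++ {
		if err = fn(try); err == nil {
			return nil
		}
		fmt.Printf("retrying %d/%d: %v\n", try, retries, err)
		time.Sleep(delay)
	}
	return err
}

func main() {
	if err := withRetries(10, 100*time.Millisecond, fetchCredentials); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("credentials fetched")
}
```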
200
backend/hubic/hubic.go
Normal file
200
backend/hubic/hubic.go
Normal file
@@ -0,0 +1,200 @@
|
||||
// Package hubic provides an interface to the Hubic object storage
|
||||
// system.
|
||||
package hubic
|
||||
|
||||
// This uses the normal swift mechanism to update the credentials and
|
||||
// ignores the expires field returned by the Hubic API. This may need
|
||||
// to be revisited after some actual experience.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
swiftLib "github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
|
||||
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: []string{
|
||||
"credentials.r", // Read OpenStack credentials
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://api.hubic.com/oauth/auth/",
|
||||
TokenURL: "https://api.hubic.com/oauth/token/",
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "hubic",
|
||||
Description: "Hubic",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
||||
})
|
||||
}
|
||||
|
||||
// credentials is the JSON returned from the Hubic API to read the
|
||||
// OpenStack credentials
|
||||
type credentials struct {
|
||||
Token string `json:"token"` // OpenStack token
|
||||
Endpoint string `json:"endpoint"` // OpenStack endpoint
|
||||
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
|
||||
}
|
||||
|
||||
// Fs represents a remote hubic
|
||||
type Fs struct {
|
||||
fs.Fs // wrapped Fs
|
||||
features *fs.Features // optional features
|
||||
client *http.Client // client for oauth api
|
||||
credentials credentials // returned from the Hubic API
|
||||
expires time.Time // time credentials expire
|
||||
}
|
||||
|
||||
// Object describes a swift object
|
||||
type Object struct {
|
||||
*swift.Object
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Object.String()
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.Fs == nil {
|
||||
return "Hubic"
|
||||
}
|
||||
return fmt.Sprintf("Hubic %s", f.Fs.String())
|
||||
}
|
||||
|
||||
// getCredentials reads the OpenStack Credentials using the Hubic API
|
||||
//
|
||||
// The credentials are read into the Fs
|
||||
func (f *Fs) getCredentials(ctx context.Context) (err error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
|
||||
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
err = decoder.Decode(&result)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// fs.Debugf(f, "Got credentials %+v", result)
|
||||
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
|
||||
return errors.New("couldn't read token, result and expired from credentials")
|
||||
}
|
||||
f.credentials = result
|
||||
expires, err := time.Parse(time.RFC3339, result.Expires)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.expires = expires
|
||||
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
client: client,
|
||||
}
|
||||
|
||||
// Make the swift Connection
|
||||
ci := fs.GetConfig(ctx)
|
||||
c := &swiftLib.Connection{
|
||||
Auth: newAuth(f),
|
||||
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(ctx),
|
||||
}
|
||||
err = c.Authenticate(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
|
||||
}
|
||||
|
||||
// Parse config into swift.Options struct
|
||||
opt := new(swift.Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make inner swift Fs from the connection
|
||||
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, err
|
||||
}
|
||||
f.Fs = swiftFs
|
||||
f.features = f.Fs.Features().Wrap(f)
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.Fs
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
)
|
||||
19  backend/hubic/hubic_test.go  Normal file
@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test

import (
	"testing"

	"github.com/rclone/rclone/backend/hubic"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:          "TestHubic:",
		NilObject:           (*hubic.Object)(nil),
		SkipFsCheckWrap:     true,
		SkipObjectCheckWrap: true,
	})
}
@@ -521,6 +521,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		name := fi.Name()
		mode := fi.Mode()
		newRemote := f.cleanRemote(dir, name)
		// Don't include non directory if not included
		// we leave directory filtering to the layer above
		if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
			continue
		}
		// Follow symlinks if required
		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
			localPath := filepath.Join(fsDirPath, name)
@@ -537,11 +542,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			}
			mode = fi.Mode()
		}
		// Don't include non directory if not included
		// we leave directory filtering to the layer above
		if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
			continue
		}
		if fi.IsDir() {
			// Ignore directories which are symlinks. These are junction points under windows which
			// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -1400,27 +1400,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
	if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
		if !filepath.IsAbs(s) {
	if runtime.GOOS == "windows" {
		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		} else {
			s = filepath.Clean(s)
		}
	}
	if runtime.GOOS == "windows" {
		s = filepath.ToSlash(s)
		vol := filepath.VolumeName(s)
		s = vol + enc.FromStandardPath(s[len(vol):])
		s = filepath.FromSlash(s)

		if !noUNC {
			// Convert to UNC
			s = file.UNCPath(s)
		}
		return s
	}
	if !filepath.IsAbs(s) {
		s2, err := filepath.Abs(s)
		if err == nil {
			s = s2
		}
	}
	s = enc.FromStandardPath(s)
	return s
}
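The two versions of cleanRootPath in the hunk above differ mainly in how a Windows root is absolutized, cleaned, and converted between slash forms before the UNC conversion. A minimal sketch of that normalization order using only the standard library follows; the encoding step and the UNC conversion are rclone-specific (encoder.MultiEncoder, lib/file.UNCPath) and are only represented by a comment here.

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"
	"strings"
)

// cleanRoot sketches the normalization order for a local root path:
// absolutize relative paths, clean them, and on Windows keep the volume
// name separate from the rest of the path while converting separators.
func cleanRoot(s string) string {
	if runtime.GOOS == "windows" {
		if !filepath.IsAbs(s) && !strings.HasPrefix(s, `\\`) {
			if abs, err := filepath.Abs(s); err == nil {
				s = abs
			}
		} else {
			s = filepath.Clean(s)
		}
		s = filepath.ToSlash(s)
		vol := filepath.VolumeName(s)
		// rclone additionally encodes the part after the volume name and
		// converts long paths to \\?\ UNC form here (lib/file.UNCPath).
		s = vol + s[len(vol):]
		return filepath.FromSlash(s)
	}
	if !filepath.IsAbs(s) {
		if abs, err := filepath.Abs(s); err == nil {
			s = abs
		}
	}
	return s
}

func main() {
	fmt.Println(cleanRoot("some/relative/dir"))
}
```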
@@ -5,41 +5,19 @@ package local

import (
	"fmt"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"golang.org/x/sys/unix"
)

var (
	statxCheckOnce         sync.Once
	readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
	statxCheckOnce.Do(func() {
		// Check statx() is available as it was only introduced in kernel 4.11
		// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
		var stat unix.Statx_t
		if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
			readMetadataFromFileFn = readMetadataFromFileStatx
		} else {
			readMetadataFromFileFn = readMetadataFromFileFstatat
		}
	})
	return readMetadataFromFileFn(o, m)
}

// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
	flags := unix.AT_SYMLINK_NOFOLLOW
	if o.fs.opt.FollowSymlinks {
		flags = 0
	}
	var stat unix.Statx_t
	// statx() was added to Linux in kernel 4.11
	err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
		unix.STATX_TYPE | // Want stx_mode & S_IFMT
		unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -67,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
	setTime("btime", stat.Btime)
	return nil
}

// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
	flags := unix.AT_SYMLINK_NOFOLLOW
	if o.fs.opt.FollowSymlinks {
		flags = 0
	}
	var stat unix.Stat_t
	// fstatat() was added to Linux in kernel 2.6.16
	// Go only supports 2.6.32 or later
	err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
	if err != nil {
		return err
	}
	m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
	m.Set("uid", fmt.Sprintf("%d", stat.Uid))
	m.Set("gid", fmt.Sprintf("%d", stat.Gid))
	if stat.Rdev != 0 {
		m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
	}
	setTime := func(key string, t unix.Timespec) {
		// The types of t.Sec and t.Nsec vary from int32 to int64 on
		// different Linux architectures so we need to cast them to
		// int64 here and hence need to quiet the linter about
		// unecessary casts.
		//
		// nolint: unconvert
		m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
	}
	setTime("atime", stat.Atim)
	setTime("mtime", stat.Mtim)
	return nil
}
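The code in this hunk picks between a statx()-based and an fstatat()-based metadata reader exactly once per process, using sync.Once as a cheap runtime feature probe (statx only exists from kernel 4.11). Below is a minimal, portable sketch of that dispatch pattern; the probe here is a hypothetical stand-in, whereas the real code checks for ENOSYS from a trial syscall.

```go
package main

import (
	"fmt"
	"sync"
)

var (
	checkOnce sync.Once
	readFn    func(path string) (string, error)
)

// supportsFastPath is a hypothetical stand-in for probing a kernel feature
// such as statx(); the real check issues the syscall once and tests for ENOSYS.
func supportsFastPath() bool { return true }

func readFast(path string) (string, error) { return "fast metadata for " + path, nil }
func readSlow(path string) (string, error) { return "fallback metadata for " + path, nil }

// readMetadata picks an implementation on first use and reuses it afterwards.
func readMetadata(path string) (string, error) {
	checkOnce.Do(func() {
		if supportsFastPath() {
			readFn = readFast
		} else {
			readFn = readSlow
		}
	})
	return readFn(path)
}

func main() {
	out, _ := readMetadata("/tmp/example")
	fmt.Println(out)
}
```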
@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
	w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteP64 writes an signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
	w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
	buf := []byte(str)
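One side of this hunk defines WriteP64, which encodes a signed value by casting it to uint64 and calling PutUvarint, making its output byte-for-byte identical to WritePu64 as shown above; the other side has no WriteP64 and writes the modification time with WritePu64 directly (see the addFileMetaData hunk further down). The small sketch below shows how the unsigned encoding used here differs from Go's zig-zag signed varint, which only matters when negative values have to stay compact.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	val := int64(1667995200) // an ordinary Unix timestamp

	// PutUvarint(uint64(v)): what both WritePu64 and WriteP64 do in the hunk above.
	n := binary.PutUvarint(buf, uint64(val))
	fmt.Printf("unsigned varint: % x\n", buf[:n])

	// PutVarint(v): zig-zag encoding, a different wire format.
	n = binary.PutVarint(buf, val)
	fmt.Printf("signed varint:   % x\n", buf[:n])

	// A negative value shows the divergence: casting to uint64 yields a
	// ten-byte encoding, while zig-zag stays short.
	neg := int64(-1)
	n = binary.PutUvarint(buf, uint64(neg))
	fmt.Printf("uint64(-1) varint length: %d bytes\n", n)
	n = binary.PutVarint(buf, neg)
	fmt.Printf("zig-zag -1 varint length: %d bytes\n", n)
}
```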
@@ -91,13 +91,8 @@ func init() {
			Help:     "User name (usually email).",
			Required: true,
		}, {
			Name: "pass",
			Help: `Password.

This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
			Name:       "pass",
			Help:       "Password.",
			Required:   true,
			IsPassword: true,
		}, {
@@ -646,7 +641,12 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
		return nil, -1, err
	}

	modTime := time.Unix(int64(item.Mtime), 0)
	mTime := int64(item.Mtime)
	if mTime < 0 {
		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
		mTime = 0
	}
	modTime := time.Unix(mTime, 0)

	isDir, err := f.isDir(item.Kind, remote)
	if err != nil {
@@ -2058,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
	req.WritePu16(0) // revision
	req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
	req.WritePu64(o.size)
	req.WriteP64(o.modTime.Unix())
	req.WritePu64(o.modTime.Unix())
	req.WritePu32(0)
	req.Write(o.mrHash)
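In the itemToDirEntry hunk above, one side converts item.Mtime to a time.Time directly, while the other clamps negative values to zero first and logs the fix, since a negative Unix timestamp from the API would otherwise produce a pre-1970 modification time. A minimal sketch of that clamp follows, with a hypothetical input value rather than the mailru API type.

```go
package main

import (
	"fmt"
	"time"
)

// toModTime converts an API-supplied Unix timestamp to time.Time,
// clamping negative values to the epoch and reporting the fix.
func toModTime(raw int64, name string) time.Time {
	if raw < 0 {
		fmt.Printf("Fixing invalid timestamp %d on file %q\n", raw, name)
		raw = 0
	}
	return time.Unix(raw, 0)
}

func main() {
	fmt.Println(toModTime(1667995200, "ok.txt"))
	fmt.Println(toModTime(-1, "bad.txt")) // would otherwise be 1969-12-31 in UTC
}
```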
@@ -891,12 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// Disable change polling in China region
	// See: https://github.com/rclone/rclone/issues/6444
	if f.opt.Region == regionCN {
		f.features.ChangeNotify = nil
	}

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath(ctx, "")
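The hunk above shows one side of the compare disabling change polling for the China region by setting the ChangeNotify feature function to nil after the features struct is filled (see issue 6444); the other side does not have that block. Below is a minimal sketch of disabling an optional capability exposed as a nullable function field, using a hypothetical Features struct rather than rclone's fs.Features.

```go
package main

import "fmt"

// Features models optional capabilities as nullable function fields,
// similar in spirit to rclone's fs.Features (hypothetical simplification).
type Features struct {
	ChangeNotify func() // nil means "not supported"
}

type Fs struct {
	region   string
	features *Features
}

func newFs(region string) *Fs {
	f := &Fs{region: region, features: &Features{
		ChangeNotify: func() { fmt.Println("polling for changes") },
	}}
	// Disable change polling for regions where the API does not support it.
	if f.region == "cn" {
		f.features.ChangeNotify = nil
	}
	return f
}

func main() {
	for _, region := range []string{"global", "cn"} {
		f := newFs(region)
		if f.features.ChangeNotify == nil {
			fmt.Println(region + ": change notify disabled")
			continue
		}
		f.features.ChangeNotify()
	}
}
```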
@@ -1,158 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/common/auth"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
)
|
||||
|
||||
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
|
||||
switch opt.Provider {
|
||||
case instancePrincipal:
|
||||
return auth.InstancePrincipalConfigurationProvider()
|
||||
case userPrincipal:
|
||||
if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
|
||||
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
|
||||
}
|
||||
return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
|
||||
case resourcePrincipal:
|
||||
return auth.ResourcePrincipalConfigurationProvider()
|
||||
case noAuth:
|
||||
fs.Infof("client", "using no auth provider")
|
||||
return getNoAuthConfiguration()
|
||||
default:
|
||||
}
|
||||
return common.DefaultConfigProvider(), nil
|
||||
}
|
||||
|
||||
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
|
||||
p, err := getConfigurationProvider(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
|
||||
if err != nil {
|
||||
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
|
||||
return nil, err
|
||||
}
|
||||
if opt.Region != "" {
|
||||
client.SetRegion(opt.Region)
|
||||
}
|
||||
modifyClient(ctx, opt, &client.BaseClient)
|
||||
return &client, err
|
||||
}
|
||||
|
||||
func fileExists(filePath string) bool {
|
||||
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
|
||||
client.HTTPClient = getHTTPClient(ctx)
|
||||
if opt.Provider == noAuth {
|
||||
client.Signer = getNoAuthSigner()
|
||||
}
|
||||
}
|
||||
|
||||
// getClient makes http client according to the global options
|
||||
// this has rclone specific options support like dump headers, body etc.
|
||||
func getHTTPClient(ctx context.Context) *http.Client {
|
||||
return fshttp.NewClient(ctx)
|
||||
}
|
||||
|
||||
var retryErrorCodes = []int{
|
||||
408, // Request Timeout
|
||||
429, // Rate exceeded.
|
||||
500, // Get occasional 500 Internal Server Error
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Time-out
|
||||
}
|
||||
|
||||
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
// If this is an ocierr object, try and extract more useful information to determine if we should retry
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retryable
|
||||
if fserrors.ShouldRetry(err) {
|
||||
return true, err
|
||||
}
|
||||
// If it is a timeout then we want to retry that
|
||||
if ociError.GetCode() == "RequestTimeout" {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
// Ok, not an oci error, check for generic failure conditions
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
|
||||
return &noAuthConfigurator{}, nil
|
||||
}
|
||||
|
||||
func getNoAuthSigner() common.HTTPRequestSigner {
|
||||
return &noAuthSigner{}
|
||||
}
|
||||
|
||||
type noAuthConfigurator struct {
|
||||
}
|
||||
|
||||
type noAuthSigner struct {
|
||||
}
|
||||
|
||||
func (n *noAuthSigner) Sign(*http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) KeyID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) UserOCID() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) Region() (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
|
||||
return common.AuthConfig{
|
||||
AuthType: common.UnknownAuthenticationType,
|
||||
IsFromConfigFile: false,
|
||||
OboToken: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ common.ConfigurationProvider = &noAuthConfigurator{}
|
||||
_ common.HTTPRequestSigner = &noAuthSigner{}
|
||||
)
|
||||
@@ -1,228 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Command Interface Implementation
|
||||
// ------------------------------------------------------------
|
||||
|
||||
const (
|
||||
operationRename = "rename"
|
||||
operationListMultiPart = "list-multipart-uploads"
|
||||
operationCleanup = "cleanup"
|
||||
)
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: operationRename,
|
||||
Short: "change the name of an object",
|
||||
Long: `This command can be used to rename a object.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
|
||||
`,
|
||||
Opts: nil,
|
||||
}, {
|
||||
Name: operationListMultiPart,
|
||||
Short: "List the unfinished multipart uploads",
|
||||
Long: `This command lists the unfinished multipart uploads in JSON format.
|
||||
|
||||
rclone backend list-multipart-uploads oos:bucket/path/to/object
|
||||
|
||||
It returns a dictionary of buckets with values as lists of unfinished
|
||||
multipart uploads.
|
||||
|
||||
You can call it with no bucket in which case it lists all bucket, with
|
||||
a bucket or with a bucket and path.
|
||||
|
||||
{
|
||||
"test-bucket": [
|
||||
{
|
||||
"namespace": "test-namespace",
|
||||
"bucket": "test-bucket",
|
||||
"object": "600m.bin",
|
||||
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
|
||||
"timeCreated": "2022-07-29T06:21:16.595Z",
|
||||
"storageTier": "Standard"
|
||||
}
|
||||
]
|
||||
`,
|
||||
}, {
|
||||
Name: operationCleanup,
|
||||
Short: "Remove unfinished multipart uploads.",
|
||||
Long: `This command removes unfinished multipart uploads of age greater than
|
||||
max-age which defaults to 24 hours.
|
||||
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
|
||||
rclone backend cleanup oos:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
|
||||
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
`,
|
||||
Opts: map[string]string{
|
||||
"max-age": "Max age of upload to delete",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
/*
|
||||
Command the backend to run a named command
|
||||
|
||||
The command run is name
|
||||
args may be used to read arguments from
|
||||
opts may be used to read optional arguments from
|
||||
|
||||
The result should be capable of being JSON encoded
|
||||
If it is a string or a []string it will be shown to the user
|
||||
otherwise it will be JSON encoded and shown to the user like that
|
||||
*/
|
||||
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
|
||||
opt map[string]string) (result interface{}, err error) {
|
||||
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
|
||||
switch commandName {
|
||||
case operationRename:
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("path to object or its new name to rename is empty")
|
||||
}
|
||||
remote := args[0]
|
||||
newName := args[1]
|
||||
return f.rename(ctx, remote, newName)
|
||||
case operationListMultiPart:
|
||||
return f.listMultipartUploadsAll(ctx)
|
||||
case operationCleanup:
|
||||
maxAge := 24 * time.Hour
|
||||
if opt["max-age"] != "" {
|
||||
maxAge, err = fs.ParseDuration(opt["max-age"])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad max-age: %w", err)
|
||||
}
|
||||
}
|
||||
return nil, f.cleanUp(ctx, maxAge)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
|
||||
if remote == "" {
|
||||
return nil, fmt.Errorf("path to object file cannot be empty")
|
||||
}
|
||||
if newName == "" {
|
||||
return nil, fmt.Errorf("the object's new name cannot be empty")
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
bucketName, objectPath := o.split()
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
|
||||
if strings.HasPrefix(objectPath, bucketName) {
|
||||
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
|
||||
objectPath, bucketName)
|
||||
}
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
details := objectstorage.RenameObjectDetails{
|
||||
SourceName: common.String(objectPath),
|
||||
NewName: common.String(newName),
|
||||
}
|
||||
request := objectstorage.RenameObjectRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
RenameObjectDetails: details,
|
||||
OpcClientRequestId: nil,
|
||||
RequestMetadata: common.RequestMetadata{},
|
||||
}
|
||||
var response objectstorage.RenameObjectResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.RenameObject(ctx, request)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
|
||||
return "renamed successfully", nil
|
||||
}
|
||||
|
||||
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
|
||||
err error) {
|
||||
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
|
||||
bucket, directory := f.split("")
|
||||
if bucket != "" {
|
||||
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
|
||||
if err != nil {
|
||||
return uploadsMap, err
|
||||
}
|
||||
uploadsMap[bucket] = uploads
|
||||
return uploadsMap, nil
|
||||
}
|
||||
entries, err := f.listBuckets(ctx)
|
||||
if err != nil {
|
||||
return uploadsMap, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
bucket := entry.Remote()
|
||||
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
|
||||
if listErr != nil {
|
||||
err = listErr
|
||||
fs.Errorf(f, "%v", err)
|
||||
}
|
||||
uploadsMap[bucket] = uploads
|
||||
}
|
||||
return uploadsMap, err
|
||||
}
|
||||
|
||||
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
|
||||
//
|
||||
// Note that rather lazily we treat key as a prefix, so it matches
|
||||
// directories and objects. This could surprise the user if they ask
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
|
||||
uploads = []*objectstorage.MultipartUpload{}
|
||||
req := objectstorage.ListMultipartUploadsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
|
||||
var response objectstorage.ListMultipartUploadsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.ListMultipartUploads(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
// fs.Debugf(f, "failed to list multi part uploads %v", err)
|
||||
return uploads, err
|
||||
}
|
||||
for index, item := range response.Items {
|
||||
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
|
||||
continue
|
||||
}
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
req.Page = response.OpcNextPage
|
||||
}
|
||||
return uploads, nil
|
||||
}
|
||||
@@ -1,155 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement Copier is an optional interfaces for Fs
|
||||
//------------------------------------------------------------
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
// This is stored with the remote path given
|
||||
// It returns the destination Object and a possible error
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
// fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := f.copy(ctx, dstObj, srcObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// copy does a server-side copy from dstObj <- srcObj
|
||||
//
|
||||
// If newInfo is nil then the metadata will be copied otherwise it
|
||||
// will be replaced with newInfo
|
||||
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
|
||||
srcBucket, srcPath := srcObj.split()
|
||||
dstBucket, dstPath := dstObj.split()
|
||||
if dstBucket != srcBucket {
|
||||
exists, err := f.bucketExists(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
err = f.makeBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
copyObjectDetails := objectstorage.CopyObjectDetails{
|
||||
SourceObjectName: common.String(srcPath),
|
||||
DestinationRegion: common.String(dstObj.fs.opt.Region),
|
||||
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
|
||||
DestinationBucket: common.String(dstBucket),
|
||||
DestinationObjectName: common.String(dstPath),
|
||||
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
|
||||
}
|
||||
req := objectstorage.CopyObjectRequest{
|
||||
NamespaceName: common.String(srcObj.fs.opt.Namespace),
|
||||
BucketName: common.String(srcBucket),
|
||||
CopyObjectDetails: copyObjectDetails,
|
||||
}
|
||||
var resp objectstorage.CopyObjectResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CopyObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
workRequestID := resp.OpcWorkRequestId
|
||||
timeout := time.Duration(f.opt.CopyTimeout)
|
||||
dstName := dstObj.String()
|
||||
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
|
||||
// To enable server side copy object, customers will have to
|
||||
// grant policy to objectstorage service to manage object-family
|
||||
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
|
||||
// Another option to avoid the policy is to download and reupload the file.
|
||||
// This download upload will work for maximum file size limit of 5GB
|
||||
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
|
||||
client *objectstorage.ObjectStorageClient) error {
|
||||
|
||||
stateConf := &StateChangeConf{
|
||||
Pending: []string{
|
||||
string(objectstorage.WorkRequestStatusAccepted),
|
||||
string(objectstorage.WorkRequestStatusInProgress),
|
||||
string(objectstorage.WorkRequestStatusCanceling),
|
||||
},
|
||||
Target: []string{
|
||||
string(objectstorage.WorkRequestSummaryStatusCompleted),
|
||||
string(objectstorage.WorkRequestSummaryStatusCanceled),
|
||||
string(objectstorage.WorkRequestStatusFailed),
|
||||
},
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
|
||||
getWorkRequestRequest.WorkRequestId = wID
|
||||
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
|
||||
wr := &workRequestResponse.WorkRequest
|
||||
return workRequestResponse, string(wr.Status), err
|
||||
},
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
|
||||
if e != nil {
|
||||
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
|
||||
}
|
||||
|
||||
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
|
||||
if wr.Status == objectstorage.WorkRequestStatusFailed {
|
||||
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
|
||||
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
|
||||
req := objectstorage.ListWorkRequestErrorsRequest{}
|
||||
req.WorkRequestId = workRequestID
|
||||
res, err := client.ListWorkRequestErrors(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
allErrs := make([]string, 0)
|
||||
for _, errs := range res.Items {
|
||||
allErrs = append(allErrs, *errs.Message)
|
||||
}
|
||||
|
||||
errorMessage := strings.Join(allErrs, "\n")
|
||||
return errorMessage, nil
|
||||
}
|
||||
@@ -1,621 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Object Interface Implementation
|
||||
// ------------------------------------------------------------
|
||||
|
||||
const (
|
||||
metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
|
||||
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
|
||||
// StandardTier object storage tier
|
||||
ociMetaPrefix = "opc-meta-"
|
||||
)
|
||||
|
||||
var archive = "archive"
|
||||
var infrequentAccess = "infrequentaccess"
|
||||
var standard = "standard"
|
||||
|
||||
var storageTierMap = map[string]*string{
|
||||
archive: &archive,
|
||||
infrequentAccess: &infrequentAccess,
|
||||
standard: &standard,
|
||||
}
|
||||
|
||||
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
|
||||
|
||||
// Object describes a oci bucket object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
md5 string // MD5 hash if known
|
||||
bytes int64 // Size of the object
|
||||
lastModified time.Time // The modified time of the object if known
|
||||
meta map[string]string // The object metadata if known - may be nil
|
||||
mimeType string // Content-Type of the object
|
||||
|
||||
// Metadata as pointers to strings as they often won't be present
|
||||
storageTier *string // e.g. Standard
|
||||
}
|
||||
|
||||
// split returns bucket and bucketPath from the object
|
||||
func (o *Object) split() (bucket, bucketPath string) {
|
||||
return o.fs.split(o.remote)
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
fs.Debugf(o, "trying to read metadata %v", o.remote)
|
||||
if o.meta != nil {
|
||||
return nil
|
||||
}
|
||||
info, err := o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.decodeMetaDataHead(info)
|
||||
}
|
||||
|
||||
// headObject gets the metadata from the object unconditionally
|
||||
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
|
||||
bucketName, objectPath := o.split()
|
||||
req := objectstorage.HeadObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(objectPath),
|
||||
}
|
||||
var response objectstorage.HeadObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = o.fs.srv.HeadObject(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
if svcErr, ok := err.(common.ServiceError); ok {
|
||||
if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
o.fs.cache.MarkOK(bucketName)
|
||||
return &response, err
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
|
||||
return o.setMetaData(
|
||||
info.ContentLength,
|
||||
info.ContentMd5,
|
||||
info.ContentType,
|
||||
info.LastModified,
|
||||
info.StorageTier,
|
||||
info.OpcMeta)
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
|
||||
return o.setMetaData(
|
||||
info.ContentLength,
|
||||
info.ContentMd5,
|
||||
info.ContentType,
|
||||
info.LastModified,
|
||||
info.StorageTier,
|
||||
info.OpcMeta)
|
||||
}
|
||||
|
||||
func (o *Object) setMetaData(
|
||||
contentLength *int64,
|
||||
contentMd5 *string,
|
||||
contentType *string,
|
||||
lastModified *common.SDKTime,
|
||||
storageTier interface{},
|
||||
meta map[string]string) error {
|
||||
|
||||
if contentLength != nil {
|
||||
o.bytes = *contentLength
|
||||
}
|
||||
if contentMd5 != nil {
|
||||
md5, err := o.base64ToMd5(*contentMd5)
|
||||
if err == nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
o.meta = meta
|
||||
if o.meta == nil {
|
||||
o.meta = map[string]string{}
|
||||
}
|
||||
// Read MD5 from metadata if present
|
||||
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
|
||||
md5, err := o.base64ToMd5(md5sumBase64)
|
||||
if err != nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
if lastModified == nil {
|
||||
o.lastModified = time.Now()
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
} else {
|
||||
o.lastModified = lastModified.Time
|
||||
}
|
||||
if contentType != nil {
|
||||
o.mimeType = *contentType
|
||||
}
|
||||
if storageTier == nil || storageTier == "" {
|
||||
o.storageTier = storageTierMap[standard]
|
||||
} else {
|
||||
tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
|
||||
o.storageTier = storageTierMap[tier]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
|
||||
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
|
||||
return "", err
|
||||
} else if len(md5sumBytes) != 16 {
|
||||
fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
|
||||
return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
|
||||
}
|
||||
return hex.EncodeToString(md5sumBytes), nil
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
// GetTier returns storage class as string
|
||||
func (o *Object) GetTier() string {
|
||||
if o.storageTier == nil || *o.storageTier == "" {
|
||||
return standard
|
||||
}
|
||||
return *o.storageTier
|
||||
}
|
||||
|
||||
// SetTier performs changing storage class
|
||||
func (o *Object) SetTier(tier string) (err error) {
|
||||
ctx := context.TODO()
|
||||
tier = strings.ToLower(tier)
|
||||
bucketName, bucketPath := o.split()
|
||||
tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier %v ", tier)
|
||||
}
|
||||
|
||||
req := objectstorage.UpdateObjectStorageTierRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
|
||||
ObjectName: common.String(bucketPath),
|
||||
StorageTier: tierEnum,
|
||||
},
|
||||
}
|
||||
_, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.storageTier = storageTierMap[tier]
|
||||
return err
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType(ctx context.Context) string {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return ""
|
||||
}
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// Hash returns the MD5 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
// Convert base64 encoded md5 into lower case hex
|
||||
if o.md5 == "" {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return o.md5, nil
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned to the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
|
||||
if o.fs.ci.UseServerModTime {
|
||||
return o.lastModified
|
||||
}
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return time.Now()
|
||||
}
|
||||
// read mtime out of metadata if available
|
||||
d, ok := o.meta[metaMtime]
|
||||
if !ok || d == "" {
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := swift.FloatStringToTime(d)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read mtime from object: %v", err)
|
||||
return o.lastModified
|
||||
}
|
||||
return modTime
|
||||
}
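// Illustrative sketch: the mtime is stored in the object metadata as a Unix
// time with fractional seconds (the same convention as the swift backend), so
// reading it back is a round trip through the swift helpers used above:
//
//	s := swift.TimeToFloatString(time.Unix(1667998771, 500000000)) // e.g. "1667998771.5"
//	t, err := swift.FloatStringToTime(s)                           // back to a time.Time
//	_, _ = t, err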
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
_, err = o.fs.Copy(ctx, o, o.remote)
|
||||
return err
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
bucketName, bucketPath := o.split()
|
||||
req := objectstorage.DeleteObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.DeleteObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Open object file
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
req := objectstorage.GetObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
o.applyGetObjectOptions(&req, options...)
|
||||
|
||||
var resp objectstorage.GetObjectResponse
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = o.fs.srv.GetObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// read size from ContentLength or ContentRange
|
||||
bytes := resp.ContentLength
|
||||
if resp.ContentRange != nil {
|
||||
var contentRange = *resp.ContentRange
|
||||
slash := strings.IndexRune(contentRange, '/')
|
||||
if slash >= 0 {
|
||||
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
|
||||
if err == nil {
|
||||
bytes = &i
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to find length in %q", contentRange)
|
||||
}
|
||||
}
|
||||
err = o.decodeMetaDataObject(&resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o.bytes = *bytes
|
||||
return resp.HTTPResponse().Body, nil
|
||||
}
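// Illustrative sketch: for a ranged GET the total object size comes from the
// Content-Range header rather than Content-Length, which is what the parsing
// above extracts, e.g.:
//
//	contentRange := "bytes 0-1023/4096"
//	if slash := strings.IndexRune(contentRange, '/'); slash >= 0 {
//		total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
//		fmt.Println(total, err) // 4096 <nil>
//	}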
|
||||
|
||||
// Update an object if it has changed
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine whether to do a single or multipart upload
|
||||
size := src.Size()
|
||||
multipart := size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
metadata := map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
|
||||
if multipart {
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
uploadRequest := transfer.UploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PartSize: common.Int64(chunkSize),
|
||||
AllowMultipartUploads: common.Bool(true),
|
||||
AllowParrallelUploads: common.Bool(true),
|
||||
ObjectStorageClient: o.fs.srv,
|
||||
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
|
||||
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
|
||||
Metadata: metadataWithOpcPrefix(metadata),
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
}
|
||||
uploadMgr := transfer.NewUploadManager()
|
||||
var uploadID = ""
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
if uploadID == "" {
|
||||
return
|
||||
}
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := o.fs.abortMultiPartUpload(
|
||||
context.Background(),
|
||||
bucketName,
|
||||
bucketPath,
|
||||
uploadID)
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
|
||||
var httpResponse *http.Response
|
||||
if err == nil {
|
||||
if uploadResponse.Type == transfer.MultipartUpload {
|
||||
if uploadResponse.MultipartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
|
||||
}
|
||||
} else {
|
||||
if uploadResponse.SinglepartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
uploadID := ""
|
||||
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
|
||||
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
|
||||
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
|
||||
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, httpResponse, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "multipart streaming upload failed %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PutObjectBody: io.NopCloser(in),
|
||||
OpcMeta: metadata,
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "put object failed %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
return o.readMetaData(ctx)
|
||||
}
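// Illustrative note: with the defaults defined in this backend
// (upload_cutoff 200 MiB, chunk_size 5 MiB), a 300 MiB source takes the
// multipart UploadManager path above, while a 10 MiB source is sent with a
// single PutObject call:
//
//	size := int64(300 * 1024 * 1024)
//	multipart := size >= int64(defaultUploadCutoff) // true -> multipart path
//	_ = multipart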
|
||||
|
||||
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.CacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.ContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.OpcMeta[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
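// Illustrative sketch: upload options arrive as fs.OpenOption values whose
// Header() pair is mapped onto the request above. The fs.HTTPOption literal
// below is shown only as an assumption for illustration:
//
//	var opt fs.OpenOption = &fs.HTTPOption{Key: "Cache-Control", Value: "max-age=60"}
//	key, value := opt.Header() // "Cache-Control", "max-age=60"
//	_, _ = key, value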
|
||||
|
||||
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
case *fs.RangeOption, *fs.SeekOption:
|
||||
_, value := option.Header()
|
||||
req.Range = &value
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.HttpResponseCacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.HttpResponseContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.HttpResponseContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.HttpResponseContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.HttpResponseContentType = common.String(value)
|
||||
case "range":
|
||||
// do nothing
|
||||
default:
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.Metadata[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func metadataWithOpcPrefix(src map[string]string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for lowerKey, value := range src {
|
||||
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
dst[ociMetaPrefix+lowerKey] = value
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
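// Illustrative sketch: OCI expects user metadata keys to carry the
// ociMetaPrefix ("opc-meta-" in the OCI API) when uploading, so:
//
//	in := map[string]string{"mtime": "1667998771.5"}
//	out := metadataWithOpcPrefix(in)
//	// out == map[string]string{"opc-meta-mtime": "1667998771.5"}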
|
||||
@@ -1,242 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
const (
|
||||
maxSizeForCopy = 4768 * 1024 * 1024
|
||||
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
defaultUploadConcurrency = 10
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
|
||||
)
|
||||
|
||||
const (
|
||||
userPrincipal = "user_principal_auth"
|
||||
instancePrincipal = "instance_principal_auth"
|
||||
resourcePrincipal = "resource_principal_auth"
|
||||
environmentAuth = "env_auth"
|
||||
noAuth = "no_auth"
|
||||
|
||||
userPrincipalHelpText = `use an OCI user and an API key for authentication.
|
||||
you’ll need to put your tenancy OCID, user OCID, region, and the path to and fingerprint of an API key in a config file.
|
||||
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
|
||||
|
||||
instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
|
||||
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
|
||||
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
|
||||
|
||||
resourcePrincipalHelpText = `use resource principals to make API calls`
|
||||
|
||||
environmentAuthHelpText = `automatically pick up the credentials from the runtime (env), the first one to provide auth wins`
|
||||
|
||||
noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
|
||||
)
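// Illustrative sketch (all values are placeholders): a remote using
// user_principal_auth, configured with the options defined below, would look
// roughly like this in rclone.conf:
//
//	[myoos]
//	type = oracleobjectstorage
//	provider = user_principal_auth
//	namespace = my-namespace
//	compartment = ocid1.compartment.oc1..exampleuniqueID
//	region = us-ashburn-1
//	config_file = ~/.oci/config
//	config_profile = Default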
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
Compartment string `config:"compartment"`
|
||||
Namespace string `config:"namespace"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ConfigFile string `config:"config_file"`
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
}
|
||||
|
||||
func newOptions() []fs.Option {
|
||||
return []fs.Option{{
|
||||
Name: fs.ConfigProvider,
|
||||
Help: "Choose your Auth Provider",
|
||||
Required: true,
|
||||
Default: environmentAuth,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: environmentAuth,
|
||||
Help: environmentAuthHelpText,
|
||||
}, {
|
||||
Value: userPrincipal,
|
||||
Help: userPrincipalHelpText,
|
||||
}, {
|
||||
Value: instancePrincipal,
|
||||
Help: instancePrincipalHelpText,
|
||||
}, {
|
||||
Value: resourcePrincipal,
|
||||
Help: resourcePrincipalHelpText,
|
||||
}, {
|
||||
Value: noAuth,
|
||||
Help: noAuthHelpText,
|
||||
}},
|
||||
}, {
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Object storage Region",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
|
||||
Required: false,
|
||||
}, {
|
||||
Name: "config_file",
|
||||
Help: "Path to OCI config file",
|
||||
Provider: userPrincipal,
|
||||
Default: "~/.oci/config",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "~/.oci/config",
|
||||
Help: "oci configuration file location",
|
||||
}},
|
||||
}, {
|
||||
Name: "config_profile",
|
||||
Help: "Profile name inside the oci config file",
|
||||
Provider: userPrincipal,
|
||||
Default: "Default",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "Default",
|
||||
Help: "Use the default profile",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff or files with unknown
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
|
||||
photos or google docs) they will be uploaded as multipart uploads
|
||||
using this chunk size.
|
||||
|
||||
Note that "upload_concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high-speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
Rclone will automatically increase the chunk size when uploading a
|
||||
large file of known size to stay below the 10,000 chunks limit.
|
||||
|
||||
Files of unknown size are uploaded with the configured
|
||||
chunk_size. Since the default chunk size is 5 MiB and there can be at
|
||||
most 10,000 chunks, this means that by default the maximum size of
|
||||
a file you can stream upload is 48 GiB. If you wish to stream upload
|
||||
larger files then you will need to increase chunk_size.
|
||||
|
||||
Increasing the chunk size decreases the accuracy of the progress
|
||||
statistics displayed with "-P" flag.
|
||||
`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.`,
|
||||
Default: defaultUploadConcurrency,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_cutoff",
|
||||
Help: `Cutoff for switching to multipart copy.
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of this size.
|
||||
|
||||
The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Default: fs.SizeSuffix(maxSizeForCopy),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_timeout",
|
||||
Help: `Timeout for copy.
|
||||
|
||||
Copy is an asynchronous operation, specify timeout to wait for copy to succeed
|
||||
`,
|
||||
Default: defaultCopyTimeoutDuration,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Don't store MD5 checksum with object metadata.
|
||||
|
||||
Normally rclone will calculate the MD5 checksum of the input before
|
||||
uploading it so it can add it to metadata on the object. This is great
|
||||
for data integrity checking but can cause long delays for large files
|
||||
to start uploading.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
// Any UTF-8 character is valid in a key, however it can't handle
|
||||
// invalid UTF-8 and / have a special meaning.
|
||||
//
|
||||
// The SDK can't seem to handle uploading files called '.'
|
||||
// - initial / encoding
|
||||
// - doubled / encoding
|
||||
// - trailing / encoding
|
||||
// so that OSS keys are always valid file names
|
||||
Default: encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeDot,
|
||||
}, {
|
||||
Name: "leave_parts_on_error",
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
|
||||
|
||||
It should be set to true for resuming uploads across different sessions.
|
||||
|
||||
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
|
||||
additional costs if not cleaned up.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_bucket",
|
||||
Help: `If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
|
||||
It can also be needed if the user you are using does not have bucket
|
||||
creation permissions.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}}
|
||||
}
|
||||
@@ -1,695 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
// Package oracleobjectstorage provides an interface to the OCI object storage system.
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "oracleobjectstorage",
|
||||
Description: "Oracle Cloud Infrastructure Object Storage",
|
||||
Prefix: "oos",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Options: newOptions(),
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote object storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
ci *fs.ConfigInfo // global config
|
||||
features *fs.Features // optional features
|
||||
srv *objectstorage.ObjectStorageClient // the connection to the object storage
|
||||
rootBucket string // bucket part of root (if any)
|
||||
rootDirectory string // directory part of root (if any)
|
||||
cache *bucket.Cache // cache for bucket creation status
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// NewFs initialises the backend
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
objectStorageClient, err := newObjectStorageClient(ctx, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ci: ci,
|
||||
srv: objectStorageClient,
|
||||
cache: bucket.NewCache(),
|
||||
pacer: fs.NewPacer(ctx, p),
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
SlowModTime: true,
|
||||
}).Fill(ctx, f)
|
||||
if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
oldRoot := f.root
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.setRoot(newRoot)
|
||||
_, err := f.NewObject(ctx, leaf)
|
||||
if err != nil {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.setRoot(oldRoot)
|
||||
return f, nil
|
||||
}
|
||||
// return an error with fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||
if cs < minChunkSize {
|
||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxUploadCutoff {
|
||||
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement the backend that represents a remote object storage server
|
||||
// Fs is the interface a cloud storage system must provide
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.rootBucket == "" {
|
||||
return "oos:root"
|
||||
}
|
||||
if f.rootDirectory == "" {
|
||||
return fmt.Sprintf("oos:bucket %s", f.rootBucket)
|
||||
}
|
||||
return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Precision of the remote
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Millisecond
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = parsePath(root)
|
||||
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
|
||||
}
|
||||
|
||||
// parsePath parses a remote 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
return
|
||||
}
|
||||
|
||||
// split returns bucket and bucketPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
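// Illustrative sketch: with f.root set to "mybucket/dir", a remote path of
// "sub/file.txt" splits into bucket "mybucket" and object name
// "dir/sub/file.txt" (after any configured encoding is applied):
//
//	bucketName, bucketPath := f.split("sub/file.txt")
//	// bucketName == "mybucket", bucketPath == "dir/sub/file.txt"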
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
bucketName, directory := f.split(dir)
|
||||
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
|
||||
if bucketName == "" {
|
||||
if directory != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
return f.listBuckets(ctx)
|
||||
}
|
||||
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object.
|
||||
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
|
||||
|
||||
// list the objects into the function supplied from
|
||||
// the bucket and root supplied
|
||||
// (bucket, directory) is the starting directory
|
||||
// If prefix is set then it is removed from all file names
|
||||
// If addBucket is set then it adds the bucket to the start of the remotes generated
|
||||
// If recurse is set the function will recursively list
|
||||
// If limit is > 0 then it limits to that many files (must be less than 1000)
|
||||
|
||||
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
|
||||
fn listFn) (err error) {
|
||||
if prefix != "" {
|
||||
prefix += "/"
|
||||
}
|
||||
if directory != "" {
|
||||
directory += "/"
|
||||
}
|
||||
|
||||
delimiter := ""
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
}
|
||||
chunkSize := 1000
|
||||
if limit > 0 {
|
||||
chunkSize = limit
|
||||
}
|
||||
var request = objectstorage.ListObjectsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucket),
|
||||
Prefix: common.String(directory),
|
||||
Limit: common.Int(chunkSize),
|
||||
Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
|
||||
}
|
||||
if delimiter != "" {
|
||||
request.Delimiter = common.String(delimiter)
|
||||
}
|
||||
|
||||
for {
|
||||
var resp objectstorage.ListObjectsResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
resp, err = f.srv.ListObjects(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// A 404 means the directory or bucket wasn't found
|
||||
if ociError.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
if f.rootBucket == "" {
|
||||
// if listing from the root ignore wrong region requests returning
|
||||
// empty directory
|
||||
if reqErr, ok := err.(common.ServiceError); ok {
|
||||
// 301 if wrong region for bucket
|
||||
if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
|
||||
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
if !recurse {
|
||||
for _, commonPrefix := range resp.ListObjects.Prefixes {
|
||||
if commonPrefix == "" {
|
||||
fs.Logf(f, "Nil common prefix received")
|
||||
continue
|
||||
}
|
||||
remote := commonPrefix
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
remote = strings.TrimSuffix(remote, "/")
|
||||
err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range resp.Objects {
|
||||
object := &resp.Objects[i]
|
||||
// Finish if file name no longer has prefix
|
||||
//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||
// return nil
|
||||
//}
|
||||
remote := *object.Name
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
// fs.Debugf(f, "Odd name received %v", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
// Check for directory
|
||||
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
|
||||
if addBucket {
|
||||
remote = path.Join(bucket, remote)
|
||||
}
|
||||
// is this a directory marker?
|
||||
if isDirectory && object.Size != nil && *object.Size == 0 {
|
||||
continue // skip directory marker
|
||||
}
|
||||
if isDirectory && len(remote) > 1 {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
err = fn(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// end if no NextFileName
|
||||
if resp.NextStartWith == nil {
|
||||
break
|
||||
}
|
||||
request.Start = resp.NextStartWith
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
size := int64(0)
|
||||
if object.Size != nil {
|
||||
size = *object.Size
|
||||
}
|
||||
d := fs.NewDir(remote, time.Time{}).SetSize(size)
|
||||
return d, nil
|
||||
}
|
||||
o, err := f.newObjectWithInfo(ctx, remote, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
||||
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucket)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listBuckets returns all the buckets to out
|
||||
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
if f.opt.Provider == noAuth {
|
||||
return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
|
||||
}
|
||||
var request = objectstorage.ListBucketsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
CompartmentId: common.String(f.opt.Compartment),
|
||||
}
|
||||
var resp objectstorage.ListBucketsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.ListBuckets(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
bucketName := f.opt.Enc.ToStandardName(*item.Name)
|
||||
f.cache.MarkOK(bucketName)
|
||||
d := fs.NewDir(bucketName, item.TimeCreated.Time)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
if resp.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
request.Page = resp.OpcNextPage
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
// Set info but not meta
|
||||
if info.TimeModified == nil {
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
o.lastModified = time.Now()
|
||||
} else {
|
||||
o.lastModified = info.TimeModified.Time
|
||||
}
|
||||
if info.Md5 != nil {
|
||||
md5, err := o.base64ToMd5(*info.Md5)
|
||||
if err == nil {
|
||||
o.md5 = md5
|
||||
}
|
||||
}
|
||||
o.bytes = *info.Size
|
||||
o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
|
||||
} else {
|
||||
err := o.readMetaData(ctx) // reads info and headers, returning an error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
// Put the object into the bucket
|
||||
// Copy the reader in to the new object which is returned
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
bucketName, _ := f.split(dir)
|
||||
return f.makeBucket(ctx, bucketName)
|
||||
}
|
||||
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
|
||||
if f.opt.NoCheckBucket {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Create(bucketName, func() error {
|
||||
details := objectstorage.CreateBucketDetails{
|
||||
Name: common.String(bucketName),
|
||||
CompartmentId: common.String(f.opt.Compartment),
|
||||
PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
|
||||
}
|
||||
req := objectstorage.CreateBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
CreateBucketDetails: details,
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CreateBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
|
||||
objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
|
||||
}
|
||||
if svcErr, ok := err.(common.ServiceError); ok {
|
||||
if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}, func() (bool, error) {
|
||||
return f.bucketExists(ctx, bucketName)
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the bucket exists
|
||||
//
|
||||
// NB this can return incorrect results if called immediately after bucket deletion
|
||||
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
|
||||
req := objectstorage.HeadBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.HeadBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if err, ok := err.(common.ServiceError); ok {
|
||||
if err.GetHTTPStatusCode() == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
bucketName, directory := f.split(dir)
|
||||
if bucketName == "" || directory != "" {
|
||||
return nil
|
||||
}
|
||||
return f.cache.Remove(bucketName, func() error {
|
||||
req := objectstorage.DeleteBucketRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.DeleteBucket(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err == nil {
|
||||
fs.Infof(f, "Bucket %q deleted", bucketName)
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
|
||||
if uploadID == "" {
|
||||
return nil
|
||||
}
|
||||
request := objectstorage.AbortMultipartUploadRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.AbortMultipartUpload(ctx, request)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
|
||||
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
|
||||
uploads []*objectstorage.MultipartUpload) (err error) {
|
||||
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
|
||||
for _, upload := range uploads {
|
||||
if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
|
||||
age := time.Since(upload.TimeCreated.Time)
|
||||
what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
|
||||
upload.TimeCreated, age)
|
||||
if age > maxAge {
|
||||
fs.Infof(f, "removing %s", what)
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
if ignoreErr != nil {
|
||||
// fs.Debugf(f, "ignoring error %s", ignoreErr)
|
||||
}
|
||||
} else {
|
||||
// fs.Debugf(f, "ignoring %s", what)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanUp removes all pending multipart uploads older than maxAge
|
||||
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
|
||||
uploadsMap, err := f.listMultipartUploadsAll(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for bucketName, uploads := range uploadsMap {
|
||||
cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
|
||||
if cleanErr != nil {
|
||||
fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
|
||||
err = cleanErr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CleanUp removes all pending multipart uploads older than 24 hours
|
||||
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
return f.cleanUp(ctx, 24*time.Hour)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// Implement ListRer which is an optional interface for Fs
|
||||
//------------------------------------------------------------
|
||||
|
||||
/*
|
||||
ListR lists the objects and directories of the Fs starting
|
||||
from dir recursively into out.
|
||||
|
||||
dir should be "" to start from the root, and should not
|
||||
have trailing slashes.
|
||||
|
||||
This should return ErrDirNotFound if the directory isn't
|
||||
found.
|
||||
|
||||
It should call callback for each tranche of entries read.
|
||||
These need not be returned in any particular order. If
|
||||
callback returns an error then the listing will stop
|
||||
immediately.
|
||||
|
||||
Don't implement this unless you have a more efficient way
|
||||
of listing recursively than doing a directory traversal.
|
||||
*/
|
||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
bucketName, directory := f.split(dir)
|
||||
list := walk.NewListRHelper(callback)
|
||||
listR := func(bucket, directory, prefix string, addBucket bool) error {
|
||||
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Add(entry)
|
||||
})
|
||||
}
|
||||
if bucketName == "" {
|
||||
entries, err := f.listBuckets(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bucketName := entry.Remote()
|
||||
err = listR(bucketName, "", f.rootDirectory, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucketName)
|
||||
}
|
||||
} else {
|
||||
err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.cache.MarkOK(bucketName)
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
)
|
||||
@@ -1,33 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestOracleObjectStorage:",
|
||||
TiersToTest: []string{"standard", "archive"},
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
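// Illustrative note: as with other rclone backends these tests are run
// against a configured remote, typically something like (remote name as
// declared above):
//
//	go test -v -remote TestOracleObjectStorage: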
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
@@ -1,7 +0,0 @@
|
||||
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || solaris || js
|
||||
// +build plan9 solaris js
|
||||
|
||||
package oracleobjectstorage
|
||||
@@ -1,362 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
var refreshGracePeriod = 30 * time.Second
|
||||
|
||||
// StateRefreshFunc is a function type used for StateChangeConf that is
|
||||
// responsible for refreshing the item being watched for a state change.
|
||||
//
|
||||
// It returns three results. `result` is any object that will be returned
|
||||
// as the final object after waiting for state change. This allows you to
|
||||
// return the final updated object, for example an EC2 instance after refreshing
|
||||
// it. A nil result represents not found.
|
||||
//
|
||||
// `state` is the latest state of that object. And `err` is any error that
|
||||
// may have happened while refreshing the state.
|
||||
type StateRefreshFunc func() (result interface{}, state string, err error)
|
||||
|
||||
// StateChangeConf is the configuration struct used for `WaitForState`.
|
||||
type StateChangeConf struct {
|
||||
Delay time.Duration // Wait this time before starting checks
|
||||
Pending []string // States that are "allowed" and will continue trying
|
||||
Refresh StateRefreshFunc // Refreshes the current state
|
||||
Target []string // Target state
|
||||
Timeout time.Duration // The amount of time to wait before timeout
|
||||
MinTimeout time.Duration // Smallest time to wait before refreshes
|
||||
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
|
||||
NotFoundChecks int // Number of times to allow not found (nil result from Refresh)
|
||||
|
||||
// This is to work around inconsistent APIs
|
||||
ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
|
||||
}
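// Illustrative sketch: a caller polls a resource (for example an OCI copy
// work request) until it reaches a terminal state. The state names and the
// Refresh body are placeholders; ctx is a context.Context from the caller:
//
//	conf := &StateChangeConf{
//		Pending:    []string{"IN_PROGRESS"},
//		Target:     []string{"COMPLETED"},
//		Timeout:    10 * time.Minute,
//		MinTimeout: 2 * time.Second,
//		Refresh: func() (interface{}, string, error) {
//			// look up the watched resource and report its current state
//			return struct{}{}, "COMPLETED", nil
//		},
//	}
//	result, err := conf.WaitForStateContext(ctx, "copyObject")
//	_, _ = result, err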
|
||||
|
||||
// WaitForStateContext watches an object and waits for it to achieve the state
|
||||
// specified in the configuration using the specified Refresh() func,
|
||||
// waiting the number of seconds specified in the timeout configuration.
|
||||
//
|
||||
// If the Refresh function returns an error, exit immediately with that error.
|
||||
//
|
||||
// If the Refresh function returns a state other than the Target state or one
|
||||
// listed in Pending, return immediately with an error.
|
||||
//
|
||||
// If the Timeout is exceeded before reaching the Target state, return an
|
||||
// error.
|
||||
//
|
||||
// Otherwise, the result is the result of the first call to the Refresh function to
|
||||
// reach the target state.
|
||||
//
|
||||
// Cancellation from the passed in context will cancel the refresh loop
|
||||
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
|
||||
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
|
||||
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
|
||||
// Set a default for times to check for not found
|
||||
if conf.NotFoundChecks == 0 {
|
||||
conf.NotFoundChecks = 20
|
||||
}
|
||||
|
||||
if conf.ContinuousTargetOccurrence == 0 {
|
||||
conf.ContinuousTargetOccurrence = 1
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
Result interface{}
|
||||
State string
|
||||
Error error
|
||||
Done bool
|
||||
}
|
||||
|
||||
// Read every result from the refresh loop, waiting for a positive result.Done.
|
||||
resCh := make(chan Result, 1)
|
||||
// cancellation channel for the refresh loop
|
||||
cancelCh := make(chan struct{})
|
||||
|
||||
result := Result{}
|
||||
|
||||
go func() {
|
||||
defer close(resCh)
|
||||
|
||||
select {
|
||||
case <-time.After(conf.Delay):
|
||||
case <-cancelCh:
|
||||
return
|
||||
}
|
||||
|
||||
// start with 0 delay for the first loop
|
||||
var wait time.Duration
|
||||
|
||||
for {
|
||||
// store the last result
|
||||
resCh <- result
|
||||
|
||||
// wait and watch for cancellation
|
||||
select {
|
||||
case <-cancelCh:
|
||||
return
|
||||
case <-time.After(wait):
|
||||
// first round had no wait
|
||||
if wait == 0 {
|
||||
wait = 100 * time.Millisecond
|
||||
}
|
||||
}
|
||||
|
||||
res, currentState, err := conf.Refresh()
|
||||
result = Result{
|
||||
Result: res,
|
||||
State: currentState,
|
||||
Error: err,
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
|
||||
// If we're waiting for the absence of a thing, then return
|
||||
if res == nil && len(conf.Target) == 0 {
|
||||
targetOccurrence++
|
||||
if conf.ContinuousTargetOccurrence == targetOccurrence {
|
||||
result.Done = true
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
// If we didn't find the resource, check if we have been
|
||||
// not finding it for a while, and if so, report an error.
|
||||
notfoundTick++
|
||||
if notfoundTick > conf.NotFoundChecks {
|
||||
result.Error = &NotFoundError{
|
||||
LastError: err,
|
||||
Retries: notfoundTick,
|
||||
}
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Reset the counter for when a resource isn't found
|
||||
notfoundTick = 0
|
||||
found := false
|
||||
|
||||
for _, allowed := range conf.Target {
|
||||
if currentState == allowed {
|
||||
found = true
|
||||
targetOccurrence++
|
||||
if conf.ContinuousTargetOccurrence == targetOccurrence {
|
||||
result.Done = true
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, allowed := range conf.Pending {
|
||||
if currentState == allowed {
|
||||
found = true
|
||||
targetOccurrence = 0
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found && len(conf.Pending) > 0 {
|
||||
result.Error = &UnexpectedStateError{
|
||||
LastError: err,
|
||||
State: result.State,
|
||||
ExpectedState: conf.Target,
|
||||
}
|
||||
resCh <- result
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Wait between refreshes using exponential backoff, except when
|
||||
// waiting for the target state to reoccur.
|
||||
if targetOccurrence == 0 {
|
||||
wait *= 2
|
||||
}
|
||||
|
||||
// If a poll interval has been specified, choose that interval.
|
||||
// Otherwise, bound the default value.
|
||||
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
|
||||
wait = conf.PollInterval
|
||||
} else {
|
||||
if wait < conf.MinTimeout {
|
||||
wait = conf.MinTimeout
|
||||
} else if wait > 10*time.Second {
|
||||
					wait = 10 * time.Second
				}
			}

			// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
		}
	}()

	// store the last value result from the refresh loop
	lastResult := Result{}

	timeout := time.After(conf.Timeout)
	for {
		select {
		case r, ok := <-resCh:
			// channel closed, so return the last result
			if !ok {
				return lastResult.Result, lastResult.Error
			}

			// we reached the intended state
			if r.Done {
				return r.Result, r.Error
			}

			// still waiting, store the last result
			lastResult = r
		case <-ctx.Done():
			close(cancelCh)
			return nil, ctx.Err()
		case <-timeout:
			// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
			// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)

			// cancel the goroutine and start our grace period timer
			close(cancelCh)
			timeout := time.After(refreshGracePeriod)

			// we need a for loop and a label to break on, because we may have
			// an extra response value to read, but still want to wait for the
			// channel to close.
		forSelect:
			for {
				select {
				case r, ok := <-resCh:
					if r.Done {
						// the last refresh loop reached the desired state
						return r.Result, r.Error
					}

					if !ok {
						// the goroutine returned
						break forSelect
					}

					// target state not reached, save the result for the
					// TimeoutError and wait for the channel to close
					lastResult = r
				case <-ctx.Done():
					fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
					break forSelect
				case <-timeout:
					fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
					break forSelect
				}
			}

			return nil, &TimeoutError{
				LastError:     lastResult.Error,
				LastState:     lastResult.State,
				Timeout:       conf.Timeout,
				ExpectedState: conf.Target,
			}
		}
	}
}

// NotFoundError resource not found error
type NotFoundError struct {
	LastError    error
	LastRequest  interface{}
	LastResponse interface{}
	Message      string
	Retries      int
}

func (e *NotFoundError) Error() string {
	if e.Message != "" {
		return e.Message
	}

	if e.Retries > 0 {
		return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
	}

	return "couldn't find resource"
}

func (e *NotFoundError) Unwrap() error {
	return e.LastError
}

// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
	LastError     error
	State         string
	ExpectedState []string
}

func (e *UnexpectedStateError) Error() string {
	return fmt.Sprintf(
		"unexpected state '%s', wanted target '%s'. last error: %s",
		e.State,
		strings.Join(e.ExpectedState, ", "),
		e.LastError,
	)
}

func (e *UnexpectedStateError) Unwrap() error {
	return e.LastError
}

// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

func (e *TimeoutError) Error() string {
	expectedState := "resource to be gone"
	if len(e.ExpectedState) > 0 {
		expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
	}

	extraInfo := make([]string, 0)
	if e.LastState != "" {
		extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
	}
	if e.Timeout > 0 {
		extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
	}

	suffix := ""
	if len(extraInfo) > 0 {
		suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
	}

	if e.LastError != nil {
		return fmt.Sprintf("timeout while waiting for %s%s: %s",
			expectedState, suffix, e.LastError)
	}

	return fmt.Sprintf("timeout while waiting for %s%s",
		expectedState, suffix)
}

func (e *TimeoutError) Unwrap() error {
	return e.LastError
}
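The error types above all implement Unwrap, so a caller can pull them back out of a wrapped error chain with errors.As. A minimal, self-contained sketch of that (timeoutError here is a local mirror of the TimeoutError type above, defined only for illustration since the owning package is not shown in this hunk):

package main

import (
	"errors"
	"fmt"
	"time"
)

// timeoutError mirrors the TimeoutError struct above, purely for illustration.
type timeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

func (e *timeoutError) Error() string { return "timeout while waiting" }
func (e *timeoutError) Unwrap() error { return e.LastError }

func main() {
	// Pretend this came back from a WaitForState-style call and was wrapped on the way up.
	err := fmt.Errorf("refresh: %w", &timeoutError{
		LastError: errors.New("still pending"),
		LastState: "creating",
		Timeout:   5 * time.Minute,
	})

	// Because the type implements Unwrap, errors.As can dig it out of the chain
	// and expose the last observed state to the caller.
	var te *timeoutError
	if errors.As(err, &te) {
		fmt.Printf("timed out after %s in state %q: %v\n", te.Timeout, te.LastState, te.LastError)
	}
}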
259
backend/s3/s3.go
@@ -57,7 +57,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -65,7 +64,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -150,9 +149,6 @@ func init() {
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
}, {
|
||||
Value: "Qiniu",
|
||||
Help: "Qiniu Object Storage (Kodo)",
|
||||
}, {
|
||||
Value: "Other",
|
||||
Help: "Any other S3 compatible provider",
|
||||
@@ -391,34 +387,6 @@ func init() {
|
||||
Value: "auto",
|
||||
Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
|
||||
}},
|
||||
}, {
|
||||
// References:
|
||||
// https://developer.qiniu.com/kodo/4088/s3-access-domainname
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "cn-east-1",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nEast China Region 1.\nNeeds location constraint cn-east-1.",
|
||||
}, {
|
||||
Value: "cn-east-2",
|
||||
Help: "East China Region 2.\nNeeds location constraint cn-east-2.",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "North China Region 1.\nNeeds location constraint cn-north-1.",
|
||||
}, {
|
||||
Value: "cn-south-1",
|
||||
Help: "South China Region 1.\nNeeds location constraint cn-south-1.",
|
||||
}, {
|
||||
Value: "us-north-1",
|
||||
Help: "North America Region.\nNeeds location constraint us-north-1.",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Southeast Asia Region 1.\nNeeds location constraint ap-southeast-1.",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
@@ -436,7 +404,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -1061,37 +1029,10 @@ func init() {
|
||||
Value: "nz.s3.rackcorp.com",
|
||||
Help: "Auckland (New Zealand) Endpoint",
|
||||
}},
|
||||
}, {
|
||||
// Qiniu endpoints: https://developer.qiniu.com/kodo/4088/s3-access-domainname
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Qiniu Object Storage.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3-cn-east-1.qiniucs.com",
|
||||
Help: "East China Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-cn-east-2.qiniucs.com",
|
||||
Help: "East China Endpoint 2",
|
||||
}, {
|
||||
Value: "s3-cn-north-1.qiniucs.com",
|
||||
Help: "North China Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-cn-south-1.qiniucs.com",
|
||||
Help: "South China Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-us-north-1.qiniucs.com",
|
||||
Help: "North America Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-ap-southeast-1.qiniucs.com",
|
||||
Help: "Southeast Asia Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-ap-northeast-1.qiniucs.com",
|
||||
Help: "Northeast Asia Endpoint 1",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1126,39 +1067,15 @@ func init() {
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.wasabisys.com",
|
||||
Help: "Wasabi US East 1 (N. Virginia)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.us-east-2.wasabisys.com",
|
||||
Help: "Wasabi US East 2 (N. Virginia)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.us-central-1.wasabisys.com",
|
||||
Help: "Wasabi US Central 1 (Texas)",
|
||||
Help: "Wasabi US East endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.us-west-1.wasabisys.com",
|
||||
Help: "Wasabi US West 1 (Oregon)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ca-central-1.wasabisys.com",
|
||||
Help: "Wasabi CA Central 1 (Toronto)",
|
||||
Help: "Wasabi US West endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.eu-central-1.wasabisys.com",
|
||||
Help: "Wasabi EU Central 1 (Amsterdam)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.eu-central-2.wasabisys.com",
|
||||
Help: "Wasabi EU Central 2 (Frankfurt)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.eu-west-1.wasabisys.com",
|
||||
Help: "Wasabi EU West 1 (London)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.eu-west-2.wasabisys.com",
|
||||
Help: "Wasabi EU West 2 (Paris)",
|
||||
Help: "Wasabi EU Central endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-northeast-1.wasabisys.com",
|
||||
@@ -1168,14 +1085,6 @@ func init() {
|
||||
Value: "s3.ap-northeast-2.wasabisys.com",
|
||||
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-1.wasabisys.com",
|
||||
Help: "Wasabi AP Southeast 1 (Singapore)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-2.wasabisys.com",
|
||||
Help: "Wasabi AP Southeast 2 (Sydney)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "s3.ir-thr-at1.arvanstorage.com",
|
||||
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
|
||||
@@ -1530,36 +1439,10 @@ func init() {
|
||||
Value: "nz",
|
||||
Help: "Auckland (New Zealand) Region",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "cn-east-1",
|
||||
Help: "East China Region 1",
|
||||
}, {
|
||||
Value: "cn-east-2",
|
||||
Help: "East China Region 2",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "North China Region 1",
|
||||
}, {
|
||||
Value: "cn-south-1",
|
||||
Help: "South China Region 1",
|
||||
}, {
|
||||
Value: "us-north-1",
|
||||
Help: "North America Region 1",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Southeast Asia Region 1",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Northeast Asia Region 1",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1683,21 +1566,8 @@ isn't set then "acl" is used instead.`,
|
||||
Help: "arn:aws:kms:*",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
|
||||
|
||||
Alternatively you can provide --sse-customer-key-base64.`,
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key_base64",
|
||||
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
|
||||
|
||||
Alternatively you can provide --sse-customer-key.`,
|
||||
Name: "sse_customer_key",
|
||||
Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
|
||||
Provider: "AWS,Ceph,ChinaMobile,Minio",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
@@ -1826,24 +1696,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Value: "GLACIER",
|
||||
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in Qiniu.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}, {
|
||||
Value: "LINE",
|
||||
Help: "Infrequent access storage mode",
|
||||
}, {
|
||||
Value: "GLACIER",
|
||||
Help: "Archive storage mode",
|
||||
}, {
|
||||
Value: "DEEP_ARCHIVE",
|
||||
Help: "Deep archive storage mode",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
@@ -2196,36 +2048,6 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "might_gzip",
|
||||
Help: strings.ReplaceAll(`Set this if the backend might gzip objects.
|
||||
|
||||
Normally providers will not alter objects when they are downloaded. If
|
||||
an object was not uploaded with |Content-Encoding: gzip| then it won't
|
||||
be set on download.
|
||||
|
||||
However some providers may gzip objects even if they weren't uploaded
|
||||
with |Content-Encoding: gzip| (eg Cloudflare).
|
||||
|
||||
A symptom of this would be receiving errors like
|
||||
|
||||
ERROR corrupted on transfer: sizes differ NNN vs MMM
|
||||
|
||||
If you set this flag and rclone downloads an object with
|
||||
Content-Encoding: gzip set and chunked transfer encoding, then rclone
|
||||
will decompress the object on the fly.
|
||||
|
||||
If this is set to unset (the default) then rclone will choose
|
||||
according to the provider setting what to apply, but you can override
|
||||
rclone's choice here.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_system_metadata",
|
||||
Help: `Suppress setting and reading of system metadata`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
},
|
||||
}})
|
||||
}
|
||||
@@ -2320,7 +2142,6 @@ type Options struct {
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
SSECustomerKey string `config:"sse_customer_key"`
|
||||
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
|
||||
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
@@ -2352,8 +2173,6 @@ type Options struct {
|
||||
Versions bool `config:"versions"`
|
||||
VersionAt fs.Time `config:"version_at"`
|
||||
Decompress bool `config:"decompress"`
|
||||
MightGzip fs.Tristate `config:"might_gzip"`
|
||||
NoSystemMetadata bool `config:"no_system_metadata"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -2713,12 +2532,10 @@ func setQuirks(opt *Options) {
|
||||
virtualHostStyle = true
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = true
|
||||
mightGzip = true // assume all providers might gzip until proven otherwise
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
// No quirks
|
||||
mightGzip = false // Never auto gzips objects
|
||||
case "Alibaba":
|
||||
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
|
||||
case "HuaweiOBS":
|
||||
@@ -2791,9 +2608,6 @@ func setQuirks(opt *Options) {
|
||||
useMultipartEtag = false // untested
|
||||
case "Wasabi":
|
||||
// No quirks
|
||||
case "Qiniu":
|
||||
useMultipartEtag = false
|
||||
urlEncodeListings = false
|
||||
case "Other":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
@@ -2832,12 +2646,6 @@ func setQuirks(opt *Options) {
|
||||
opt.UseMultipartEtag.Valid = true
|
||||
opt.UseMultipartEtag.Value = useMultipartEtag
|
||||
}
|
||||
|
||||
// set MightGzip if not manually set
|
||||
if !opt.MightGzip.Valid {
|
||||
opt.MightGzip.Valid = true
|
||||
opt.MightGzip.Value = mightGzip
|
||||
}
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
@@ -2871,16 +2679,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = opt.ACL
|
||||
}
|
||||
if opt.SSECustomerKeyBase64 != "" && opt.SSECustomerKey != "" {
|
||||
return nil, errors.New("s3: can't use sse_customer_key and sse_customer_key_base64 at the same time")
|
||||
} else if opt.SSECustomerKeyBase64 != "" {
|
||||
// Decode the base64-encoded key and store it in the SSECustomerKey field
|
||||
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("s3: Could not decode sse_customer_key_base64: %w", err)
|
||||
}
|
||||
opt.SSECustomerKey = string(decoded)
|
||||
}
|
||||
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
|
||||
// calculate CustomerKeyMD5 if not supplied
|
||||
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
|
||||
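For reference, a small stdlib-only sketch of the key handling performed in the hunk above: a hypothetical raw key is base64-encoded (the sse_customer_key_base64 form), decoded back as the code does, and its MD5 digest computed as md5.Sum does. The final encoding of that digest into the header value is outside this hunk, so base64 is only assumed here:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// A hypothetical 32-byte SSE-C key; real keys should come from a secure source.
	rawKey := []byte("0123456789abcdef0123456789abcdef")

	// What would go into sse_customer_key_base64.
	keyB64 := base64.StdEncoding.EncodeToString(rawKey)

	// Mirror of the decode step shown above.
	decoded, err := base64.StdEncoding.DecodeString(keyB64)
	if err != nil {
		panic(err)
	}

	// Mirror of the md5.Sum step; SSE-C requests carry a digest of the key so the
	// server can detect a mismatched key (assumed base64-encoded here).
	digest := md5.Sum(decoded)
	fmt.Println("key (base64):    ", keyB64)
	fmt.Println("key MD5 (base64):", base64.StdEncoding.EncodeToString(digest[:]))
}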
@@ -2955,6 +2753,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
if opt.Provider == "Storj" {
|
||||
f.features.Copy = nil
|
||||
f.features.SetTier = false
|
||||
f.features.GetTier = false
|
||||
}
|
||||
@@ -3183,11 +2982,8 @@ func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister {
|
||||
// Do a V2 listing
|
||||
func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
|
||||
resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ls.req.ContinuationToken = resp.NextContinuationToken
|
||||
return resp, nil, nil
|
||||
return resp, nil, err
|
||||
}
|
||||
|
||||
// URL Encode the listings
|
||||
@@ -4687,15 +4483,7 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
|
||||
o.lastModified = time.Now()
|
||||
fs.Logf(o, "Failed to read last modified")
|
||||
} else {
|
||||
// Try to keep the maximum precision in lastModified. If we read
|
||||
// it from listings then it may have millisecond precision, but
|
||||
// if we read it from a HEAD/GET request then it will have
|
||||
// second precision.
|
||||
equalToWithinOneSecond := o.lastModified.Truncate(time.Second).Equal((*resp.LastModified).Truncate(time.Second))
|
||||
newHasNs := (*resp.LastModified).Nanosecond() != 0
|
||||
if !equalToWithinOneSecond || newHasNs {
|
||||
o.lastModified = *resp.LastModified
|
||||
}
|
||||
o.lastModified = *resp.LastModified
|
||||
}
|
||||
o.mimeType = aws.StringValue(resp.ContentType)
|
||||
|
||||
@@ -4926,7 +4714,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Decompress body if necessary
|
||||
if aws.StringValue(resp.ContentEncoding) == "gzip" {
|
||||
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
|
||||
if o.fs.opt.Decompress {
|
||||
return readers.NewGzipReader(resp.Body)
|
||||
}
|
||||
o.fs.warnCompressed.Do(func() {
|
||||
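A rough stdlib illustration of what the decompress path above amounts to: when a body arrives with Content-Encoding: gzip, wrap it in a gzip reader and hand back the decompressed stream (rclone uses readers.NewGzipReader; plain compress/gzip stands in for it here):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// gzipBody stands in for a response body that a provider gzipped in transit.
func gzipBody(s string) io.Reader {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte(s))
	_ = zw.Close()
	return &buf
}

func main() {
	body := gzipBody("hello, decompressed world")

	// Equivalent of decompressing the object on the fly when
	// Content-Encoding: gzip is seen on the response.
	zr, err := gzip.NewReader(body)
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}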
@@ -5301,10 +5089,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
for k, v := range meta {
|
||||
pv := aws.String(v)
|
||||
k = strings.ToLower(k)
|
||||
if o.fs.opt.NoSystemMetadata {
|
||||
req.Metadata[k] = pv
|
||||
continue
|
||||
}
|
||||
switch k {
|
||||
case "cache-control":
|
||||
req.CacheControl = pv
|
||||
@@ -5425,20 +5209,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
}
|
||||
|
||||
// Check metadata keys and values are valid
|
||||
for key, value := range req.Metadata {
|
||||
if !httpguts.ValidHeaderFieldName(key) {
|
||||
fs.Errorf(o, "Dropping invalid metadata key %q", key)
|
||||
delete(req.Metadata, key)
|
||||
} else if value == nil {
|
||||
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
|
||||
delete(req.Metadata, key)
|
||||
} else if !httpguts.ValidHeaderFieldValue(*value) {
|
||||
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key)
|
||||
delete(req.Metadata, key)
|
||||
}
|
||||
}
|
||||
|
||||
var wantETag string // Multipart upload Etag to check
|
||||
var gotEtag string // Etag we got from the upload
|
||||
var lastModified time.Time // Time we got from the upload
|
||||
@@ -5583,9 +5353,6 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
|
||||
|
||||
// Set system metadata
|
||||
setMetadata := func(k string, v *string) {
|
||||
if o.fs.opt.NoSystemMetadata {
|
||||
return
|
||||
}
|
||||
if v == nil || *v == "" {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,233 +0,0 @@
|
||||
package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/hirochachacha/go-smb2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
)
|
||||
|
||||
// dial starts a client connection to the given SMB server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SMB handshake, and then sets up a Client.
|
||||
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
dialer := fshttp.NewDialer(ctx)
|
||||
tconn, err := dialer.Dial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pass := ""
|
||||
if f.opt.Pass != "" {
|
||||
pass, err = obscure.Reveal(f.opt.Pass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
d := &smb2.Dialer{
|
||||
Initiator: &smb2.NTLMInitiator{
|
||||
User: f.opt.User,
|
||||
Password: pass,
|
||||
Domain: f.opt.Domain,
|
||||
},
|
||||
}
|
||||
|
||||
session, err := d.DialContext(ctx, tconn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &conn{
|
||||
smbSession: session,
|
||||
conn: &tconn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// conn encapsulates a SMB client and corresponding SMB client
|
||||
type conn struct {
|
||||
conn *net.Conn
|
||||
smbSession *smb2.Session
|
||||
smbShare *smb2.Share
|
||||
shareName string
|
||||
}
|
||||
|
||||
// Closes the connection
|
||||
func (c *conn) close() (err error) {
|
||||
if c.smbShare != nil {
|
||||
err = c.smbShare.Umount()
|
||||
}
|
||||
sessionLogoffErr := c.smbSession.Logoff()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sessionLogoffErr
|
||||
}
|
||||
|
||||
// True if it's closed
|
||||
func (c *conn) closed() bool {
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
return nopErr == nil
|
||||
}
|
||||
|
||||
// Show that we are using a SMB session
|
||||
//
|
||||
// Call removeSession() when done
|
||||
func (f *Fs) addSession() {
|
||||
atomic.AddInt32(&f.sessions, 1)
|
||||
}
|
||||
|
||||
// Show the SMB session is no longer in use
|
||||
func (f *Fs) removeSession() {
|
||||
atomic.AddInt32(&f.sessions, -1)
|
||||
}
|
||||
|
||||
// getSessions shows whether there are any sessions in use
|
||||
func (f *Fs) getSessions() int32 {
|
||||
return atomic.LoadInt32(&f.sessions)
|
||||
}
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
ctx = context.Background()
|
||||
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
}
|
||||
if share != "" {
|
||||
// mount the specified share as well if user requested
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
if err != nil {
|
||||
_ = c.smbSession.Logoff()
|
||||
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
|
||||
}
|
||||
c.smbShare = c.smbShare.WithContext(ctx)
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Ensure the specified share is mounted or the session is unmounted
|
||||
func (c *conn) mountShare(share string) (err error) {
|
||||
if c.shareName == share {
|
||||
return nil
|
||||
}
|
||||
if c.smbShare != nil {
|
||||
err = c.smbShare.Umount()
|
||||
c.smbShare = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if share != "" {
|
||||
c.smbShare, err = c.smbSession.Mount(share)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
c.shareName = share
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a SMB connection from the pool, or open a new one
|
||||
func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
accounting.LimitTPS(ctx)
|
||||
f.poolMu.Lock()
|
||||
for len(f.pool) > 0 {
|
||||
c = f.pool[0]
|
||||
f.pool = f.pool[1:]
|
||||
err = c.mountShare(share)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
fs.Debugf(f, "Discarding unusable SMB connection: %v", err)
|
||||
c = nil
|
||||
}
|
||||
f.poolMu.Unlock()
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
c, err = f.newConnection(ctx, share)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return c, err
|
||||
}
|
||||
|
||||
// Return a SMB connection to the pool
|
||||
//
|
||||
// It nils the pointed to connection out so it can't be reused
|
||||
func (f *Fs) putConnection(pc **conn) {
|
||||
c := *pc
|
||||
*pc = nil
|
||||
|
||||
var nopErr error
|
||||
if c.smbShare != nil {
|
||||
// stat the current directory
|
||||
_, nopErr = c.smbShare.Stat(".")
|
||||
} else {
|
||||
// list the shares
|
||||
_, nopErr = c.smbSession.ListSharenames()
|
||||
}
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
_ = c.close()
|
||||
return
|
||||
}
|
||||
|
||||
f.poolMu.Lock()
|
||||
f.pool = append(f.pool, c)
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
|
||||
}
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// Drain the pool of any connections
|
||||
func (f *Fs) drainPool(ctx context.Context) (err error) {
|
||||
f.poolMu.Lock()
|
||||
defer f.poolMu.Unlock()
|
||||
if sessions := f.getSessions(); sessions != 0 {
|
||||
fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions)
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain.Stop()
|
||||
}
|
||||
if len(f.pool) != 0 {
|
||||
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
|
||||
}
|
||||
for i, c := range f.pool {
|
||||
if !c.closed() {
|
||||
cErr := c.close()
|
||||
if cErr != nil {
|
||||
err = cErr
|
||||
}
|
||||
}
|
||||
f.pool[i] = nil
|
||||
}
|
||||
f.pool = nil
|
||||
return err
|
||||
}
|
||||
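The deleted connection.go above follows a common pool pattern: take a connection from a mutex-guarded slice, verify it still works, fall back to dialing a new one, and return it afterwards while nudging an idle-drain timer. A compressed, hypothetical sketch of that shape (not rclone's actual types):

package main

import (
	"fmt"
	"sync"
	"time"
)

type fakeConn struct{ id int }

func (c *fakeConn) alive() bool { return true } // stand-in for the no-op stat/list check

// pool is a stripped-down illustration only: mutex-guarded slice, liveness check
// before reuse, idle timer nudged on return.
type pool struct {
	mu    sync.Mutex
	conns []*fakeConn
	drain *time.Timer
}

func (p *pool) get() *fakeConn {
	p.mu.Lock()
	defer p.mu.Unlock()
	for len(p.conns) > 0 {
		c := p.conns[0]
		p.conns = p.conns[1:]
		if c.alive() {
			return c // reuse a healthy pooled connection
		}
	}
	return &fakeConn{id: int(time.Now().UnixNano())} // otherwise "dial" a new one
}

func (p *pool) put(c *fakeConn) {
	p.mu.Lock()
	p.conns = append(p.conns, c)
	if p.drain != nil {
		p.drain.Reset(time.Minute) // nudge the idle-drain timer, as putConnection does
	}
	p.mu.Unlock()
}

func main() {
	p := &pool{}
	c := p.get()
	defer p.put(c)
	fmt.Println("using connection", c.id)
}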
@@ -1,789 +0,0 @@
|
||||
// Package smb provides an interface to SMB servers
|
||||
package smb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
currentUser = env.CurrentUser()
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "smb",
|
||||
Description: "SMB / CIFS",
|
||||
NewFs: NewFs,
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SMB port number.",
|
||||
Default: 445,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "SMB password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
Help: `Max time before closing idle connections.
|
||||
|
||||
If no connections have been returned to the connection pool in the time
|
||||
given, rclone will empty the connection pool.
|
||||
|
||||
Set to 0 to keep connections indefinitely.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hide_special_share",
|
||||
Help: "Hide special shares (e.g. print$) which users aren't supposed to access.",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "case_insensitive",
|
||||
Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: encoder.EncodeZero |
|
||||
// path separator
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeBackSlash |
|
||||
// windows
|
||||
encoder.EncodeWin |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeDot |
|
||||
// the file turns into 8.3 names (and cannot be converted back)
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeRightPeriod |
|
||||
//
|
||||
encoder.EncodeInvalidUtf8,
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
Port string `config:"port"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Domain string `config:"domain"`
|
||||
HideSpecial bool `config:"hide_special_share"`
|
||||
CaseInsensitive bool `config:"case_insensitive"`
|
||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a SMB remote
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
pacer *fs.Pacer // pacer for operations
|
||||
|
||||
sessions int32
|
||||
poolMu sync.Mutex
|
||||
pool []*conn
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// Object describes a file at the server
|
||||
type Object struct {
|
||||
fs *Fs // reference to Fs
|
||||
remote string // the remote path
|
||||
statResult os.FileInfo
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ctx: ctx,
|
||||
root: root,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: opt.CaseInsensitive,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
// set the pool drainer timer going
|
||||
if opt.IdleTimeout > 0 {
|
||||
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||
}
|
||||
|
||||
// test if the root exists as a file
|
||||
share, dir := f.split("")
|
||||
if share == "" || dir == "" {
|
||||
return f, nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
// ignore stat error here
|
||||
return f, nil
|
||||
}
|
||||
if !stat.IsDir() {
|
||||
f.root, err = path.Dir(root), fs.ErrorIsFile
|
||||
}
|
||||
fs.Debugf(f, "Using root directory %q", f.root)
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
bucket, file := f.split("")
|
||||
if bucket == "" {
|
||||
return fmt.Sprintf("smb://%s@%s:%s/", f.opt.User, f.opt.Host, f.opt.Port)
|
||||
}
|
||||
return fmt.Sprintf("smb://%s@%s:%s/%s/%s", f.opt.User, f.opt.Host, f.opt.Port, bucket, file)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Hashes returns nothing as SMB itself doesn't have a way to tell checksums
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet()
|
||||
}
|
||||
|
||||
// Precision returns the precision of mtime
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Millisecond
|
||||
}
|
||||
|
||||
// NewObject creates a new file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
share, path := f.split(remote)
|
||||
return f.findObjectSeparate(ctx, share, path)
|
||||
}
|
||||
|
||||
func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Object, error) {
|
||||
if share == "" || path == "" {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
if stat.IsDir() {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
|
||||
return f.makeEntry(share, path, stat), nil
|
||||
}
|
||||
|
||||
// Mkdir creates a directory on the server
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
share, path := f.split(dir)
|
||||
if share == "" || path == "" {
|
||||
return nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
// Rmdir removes an empty directory on the server
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
share, path := f.split(dir)
|
||||
if share == "" || path == "" {
|
||||
return nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.Remove(f.toSambaPath(path))
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
// Put uploads a file
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
if err == nil {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
err := o.Update(ctx, in, src, options...)
|
||||
if err == nil {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
|
||||
dstShare, dstPath := f.split(remote)
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
srcShare, srcPath := srcObj.split()
|
||||
if dstShare != srcShare {
|
||||
fs.Debugf(src, "Can't move - must be on the same share")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
err = f.ensureDirectory(ctx, dstShare, dstPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make parent directories: %w", err)
|
||||
}
|
||||
|
||||
cn, err := f.getConnection(ctx, dstShare)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
return nil, translateError(err, false)
|
||||
}
|
||||
return f.findObjectSeparate(ctx, dstShare, dstPath)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
dstShare, dstPath := f.split(dstRemote)
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcShare, srcPath := srcFs.split(srcRemote)
|
||||
if dstShare != srcShare {
|
||||
fs.Debugf(src, "Can't move - must be on the same share")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
err = f.ensureDirectory(ctx, dstShare, dstPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make parent directories: %w", err)
|
||||
}
|
||||
|
||||
cn, err := f.getConnection(ctx, dstShare)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
|
||||
_, err = cn.smbShare.Stat(dstPath)
|
||||
if os.IsNotExist(err) {
|
||||
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
|
||||
return translateError(err, true)
|
||||
}
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// List files and directories in a directory
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
share, _path := f.split(dir)
|
||||
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.putConnection(&cn)
|
||||
|
||||
if share == "" {
|
||||
shares, err := cn.smbSession.ListSharenames()
|
||||
for _, shh := range shares {
|
||||
shh = f.toNativePath(shh)
|
||||
if strings.HasSuffix(shh, "$") && f.opt.HideSpecial {
|
||||
continue
|
||||
}
|
||||
entries = append(entries, fs.NewDir(shh, time.Time{}))
|
||||
}
|
||||
return entries, err
|
||||
}
|
||||
|
||||
dirents, err := cn.smbShare.ReadDir(f.toSambaPath(_path))
|
||||
if err != nil {
|
||||
return entries, translateError(err, true)
|
||||
}
|
||||
for _, file := range dirents {
|
||||
nfn := f.toNativePath(file.Name())
|
||||
if file.IsDir() {
|
||||
entries = append(entries, fs.NewDir(path.Join(dir, nfn), file.ModTime()))
|
||||
} else {
|
||||
entries = append(entries, f.makeEntryRelative(share, _path, nfn, file))
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// About returns things about remaining and used spaces
|
||||
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
share, dir := f.split("/")
|
||||
if share == "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
dir = f.toSambaPath(dir)
|
||||
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat, err := cn.smbShare.Statfs(dir)
|
||||
f.putConnection(&cn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bs := int64(stat.BlockSize())
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
|
||||
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
|
||||
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return f.drainPool(ctx)
|
||||
}
|
||||
|
||||
func (f *Fs) makeEntry(share, _path string, stat os.FileInfo) *Object {
|
||||
remote := path.Join(share, _path)
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: trimPathPrefix(remote, f.root),
|
||||
statResult: stat,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo) *Object {
|
||||
return f.makeEntry(share, path.Join(_path, relative), stat)
|
||||
}
|
||||
|
||||
func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
/// Object
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime is the last modified time (read-only)
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.statResult.ModTime()
|
||||
}
|
||||
|
||||
// Size is the file length
|
||||
func (o *Object) Size() int64 {
|
||||
return o.statResult.Size()
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Hash always returns empty value
|
||||
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable returns if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets modTime on a particular file
|
||||
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
|
||||
share, reqDir := o.split()
|
||||
if share == "" || reqDir == "" {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
reqDir = o.fs.toSambaPath(reqDir)
|
||||
|
||||
cn, err := o.fs.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer o.fs.putConnection(&cn)
|
||||
|
||||
err = cn.smbShare.Chtimes(reqDir, t, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fi, err := cn.smbShare.Stat(reqDir)
|
||||
if err == nil {
|
||||
o.statResult = fi
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
share, filename := o.split()
|
||||
if share == "" || filename == "" {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
filename = o.fs.toSambaPath(filename)
|
||||
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
o.fs.addSession() // Show session in use
|
||||
defer o.fs.removeSession()
|
||||
|
||||
cn, err := o.fs.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
pos, err := fl.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to seek: %w", err)
|
||||
}
|
||||
if pos != offset {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
|
||||
}
|
||||
|
||||
in = readers.NewLimitedReadCloser(fl, limit)
|
||||
in = &boundReadCloser{
|
||||
rc: in,
|
||||
close: func() error {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return in, nil
|
||||
}
|
||||
|
||||
// Update the Object from in with modTime and size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
share, filename := o.split()
|
||||
if share == "" || filename == "" {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
|
||||
err = o.fs.ensureDirectory(ctx, share, filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make parent directories: %w", err)
|
||||
}
|
||||
|
||||
filename = o.fs.toSambaPath(filename)
|
||||
|
||||
o.fs.addSession() // Show session in use
|
||||
defer o.fs.removeSession()
|
||||
|
||||
cn, err := o.fs.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
o.statResult, _ = cn.smbShare.Stat(filename)
|
||||
o.fs.putConnection(&cn)
|
||||
}()
|
||||
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
|
||||
// remove the file if upload failed
|
||||
remove := func() {
|
||||
// Windows doesn't allow removal of files without closing file
|
||||
removeErr := fl.Close()
|
||||
if removeErr != nil {
|
||||
fs.Debugf(src, "failed to close the file for delete: %v", removeErr)
|
||||
// try to remove the file anyway; the file may be already closed
|
||||
}
|
||||
|
||||
removeErr = cn.smbShare.Remove(filename)
|
||||
if removeErr != nil {
|
||||
fs.Debugf(src, "failed to remove: %v", removeErr)
|
||||
} else {
|
||||
fs.Debugf(src, "removed after failed upload: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = fl.ReadFrom(in)
|
||||
if err != nil {
|
||||
remove()
|
||||
return fmt.Errorf("Update ReadFrom failed: %w", err)
|
||||
}
|
||||
|
||||
err = fl.Close()
|
||||
if err != nil {
|
||||
remove()
|
||||
return fmt.Errorf("Update Close failed: %w", err)
|
||||
}
|
||||
|
||||
// Set the modified time
|
||||
err = o.SetModTime(ctx, src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update SetModTime failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
share, filename := o.split()
|
||||
if share == "" || filename == "" {
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
filename = o.fs.toSambaPath(filename)
|
||||
|
||||
cn, err := o.fs.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = cn.smbShare.Remove(filename)
|
||||
o.fs.putConnection(&cn)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// String converts this Object to a string
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
/// Misc
|
||||
|
||||
// split returns share name and path in the share from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (shareName, filepath string) {
|
||||
return bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
}
|
||||
|
||||
// split returns share name and path in the share from the object
|
||||
func (o *Object) split() (shareName, filepath string) {
|
||||
return o.fs.split(o.remote)
|
||||
}
|
||||
|
||||
func (f *Fs) toSambaPath(path string) string {
|
||||
// 1. encode via Rclone's escaping system
|
||||
// 2. convert to backslash-separated path
|
||||
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(path), "/", "\\")
|
||||
}
|
||||
|
||||
func (f *Fs) toNativePath(path string) string {
|
||||
// 1. convert *back* to slash-separated path
|
||||
// 2. encode via Rclone's escaping system
|
||||
return f.opt.Enc.ToStandardPath(strings.ReplaceAll(path, "\\", "/"))
|
||||
}
|
||||
|
||||
func ensureSuffix(s, suffix string) string {
|
||||
if strings.HasSuffix(s, suffix) {
|
||||
return s
|
||||
}
|
||||
return s + suffix
|
||||
}
|
||||
|
||||
func trimPathPrefix(s, prefix string) string {
|
||||
// we need to clean the paths to make tests pass!
|
||||
s = betterPathClean(s)
|
||||
prefix = betterPathClean(prefix)
|
||||
if s == prefix || s == prefix+"/" {
|
||||
return ""
|
||||
}
|
||||
prefix = ensureSuffix(prefix, "/")
|
||||
return strings.TrimPrefix(s, prefix)
|
||||
}
|
||||
|
||||
func betterPathClean(p string) string {
|
||||
d := path.Clean(p)
|
||||
if d == "." {
|
||||
return ""
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
type boundReadCloser struct {
|
||||
rc io.ReadCloser
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (r *boundReadCloser) Read(p []byte) (n int, err error) {
|
||||
return r.rc.Read(p)
|
||||
}
|
||||
|
||||
func (r *boundReadCloser) Close() error {
|
||||
err1 := r.rc.Close()
|
||||
err2 := r.close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
func translateError(e error, dir bool) error {
|
||||
if os.IsNotExist(e) {
|
||||
if dir {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Abouter = &Fs{}
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ io.ReadCloser = &boundReadCloser{}
|
||||
)
|
||||
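The path helpers near the end of the deleted smb.go reduce to a share/path split plus a slash-to-backslash swap around rclone's encoder. A small stdlib sketch of just that part, with the encoder step omitted and the example paths purely hypothetical:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	root := "share/music"
	remote := "albums/track 01.flac"

	full := path.Join(root, remote) // "share/music/albums/track 01.flac"
	share, rest, _ := strings.Cut(full, "/")
	fmt.Println(share) // share
	fmt.Println(rest)  // music/albums/track 01.flac

	// toSambaPath direction: forward slashes become backslashes.
	samba := strings.ReplaceAll(rest, "/", "\\")
	fmt.Println(samba) // music\albums\track 01.flac

	// toNativePath direction: convert back again.
	back := strings.ReplaceAll(samba, "\\", "/")
	fmt.Println(back == rest) // true
}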
@@ -1,17 +0,0 @@
// Test smb filesystem interface
package smb_test

import (
	"testing"

	"github.com/rclone/rclone/backend/smb"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestSMB:rclone",
		NilObject:  (*smb.Object)(nil),
	})
}
@@ -40,7 +40,7 @@ const (
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
)
|
||||
|
||||
// SharedOptions are shared between swift and backends which depend on swift
|
||||
// SharedOptions are shared between swift and hubic
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: `Above this size files will be chunked into a _segments container.
|
||||
@@ -63,32 +63,6 @@ Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_large_objects",
|
||||
Help: strings.ReplaceAll(`Disable support for static and dynamic large objects
|
||||
|
||||
Swift cannot transparently store files bigger than 5 GiB. There are
|
||||
two schemes for doing that, static or dynamic large objects, and the
|
||||
API does not allow rclone to determine whether a file is a static or
|
||||
dynamic large object without doing a HEAD on the object. Since these
|
||||
need to be treated differently, this means rclone has to issue HEAD
|
||||
requests for objects for example when reading checksums.
|
||||
|
||||
When |no_large_objects| is set, rclone will assume that there are no
|
||||
static or dynamic large objects stored. This means it can stop doing
|
||||
the extra HEAD calls which in turn increases performance greatly
|
||||
especially when doing a swift to swift transfer with |--checksum| set.
|
||||
|
||||
Setting this option implies |no_chunk| and also that no files will be
|
||||
uploaded in chunks, so files bigger than 5 GiB will just fail on
|
||||
upload.
|
||||
|
||||
If you set this option and there *are* static or dynamic large objects,
|
||||
then this will give incorrect hashes for them. Downloads will succeed,
|
||||
but other operations such as Remove and Copy will fail.
|
||||
`, "|", "`"),
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -248,7 +222,6 @@ type Options struct {
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
NoChunk bool `config:"no_chunk"`
|
||||
NoLargeObjects bool `config:"no_large_objects"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -1127,24 +1100,15 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
|
||||
|
||||
// isDynamicLargeObject checks for X-Object-Manifest header
|
||||
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
|
||||
if o.fs.opt.NoLargeObjects {
|
||||
return false, nil
|
||||
}
|
||||
return o.hasHeader(ctx, "X-Object-Manifest")
|
||||
}
|
||||
|
||||
// isStaticLargeObjectFile checks for the X-Static-Large-Object header
|
||||
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
|
||||
if o.fs.opt.NoLargeObjects {
|
||||
return false, nil
|
||||
}
|
||||
return o.hasHeader(ctx, "X-Static-Large-Object")
|
||||
}
|
||||
|
||||
func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
|
||||
if o.fs.opt.NoLargeObjects {
|
||||
return false, nil
|
||||
}
|
||||
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
|
||||
if result {
|
||||
return
|
||||
@@ -1500,7 +1464,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
headers := m.ObjectHeaders()
|
||||
fs.OpenOptionAddHeaders(options, headers)
|
||||
|
||||
if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
|
||||
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
|
||||
_, err = o.updateChunks(ctx, in, headers, size, contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -894,22 +894,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove, slowHash := true, false
|
||||
canMove := true
|
||||
for _, f := range upstreams {
|
||||
features = features.Mask(ctx, f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(f) {
|
||||
canMove = false
|
||||
}
|
||||
slowHash = slowHash || f.Features().SlowHash
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// If any of upstreams are SlowHash, propagate it
|
||||
features.SlowHash = slowHash
|
||||
|
||||
// Enable ListR when upstreams either support ListR or is local
|
||||
// But not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
|
||||
@@ -991,7 +991,6 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
}
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
srcFs := srcObj.fs
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDir(ctx, dstPath)
|
||||
if err != nil {
|
||||
@@ -1014,10 +1013,9 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
if f.useOCMtime {
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
}
|
||||
// Direct the MOVE/COPY to the source server
|
||||
err = srcFs.pacer.Call(func() (bool, error) {
|
||||
resp, err = srcFs.srv.Call(ctx, &opts)
|
||||
return srcFs.shouldRetry(ctx, resp, err)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy call failed: %w", err)
|
||||
@@ -1111,10 +1109,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
"Overwrite": "F",
|
||||
},
|
||||
}
|
||||
// Direct the MOVE/COPY to the source server
|
||||
err = srcFs.pacer.Call(func() (bool, error) {
|
||||
resp, err = srcFs.srv.Call(ctx, &opts)
|
||||
return srcFs.shouldRetry(ctx, resp, err)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("DirMove MOVE call failed: %w", err)
|
||||
|
||||
@@ -49,6 +49,7 @@ docs = [
|
||||
"hdfs.md",
|
||||
"hidrive.md",
|
||||
"http.md",
|
||||
"hubic.md",
|
||||
"internetarchive.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
@@ -59,7 +60,6 @@ docs = [
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
"opendrive.md",
|
||||
"oracleobjectstorage.md",
|
||||
"qingstor.md",
|
||||
"sia.md",
|
||||
"swift.md",
|
||||
@@ -68,7 +68,6 @@ docs = [
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
"sftp.md",
|
||||
"smb.md",
|
||||
"storj.md",
|
||||
"sugarsync.md",
|
||||
"tardigrade.md", # stub only to redirect to storj.md
|
||||
|
||||
@@ -235,8 +235,8 @@ applications won't work with their files on an rclone mount without
`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
See the [VFS File Caching](#vfs-file-caching) section for more info.

The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
do not support the concept of empty directories, so empty
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

@@ -97,10 +97,6 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
		return nil, err
	}

	if mountOpt.Daemon {
		return nil, errors.New("Daemon Option not supported over the API")
	}

	mountType, err := in.GetString("mountType")

	mountMu.Lock()
@@ -126,15 +122,7 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
		log.Printf("mount FAILED: %v", err)
		return nil, err
	}
	go func() {
		if err = mnt.Wait(); err != nil {
			log.Printf("unmount FAILED: %v", err)
			return
		}
		mountMu.Lock()
		defer mountMu.Unlock()
		delete(liveMounts, mountPoint)
	}()

	// Add mount to list if mount point was successfully created
	liveMounts[mountPoint] = mnt

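The second hunk above removes the v1.60.0 handling of external unmounts ("Handle external unmount when mounting" in the changelog below): after a successful `mount/mount`, a goroutine waits on the mount and drops it from the live-mount table once it goes away. A self-contained sketch of that pattern follows; the `mountPoint` interface is an assumption standing in for mountlib's mount handle.

```go
package mountsketch

import (
	"log"
	"sync"
)

// mountPoint is a stand-in for the mountlib mount handle; only Wait is
// needed for this sketch.
type mountPoint interface {
	Wait() error
}

var (
	mountMu    sync.Mutex
	liveMounts = map[string]mountPoint{}
)

// track registers a successful mount and removes it again if it is
// unmounted externally (for example by fusermount -u), mirroring the
// removed goroutine above.
func track(path string, mnt mountPoint) {
	mountMu.Lock()
	liveMounts[path] = mnt
	mountMu.Unlock()

	go func() {
		if err := mnt.Wait(); err != nil {
			log.Printf("unmount FAILED: %v", err)
			return
		}
		mountMu.Lock()
		defer mountMu.Unlock()
		delete(liveMounts, path)
	}()
}
```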
@@ -258,7 +246,7 @@ func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
	for _, k := range keys {
		m := liveMounts[k]
		info := MountInfo{
			Fs:         fs.ConfigString(m.Fs),
			Fs:         m.Fs.Name(),
			MountPoint: m.MountPoint,
			MountedOn:  m.MountedOn,
		}
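This hunk reverts the v1.60.0 fix for `mount/listmounts` not returning the full Fs given to `mount/mount`: `fs.ConfigString` reports the remote as the user entered it (name plus path), while `Name()` returns only the config name. A rough illustration of the difference, with a simplified stand-in for `fs.Fs` (the real `fs.ConfigString` also handles connection strings and overridden options):

```go
package rcsketch

// remote is a minimal stand-in for fs.Fs with only the methods this
// sketch needs (assumed, not the real interface).
type remote struct {
	name string // config name, e.g. "mydrive"
	root string // path within the remote, e.g. "photos/2022"
}

func (r remote) Name() string { return r.name }
func (r remote) Root() string { return r.root }

// configString approximates fs.ConfigString for a plain remote: the full
// "name:root" string. Name() alone would report just "mydrive" and lose
// the "photos/2022" part that the caller actually mounted.
func configString(r remote) string {
	return r.Name() + ":" + r.Root()
}
```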
@@ -274,11 +262,8 @@ func init() {
|
||||
Path: "mount/unmountall",
|
||||
AuthRequired: true,
|
||||
Fn: unmountAll,
|
||||
Title: "Unmount all active mounts",
|
||||
Help: `
|
||||
rclone allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with
|
||||
FUSE.
|
||||
Title: "Show current mount points",
|
||||
Help: `This shows currently mounted points, which can be used for performing an unmount.
|
||||
|
||||
This takes no parameters and returns error if unmount does not succeed.
|
||||
|
||||
|
||||
@@ -158,9 +158,7 @@ func mediaWithResources(nodes vfs.Nodes) (vfs.Nodes, map[vfs.Node]vfs.Nodes) {
	for _, node := range nodes {
		baseName, ext := splitExt(strings.ToLower(node.Name()))
		switch ext {
		case ".srt", ".ass", ".ssa", ".sub", ".idx", ".sup", ".jss", ".txt", ".usf", ".cue", ".vtt", ".css":
			// .idx should be with .sub, .css should be with vtt otherwise they should be culled,
			// and their mimeTypes are not consistent, but anyway these negatives don't throw errors.
		case ".srt":
			subtitlesByName[baseName] = node
		default:
			mediaByName[baseName] = append(mediaByName[baseName], node)

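The hunk above narrows v1.60.0's external-subtitle support back to `.srt` only. The grouping idea itself is simple: names are lower-cased and split into base name and extension, and files with a subtitle extension are attached to the media file sharing the same base name. A hedged sketch using only the standard library (`splitExt` and the vfs node types from the real code are not reproduced here):

```go
package dlnasubsketch

import (
	"path"
	"strings"
)

// subtitleExts is the v1.60.0 list from the hunk above.
var subtitleExts = map[string]bool{
	".srt": true, ".ass": true, ".ssa": true, ".sub": true, ".idx": true,
	".sup": true, ".jss": true, ".txt": true, ".usf": true, ".cue": true,
	".vtt": true, ".css": true,
}

// groupSubtitles attaches subtitle files to media files that share the
// same lower-cased base name, e.g. "Movie.mkv" picks up "movie.srt" and
// "movie.vtt". Subtitles with no matching media file are dropped.
func groupSubtitles(names []string) map[string][]string {
	media := map[string][]string{}
	subs := map[string][]string{}
	for _, name := range names {
		lower := strings.ToLower(name)
		ext := path.Ext(lower)
		base := strings.TrimSuffix(lower, ext)
		if subtitleExts[ext] {
			subs[base] = append(subs[base], name)
		} else {
			media[base] = append(media[base], name)
		}
	}
	out := map[string][]string{}
	for base, files := range media {
		for _, m := range files {
			out[m] = subs[base]
		}
	}
	return out
}
```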
File diff suppressed because one or more lines are too long
@@ -118,7 +118,7 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
	}

	s := &server{
		AnnounceInterval: opt.AnnounceInterval,
		AnnounceInterval: 10 * time.Second,
		FriendlyName:     friendlyName,
		RootDeviceUUID:   makeDeviceUUID(friendlyName),
		Interfaces:       interfaces,
@@ -279,14 +279,7 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
// use s.Wait() to block on the listener indefinitely.
func (s *server) Serve() (err error) {
	if s.HTTPConn == nil {
		// Currently, the SSDP server only listens on an IPv4 multicast address.
		// Differentiate between two INADDR_ANY addresses,
		// so that 0.0.0.0 can only listen on IPv4 addresses.
		network := "tcp4"
		if strings.Count(s.httpListenAddr, ":") > 1 {
			network = "tcp"
		}
		s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
		s.HTTPConn, err = net.Listen("tcp", s.httpListenAddr)
		if err != nil {
			return
		}
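The removed lines above pick the listener network from the listen address so that, by default, the HTTP side binds IPv4-only and stays consistent with SSDP, which announces over IPv4 multicast. The heuristic is worth isolating as a small sketch:

```go
package dlnaservesketch

import "strings"

// listenNetwork reproduces the removed heuristic: a plain ":7879" style
// address (one colon) binds "tcp4" so only IPv4 addresses get announced,
// while an address with more than one colon (e.g. "[::]:7879") is taken
// as IPv6 and listened on as dual-stack "tcp".
func listenNetwork(addr string) string {
	if strings.Count(addr, ":") > 1 {
		return "tcp"
	}
	return "tcp4"
}
```

With this in place, `net.Listen(listenNetwork(addr), addr)` stands in for the unconditional `net.Listen("tcp", addr)` shown as the restored line.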
@@ -343,30 +336,6 @@ func (s *server) startSSDP() {
|
||||
|
||||
// Run SSDP server on an interface.
|
||||
func (s *server) ssdpInterface(intf net.Interface) {
|
||||
// Figure out whether should an ip be announced
|
||||
ipfilterFn := func(ip net.IP) bool {
|
||||
listenaddr := s.HTTPConn.Addr().String()
|
||||
listenip := listenaddr[:strings.LastIndex(listenaddr, ":")]
|
||||
switch listenip {
|
||||
case "0.0.0.0":
|
||||
if strings.Contains(ip.String(), ":") {
|
||||
// Any IPv6 address should not be announced
|
||||
// because SSDP only listen on IPv4 multicast address
|
||||
return false
|
||||
}
|
||||
return true
|
||||
case "[::]":
|
||||
// In the @Serve() section, the default settings have been made to not listen on IPv6 addresses.
|
||||
// If actually still listening on [::], then allow to announce any address.
|
||||
return true
|
||||
default:
|
||||
if listenip == ip.String() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Figure out which HTTP location to advertise based on the interface IP.
|
||||
advertiseLocationFn := func(ip net.IP) string {
|
||||
url := url.URL{
|
||||
@@ -380,12 +349,6 @@ func (s *server) ssdpInterface(intf net.Interface) {
|
||||
return url.String()
|
||||
}
|
||||
|
||||
_, err := intf.Addrs()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fs.Logf(s, "Started SSDP on %v", intf.Name)
|
||||
|
||||
// Note that the devices and services advertised here via SSDP should be
|
||||
// in agreement with the rootDesc XML descriptor that is defined above.
|
||||
ssdpServer := ssdp.Server{
|
||||
@@ -396,7 +359,6 @@ func (s *server) ssdpInterface(intf net.Interface) {
|
||||
"urn:schemas-upnp-org:service:ContentDirectory:1",
|
||||
"urn:schemas-upnp-org:service:ConnectionManager:1",
|
||||
"urn:microsoft.com:service:X_MS_MediaReceiverRegistrar:1"},
|
||||
IPFilter: ipfilterFn,
|
||||
Location: advertiseLocationFn,
|
||||
Server: serverField,
|
||||
UUID: s.RootDeviceUUID,
|
||||
|
||||
@@ -2,8 +2,6 @@
|
||||
package dlnaflags
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
@@ -26,20 +24,18 @@ logging of all UPNP traffic.
|
||||
|
||||
// Options is the type for DLNA serving options.
|
||||
type Options struct {
|
||||
ListenAddr string
|
||||
FriendlyName string
|
||||
LogTrace bool
|
||||
InterfaceNames []string
|
||||
AnnounceInterval time.Duration
|
||||
ListenAddr string
|
||||
FriendlyName string
|
||||
LogTrace bool
|
||||
InterfaceNames []string
|
||||
}
|
||||
|
||||
// DefaultOpt contains the defaults options for DLNA serving.
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: ":7879",
|
||||
FriendlyName: "",
|
||||
LogTrace: false,
|
||||
InterfaceNames: []string{},
|
||||
AnnounceInterval: 12 * time.Minute,
|
||||
ListenAddr: ":7879",
|
||||
FriendlyName: "",
|
||||
LogTrace: false,
|
||||
InterfaceNames: []string{},
|
||||
}
|
||||
|
||||
// Opt contains the options for DLNA serving.
|
||||
@@ -53,7 +49,6 @@ func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
|
||||
flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "Name of DLNA server")
|
||||
flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "Enable trace logging of SOAP traffic")
|
||||
flags.StringArrayVarP(flagSet, &Opt.InterfaceNames, prefix+"interface", "", Opt.InterfaceNames, "The interface to use for SSDP (repeat as necessary)")
|
||||
flags.DurationVarP(flagSet, &Opt.AnnounceInterval, prefix+"announce-interval", "", Opt.AnnounceInterval, "The interval between SSDP announcements")
|
||||
}
|
||||
|
||||
// AddFlags add the command line flags for DLNA serving.
|
||||
|
||||
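The dlnaflags hunks above remove the `--announce-interval` option (12 minute default, wired into the server's `AnnounceInterval` field). For reference, here is a hedged sketch of how such a duration flag is registered with pflag; the real code goes through rclone's `flags.DurationVarP` wrapper shown in the removed line.

```go
package dlnaflagsketch

import (
	"time"

	"github.com/spf13/pflag"
)

// Options mirrors only the field this sketch is about.
type Options struct {
	AnnounceInterval time.Duration
}

// DefaultOpt keeps the removed v1.60.0 default of 12 minutes between
// SSDP announcements.
var DefaultOpt = Options{AnnounceInterval: 12 * time.Minute}

// addFlags registers the flag so that, for example,
// `rclone serve dlna --announce-interval 5m remote:` overrides the default.
func addFlags(flagSet *pflag.FlagSet, prefix string, opt *Options) {
	flagSet.DurationVar(&opt.AnnounceInterval, prefix+"announce-interval",
		opt.AnnounceInterval, "The interval between SSDP announcements")
}
```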
@@ -29,7 +29,6 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
|
||||
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
|
||||
flags.StringVarP(flagSet, &Opt.MinTLSVersion, prefix+"min-tls-version", "", Opt.MinTLSVersion, "Minimum TLS version that is acceptable")
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -108,10 +108,6 @@ supply ` + "`--client-ca`" + ` also.
|
||||
of that with the CA certificate. ` + "`--key`" + ` should be the PEM encoded
|
||||
private key and ` + "`--client-ca`" + ` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
|
||||
--min-tls-version is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
`
|
||||
|
||||
// Options contains options for the http Server
|
||||
@@ -130,7 +126,6 @@ type Options struct {
|
||||
BasicPass string // password for BasicUser
|
||||
Auth AuthFn `json:"-"` // custom Auth (not set by command line flags)
|
||||
Template string // User specified template
|
||||
MinTLSVersion string // MinTLSVersion contains the minimum TLS version that is acceptable
|
||||
}
|
||||
|
||||
// AuthFn if used will be used to authenticate user, pass. If an error
|
||||
@@ -146,7 +141,6 @@ var DefaultOpt = Options{
|
||||
ServerReadTimeout: 1 * time.Hour,
|
||||
ServerWriteTimeout: 1 * time.Hour,
|
||||
MaxHeaderBytes: 4096,
|
||||
MinTLSVersion: "tls1.0",
|
||||
}
|
||||
|
||||
// Server contains info about the running http server
|
||||
@@ -282,20 +276,6 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
s.Opt.BaseURL = "/" + s.Opt.BaseURL
|
||||
}
|
||||
|
||||
var minTLSVersion uint16
|
||||
switch opt.MinTLSVersion {
|
||||
case "tls1.0":
|
||||
minTLSVersion = tls.VersionTLS10
|
||||
case "tls1.1":
|
||||
minTLSVersion = tls.VersionTLS11
|
||||
case "tls1.2":
|
||||
minTLSVersion = tls.VersionTLS12
|
||||
case "tls1.3":
|
||||
minTLSVersion = tls.VersionTLS13
|
||||
default:
|
||||
log.Fatalf("Invalid value for --min-tls-version")
|
||||
}
|
||||
|
||||
// FIXME make a transport?
|
||||
s.httpServer = &http.Server{
|
||||
Addr: s.Opt.ListenAddr,
|
||||
@@ -306,7 +286,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
ReadHeaderTimeout: 10 * time.Second, // time to send the headers
|
||||
IdleTimeout: 60 * time.Second, // time to keep idle connections open
|
||||
TLSConfig: &tls.Config{
|
||||
MinVersion: minTLSVersion,
|
||||
MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
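The httplib hunks above strip out the `--min-tls-version` support added in v1.60.0. Since the removed switch statement is scattered across the diff, here is a compact, self-contained restatement of the mapping onto `crypto/tls` constants; returning an error instead of calling `log.Fatalf` is a deliberate deviation for reuse, not what the original did.

```go
package httplibsketch

import (
	"crypto/tls"
	"fmt"
)

// minTLSVersion maps the flag values documented above ("tls1.0" being
// the default) onto crypto/tls constants.
func minTLSVersion(opt string) (uint16, error) {
	switch opt {
	case "tls1.0":
		return tls.VersionTLS10, nil
	case "tls1.1":
		return tls.VersionTLS11, nil
	case "tls1.2":
		return tls.VersionTLS12, nil
	case "tls1.3":
		return tls.VersionTLS13, nil
	}
	return 0, fmt.Errorf("invalid value for --min-tls-version: %q", opt)
}

// The result is what ends up in the server's TLS configuration,
// e.g. &tls.Config{MinVersion: v}, as the removed lines show.
```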
@@ -15,6 +15,7 @@ TestDropbox:
|
||||
TestFichier:
|
||||
TestFTP:
|
||||
TestGoogleCloudStorage:
|
||||
TestHubic:
|
||||
TestNetStorage:
|
||||
TestOneDrive:
|
||||
TestPcloud:
|
||||
|
||||
@@ -15,3 +15,5 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
|
||||
TestSwift:testInfo \
|
||||
TestYandex:testInfo \
|
||||
TestFTP:testInfo
|
||||
|
||||
# TestHubic:testInfo \
|
||||
|
||||
@@ -115,6 +115,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
|
||||
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud-object-storage-aos" >}}
|
||||
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
|
||||
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}}
|
||||
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
|
||||
@@ -129,6 +130,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
|
||||
{{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
|
||||
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
|
||||
{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
|
||||
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
|
||||
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
@@ -146,14 +148,12 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
|
||||
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
|
||||
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
|
||||
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
|
||||
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
|
||||
{{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/object-storage/buckets" config="/swift/" >}}
|
||||
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
|
||||
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
|
||||
{{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
|
||||
{{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
|
||||
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
{{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
|
||||
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
|
||||
@@ -162,7 +162,6 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
|
||||
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
|
||||
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
|
||||
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
|
||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
|
||||
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||
|
||||
@@ -23,11 +23,6 @@ Invoking `rclone mkdir backup:../desktop` is exactly the same as invoking
The empty path is not allowed as a remote. To alias the current directory
use `.` instead.

The target remote can also be a [connection string](/docs/#connection-strings).
This can be used to modify the config of a remote for different uses, e.g.
the alias `myDriveTrash` with the target remote `myDrive,trashed_only:`
can be used to only show the trashed files in `myDrive`.

## Configuration

Here is an example of how to make an alias called `remote` for local folder.

@@ -637,16 +637,3 @@ put them back in again.` >}}
|
||||
* Ryan Morey <4590343+rmorey@users.noreply.github.com>
|
||||
* Simon Bos <simonbos9@gmail.com>
|
||||
* YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com>
|
||||
* Øyvind Heddeland Instefjord <instefjord@outlook.com>
|
||||
* Dmitry Deniskin <110819396+ddeniskin@users.noreply.github.com>
|
||||
* Alexander Knorr <106825+opexxx@users.noreply.github.com>
|
||||
* Richard Bateman <richard@batemansr.us>
|
||||
* Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
|
||||
* Lorenzo Milesi <lorenzo.milesi@yetopen.com>
|
||||
* Isaac Aymerich <isaac.aymerich@gmail.com>
|
||||
* YanceyChiew <35898533+YanceyChiew@users.noreply.github.com>
|
||||
* Manoj Ghosh <msays2000@gmail.com>
|
||||
* Bachue Zhou <bachue.shu@gmail.com>
|
||||
* Manoj Ghosh <manoj.ghosh@oracle.com>
|
||||
* Tom Mombourquette <tom@devnode.com>
|
||||
* Robert Newson <rnewson@apache.org>
|
||||
|
||||
@@ -5,116 +5,6 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.60.1 - 2022-11-17
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.60.0...v1.60.1)
|
||||
|
||||
* Bug Fixes
|
||||
* lib/cache: Fix alias backend shutting down too soon (Nick Craig-Wood)
|
||||
* wasm: Fix walltime link error by adding up-to-date wasm_exec.js (João Henrique Franco)
|
||||
* docs
|
||||
* Update faq.md with bisync (Samuel Johnson)
|
||||
* Corrected download links in windows install docs (coultonluke)
|
||||
* Add direct download link for windows arm64 (albertony)
|
||||
* Remove link to rclone slack as it is no longer supported (Nick Craig-Wood)
|
||||
* Faq: how to use a proxy server that requires a username and password (asdffdsazqqq)
|
||||
* Oracle-object-storage: doc fix (Manoj Ghosh)
|
||||
* Fix typo `remove` in rclone_serve_restic command (Joda Stößer)
|
||||
* Fix character that was incorrectly interpreted as markdown (Clément Notin)
|
||||
* VFS
|
||||
* Fix deadlock caused by cache cleaner and upload finishing (Nick Craig-Wood)
|
||||
* Local
|
||||
* Clean absolute paths (albertony)
|
||||
* Fix -L/--copy-links with filters missing directories (Nick Craig-Wood)
|
||||
* Mailru
|
||||
* Note that an app password is now needed (Nick Craig-Wood)
|
||||
* Allow timestamps to be before the epoch 1970-01-01 (Nick Craig-Wood)
|
||||
* S3
|
||||
* Add provider quirk `--s3-might-gzip` to fix corrupted on transfer: sizes differ (Nick Craig-Wood)
|
||||
* Allow Storj to server side copy since it seems to work now (Nick Craig-Wood)
|
||||
* Fix for unchecked err value in s3 listv2 (Aaron Gokaslan)
|
||||
* Add additional Wasabi locations (techknowlogick)
|
||||
* Smb
|
||||
* Fix `Failed to sync: context canceled` at the end of syncs (Nick Craig-Wood)
|
||||
* WebDAV
|
||||
* Fix Move/Copy/DirMove when using -server-side-across-configs (Nick Craig-Wood)
|
||||
|
||||
## v1.60.0 - 2022-10-21
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.59.0...v1.60.0)
|
||||
|
||||
* New backends
|
||||
* [Oracle object storage](/oracleobjectstorage/) (Manoj Ghosh)
|
||||
* [SMB](/smb/) / CIFS (Windows file sharing) (Lesmiscore)
|
||||
* New S3 providers
|
||||
* [IONOS Cloud Storage](/s3/#ionos) (Dmitry Deniskin)
|
||||
* [Qiniu KODO](/s3/#qiniu) (Bachue Zhou)
|
||||
* New Features
|
||||
* build
|
||||
* Update to go1.19 and make go1.17 the minimum required version (Nick Craig-Wood)
|
||||
* Install.sh: fix arm-v7 download (Ole Frost)
|
||||
* fs: Warn the user when using an existing remote name without a colon (Nick Craig-Wood)
|
||||
* httplib: Add `--xxx-min-tls-version` option to select minimum TLS version for HTTP servers (Robert Newson)
|
||||
* librclone: Add PHP bindings and test program (Jordi Gonzalez Muñoz)
|
||||
* operations
|
||||
* Add `--server-side-across-configs` global flag for any backend (Nick Craig-Wood)
|
||||
* Optimise `--copy-dest` and `--compare-dest` (Nick Craig-Wood)
|
||||
* rc: add `job/stopgroup` to stop group (Evan Spensley)
|
||||
* serve dlna
|
||||
* Add `--announce-interval` to control SSDP Announce Interval (YanceyChiew)
|
||||
* Add `--interface` to Specify SSDP interface names line (Simon Bos)
|
||||
* Add support for more external subtitles (YanceyChiew)
|
||||
* Add verification of addresses (YanceyChiew)
|
||||
* sync: Optimise `--copy-dest` and `--compare-dest` (Nick Craig-Wood)
|
||||
* doc updates (albertony, Alexander Knorr, anonion, João Henrique Franco, Josh Soref, Lorenzo Milesi, Marco Molteni, Mark Trolley, Ole Frost, partev, Ryan Morey, Tom Mombourquette, YFdyh000)
|
||||
* Bug Fixes
|
||||
* filter
|
||||
* Fix incorrect filtering with `UseFilter` context flag and wrapping backends (Nick Craig-Wood)
|
||||
* Make sure we check `--files-from` when looking for a single file (Nick Craig-Wood)
|
||||
* rc
|
||||
* Fix `mount/listmounts` not returning the full Fs entered in `mount/mount` (Tom Mombourquette)
|
||||
* Handle external unmount when mounting (Isaac Aymerich)
|
||||
* Validate Daemon option is not set when mounting a volume via RC (Isaac Aymerich)
|
||||
* sync: Update docs and error messages to reflect fixes to overlap checks (Nick Naumann)
|
||||
* VFS
|
||||
* Reduce memory use by embedding `sync.Cond` (Nick Craig-Wood)
|
||||
* Reduce memory usage by re-ordering commonly used structures (Nick Craig-Wood)
|
||||
* Fix excess CPU used by VFS cache cleaner looping (Nick Craig-Wood)
|
||||
* Local
|
||||
* Obey file filters in listing to fix errors on excluded files (Nick Craig-Wood)
|
||||
* Fix "Failed to read metadata: function not implemented" on old Linux kernels (Nick Craig-Wood)
|
||||
* Compress
|
||||
* Fix crash due to nil metadata (Nick Craig-Wood)
|
||||
* Fix error handling to not use or return nil objects (Nick Craig-Wood)
|
||||
* Drive
|
||||
* Make `--drive-stop-on-upload-limit` obey quota exceeded error (Steve Kowalik)
|
||||
* FTP
|
||||
* Add `--ftp-force-list-hidden` option to show hidden items (Øyvind Heddeland Instefjord)
|
||||
* Fix hang when using ExplicitTLS to certain servers. (Nick Craig-Wood)
|
||||
* Google Cloud Storage
|
||||
* Add `--gcs-endpoint` flag and config parameter (Nick Craig-Wood)
|
||||
* Hubic
|
||||
* Remove backend as service has now shut down (Nick Craig-Wood)
|
||||
* Onedrive
|
||||
* Rename Onedrive(cn) 21Vianet to Vnet Group (Yen Hu)
|
||||
* Disable change notify in China region since it is not supported (Nick Craig-Wood)
|
||||
* S3
|
||||
* Implement `--s3-versions` flag to show old versions of objects if enabled (Nick Craig-Wood)
|
||||
* Implement `--s3-version-at` flag to show versions of objects at a particular time (Nick Craig-Wood)
|
||||
* Implement `backend versioning` command to get/set bucket versioning (Nick Craig-Wood)
|
||||
* Implement `Purge` to purge versions and `backend cleanup-hidden` (Nick Craig-Wood)
|
||||
* Add `--s3-decompress` flag to decompress gzip-encoded files (Nick Craig-Wood)
|
||||
* Add `--s3-sse-customer-key-base64` to supply keys with binary data (Richard Bateman)
|
||||
* Try to keep the maximum precision in ModTime with `--user-server-modtime` (Nick Craig-Wood)
|
||||
* Drop binary metadata with an ERROR message as it can't be stored (Nick Craig-Wood)
|
||||
* Add `--s3-no-system-metadata` to suppress read and write of system metadata (Nick Craig-Wood)
|
||||
* SFTP
|
||||
* Fix directory creation races (Lesmiscore)
|
||||
* Swift
|
||||
* Add `--swift-no-large-objects` to reduce HEAD requests (Nick Craig-Wood)
|
||||
* Union
|
||||
* Propagate SlowHash feature to fix hasher interaction (Lesmiscore)
|
||||
|
||||
## v1.59.2 - 2022-09-15
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.59.1...v1.59.2)
|
||||
|
||||
@@ -37,7 +37,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
|
||||
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
|
||||
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.
|
||||
* [rclone bisync](/commands/rclone_bisync/) - Perform bidirectional synchronization between two paths.
|
||||
* [rclone bisync](/commands/rclone_bisync/) - Perform bidirectonal synchronization between two paths.
|
||||
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
|
||||
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
|
||||
* [rclone checksum](/commands/rclone_checksum/) - Checks the files in the source against a SUM file.
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
---
|
||||
title: "rclone bisync"
|
||||
description: "Perform bidirectional synchronization between two paths."
|
||||
description: "Perform bidirectonal synchronization between two paths."
|
||||
slug: rclone_bisync
|
||||
url: /commands/rclone_bisync/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/bisync/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone bisync
|
||||
|
||||
Perform bidirectional synchronization between two paths.
|
||||
Perform bidirectonal synchronization between two paths.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Perform bidirectional synchronization between two paths.
|
||||
Perform bidirectonal synchronization between two paths.
|
||||
|
||||
[Bisync](https://rclone.org/bisync/) provides a
|
||||
bidirectional cloud sync solution in rclone.
|
||||
|
||||
@@ -28,7 +28,7 @@ To load completions for every new session, execute once:
|
||||
|
||||
### macOS:
|
||||
|
||||
rclone completion bash > $(brew --prefix)/etc/bash_completion.d/rclone
|
||||
rclone completion bash > /usr/local/etc/bash_completion.d/rclone
|
||||
|
||||
You will need to start a new shell for this setup to take effect.
|
||||
|
||||
|
||||
@@ -18,10 +18,6 @@ to enable it. You can execute the following once:
|
||||
|
||||
echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||
|
||||
To load completions in your current shell session:
|
||||
|
||||
source <(rclone completion zsh); compdef _rclone rclone
|
||||
|
||||
To load completions for every new session, execute once:
|
||||
|
||||
### Linux:
|
||||
@@ -30,7 +26,7 @@ To load completions for every new session, execute once:
|
||||
|
||||
### macOS:
|
||||
|
||||
rclone completion zsh > $(brew --prefix)/share/zsh/site-functions/_rclone
|
||||
rclone completion zsh > /usr/local/share/zsh/site-functions/_rclone
|
||||
|
||||
You will need to start a new shell for this setup to take effect.
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ are 100% certain you are already passing obscured passwords then use
|
||||
`rclone config password` command.
|
||||
|
||||
The flag `--non-interactive` is for use by applications that wish to
|
||||
configure rclone themselves, rather than using rclone's text based
|
||||
configure rclone themeselves, rather than using rclone's text based
|
||||
configuration questions. If this flag is set, and rclone needs to ask
|
||||
the user a question, a JSON blob will be returned with the question in
|
||||
it.
|
||||
|
||||
@@ -45,7 +45,7 @@ are 100% certain you are already passing obscured passwords then use
|
||||
`rclone config password` command.
|
||||
|
||||
The flag `--non-interactive` is for use by applications that wish to
|
||||
configure rclone themselves, rather than using rclone's text based
|
||||
configure rclone themeselves, rather than using rclone's text based
|
||||
configuration questions. If this flag is set, and rclone needs to ask
|
||||
the user a question, a JSON blob will be returned with the question in
|
||||
it.
|
||||
|
||||
@@ -26,7 +26,7 @@ For the MD5 and SHA1 algorithms there are also dedicated commands,
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
when there is data to read (if not, the hypen will be treated literaly,
|
||||
as a relative path).
|
||||
|
||||
Run without a hash to see the list of all supported hashes, e.g.
|
||||
|
||||
@@ -42,7 +42,7 @@ Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the re
|
||||
|
||||
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
Listing a non-existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the re
|
||||
|
||||
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
Listing a non-existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).
|
||||
|
||||
|
||||
@@ -126,7 +126,7 @@ Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the re
|
||||
|
||||
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
Listing a non-existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ If `--files-only` is not specified directories in addition to the files
|
||||
will be returned.
|
||||
|
||||
If `--metadata` is set then an additional Metadata key will be returned.
|
||||
This will have metadata in rclone standard format as a JSON object.
|
||||
This will have metdata in rclone standard format as a JSON object.
|
||||
|
||||
if `--stat` is set then a single JSON blob will be returned about the
|
||||
item pointed to. This will return an error if the item isn't found.
|
||||
@@ -102,7 +102,7 @@ Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the re
|
||||
|
||||
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
Listing a non-existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the re
|
||||
|
||||
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
Listing a non-existent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ to running `rclone hashsum MD5 remote:path`.
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
when there is data to read (if not, the hypen will be treated literaly,
|
||||
as a relative path).
|
||||
|
||||
|
||||
|
||||
@@ -98,7 +98,7 @@ and experience unexpected program errors, freezes or other issues, consider moun
|
||||
as a network drive instead.
|
||||
|
||||
When mounting as a fixed disk drive you can either mount to an unused drive letter,
|
||||
or to a path representing a **nonexistent** subdirectory of an **existing** parent
|
||||
or to a path representing a **non-existent** subdirectory of an **existing** parent
|
||||
directory or drive. Using the special value `*` will tell rclone to
|
||||
automatically assign the next available drive letter, starting with Z: and moving backward.
|
||||
Examples:
|
||||
@@ -129,7 +129,7 @@ the mapped drive, shown in Windows Explorer etc, while the complete
|
||||
`\\server\share` will be reported as the remote UNC path by
|
||||
`net use` etc, just like a normal network drive mapping.
|
||||
|
||||
If you specify a full network share UNC path with `--volname`, this will implicitly
|
||||
If you specify a full network share UNC path with `--volname`, this will implicitely
|
||||
set the `--network-mode` option, so the following two examples have same result:
|
||||
|
||||
rclone mount remote:path/to/files X: --network-mode
|
||||
@@ -138,7 +138,7 @@ set the `--network-mode` option, so the following two examples have same result:
|
||||
You may also specify the network share UNC path as the mountpoint itself. Then rclone
|
||||
will automatically assign a drive letter, same as with `*` and use that as
|
||||
mountpoint, and instead use the UNC path specified as the volume name, as if it were
|
||||
specified with the `--volname` option. This will also implicitly set
|
||||
specified with the `--volname` option. This will also implicitely set
|
||||
the `--network-mode` option. This means the following two examples have same result:
|
||||
|
||||
rclone mount remote:path/to/files \\cloud\remote
|
||||
@@ -174,7 +174,7 @@ The permissions on each entry will be set according to [options](#options)
|
||||
|
||||
The default permissions corresponds to `--file-perms 0666 --dir-perms 0777`,
|
||||
i.e. read and write permissions to everyone. This means you will not be able
|
||||
to start any programs from the mount. To be able to do that you must add
|
||||
to start any programs from the the mount. To be able to do that you must add
|
||||
execute permissions, e.g. `--file-perms 0777 --dir-perms 0777` to add it
|
||||
to everyone. If the program needs to write files, chances are you will have
|
||||
to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).
|
||||
@@ -245,8 +245,8 @@ applications won't work with their files on an rclone mount without
|
||||
`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
|
||||
See the [VFS File Caching](#vfs-file-caching) section for more info.
|
||||
|
||||
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
|
||||
do not support the concept of empty directories, so empty
|
||||
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2,
|
||||
Hubic) do not support the concept of empty directories, so empty
|
||||
directories will have a tendency to disappear once they fall out of
|
||||
the directory cache.
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ press '?' to toggle the help on and off. The supported keys are:
|
||||
q/ESC/^c to quit
|
||||
|
||||
Listed files/directories may be prefixed by a one-character flag,
|
||||
some of them combined with a description in brackets at end of line.
|
||||
some of them combined with a description in brackes at end of line.
|
||||
These flags have the following meaning:
|
||||
|
||||
e means this is an empty directory, i.e. contains no files (but
|
||||
|
||||
@@ -362,13 +362,11 @@ rclone serve dlna remote:path [flags]
|
||||
|
||||
```
|
||||
--addr string The ip:port or :port to bind the DLNA http server to (default ":7879")
|
||||
--announce-interval duration The interval between SSDP announcements (default 12m0s)
|
||||
--dir-cache-time duration Time to cache directory entries for (default 5m0s)
|
||||
--dir-perms FileMode Directory permissions (default 0777)
|
||||
--file-perms FileMode File permissions (default 0666)
|
||||
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
-h, --help help for dlna
|
||||
--interface stringArray The interface to use for SSDP (repeat as necessary)
|
||||
--log-trace Enable trace logging of SOAP traffic
|
||||
--name string Name of DLNA server
|
||||
--no-checksum Don't compare checksums on up/download
|
||||
|
||||
@@ -60,10 +60,6 @@ of that with the CA certificate. `--key` should be the PEM encoded
|
||||
private key and `--client-ca` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
|
||||
--min-tls-version is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
|
||||
### Template
|
||||
|
||||
`--template` allows a user to specify a custom markup template for HTTP
|
||||
@@ -450,7 +446,6 @@ rclone serve http remote:path [flags]
|
||||
--htpasswd string A htpasswd file - if not provided no authentication is done
|
||||
--key string SSL PEM Private key
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
--no-checksum Don't compare checksums on up/download
|
||||
--no-modtime Don't read/write the modification time (can speed things up)
|
||||
--no-seek Don't allow seeking in files
|
||||
|
||||
@@ -174,10 +174,6 @@ of that with the CA certificate. `--key` should be the PEM encoded
|
||||
private key and `--client-ca` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
|
||||
--min-tls-version is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
|
||||
|
||||
```
|
||||
rclone serve restic remote:path [flags]
|
||||
@@ -196,7 +192,6 @@ rclone serve restic remote:path [flags]
|
||||
--htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--key string SSL PEM Private key
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
--pass string Password for authentication
|
||||
--private-repos Users can only access their private repo
|
||||
--realm string Realm for authentication (default "rclone")
|
||||
|
||||
@@ -11,19 +11,11 @@ Serve the remote over SFTP.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Run an SFTP server to serve a remote over SFTP. This can be used
|
||||
with an SFTP client or you can make a remote of type [sftp](/sftp) to use with it.
|
||||
Run a SFTP server to serve a remote over SFTP. This can be used
|
||||
with an SFTP client or you can make a remote of type sftp to use with it.
|
||||
|
||||
You can use the [filter](/filtering) flags (e.g. `--include`, `--exclude`)
|
||||
to control what is served.
|
||||
|
||||
The server will respond to a small number of shell commands, mainly
|
||||
md5sum, sha1sum and df, which enable it to provide support for checksums
|
||||
and the about feature when accessed from an sftp remote.
|
||||
|
||||
Note that this server uses standard 32 KiB packet payload size, which
|
||||
means you must not configure the client to expect anything else, e.g.
|
||||
with the [chunk_size](/sftp/#sftp-chunk-size) option on an sftp remote.
|
||||
You can use the filter flags (e.g. `--include`, `--exclude`) to control what
|
||||
is served.
|
||||
|
||||
The server will log errors. Use `-v` to see access logs.
|
||||
|
||||
@@ -36,6 +28,11 @@ You must provide some means of authentication, either with
|
||||
`--auth-proxy`, or set the `--no-auth` flag for no
|
||||
authentication when logging in.
|
||||
|
||||
Note that this also implements a small number of shell commands so
|
||||
that it can provide md5sum/sha1sum/df information for the rclone sftp
|
||||
backend. This means that is can support SHA1SUMs, MD5SUMs and the
|
||||
about command when paired with the rclone sftp backend.
|
||||
|
||||
If you don't supply a host `--key` then rclone will generate rsa, ecdsa
|
||||
and ed25519 variants, and cache them for later use in rclone's cache
|
||||
directory (see `rclone help flags cache-dir`) in the "serve-sftp"
|
||||
@@ -487,7 +484,7 @@ rclone serve sftp remote:path [flags]
|
||||
--pass string Password for authentication
|
||||
--poll-interval duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
|
||||
--read-only Only allow read-only access
|
||||
--stdio Run an sftp server on stdin/stdout
|
||||
--stdio Run an sftp server on run stdin/stdout
|
||||
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
|
||||
--user string User name for authentication
|
||||
|
||||
@@ -109,10 +109,6 @@ of that with the CA certificate. `--key` should be the PEM encoded
|
||||
private key and `--client-ca` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
|
||||
--min-tls-version is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
|
||||
## VFS - Virtual File System
|
||||
|
||||
This command uses the VFS layer. This adapts the cloud storage objects
|
||||
@@ -535,7 +531,6 @@ rclone serve webdav remote:path [flags]
|
||||
--htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--key string SSL PEM Private key
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
--no-checksum Don't compare checksums on up/download
|
||||
--no-modtime Don't read/write the modification time (can speed things up)
|
||||
--no-seek Don't allow seeking in files
|
||||
|
||||
@@ -26,7 +26,7 @@ to running `rclone hashsum SHA1 remote:path`.
|
||||
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
when there is data to read (if not, the hypen will be treated literaly,
|
||||
as a relative path).
|
||||
|
||||
This command can also hash data received on STDIN, if not passing
|
||||
|
||||
@@ -37,11 +37,6 @@ extended explanation in the [copy](/commands/rclone_copy/) command if unsure.
|
||||
If dest:path doesn't exist, it is created and the source:path contents
|
||||
go there.
|
||||
|
||||
It is not possible to sync overlapping remotes. However, you may exclude
|
||||
the destination from the sync with a filter rule or by putting an
|
||||
exclude-if-present file inside the destination directory and sync to a
|
||||
destination that is inside the source directory.
|
||||
|
||||
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
|
||||
|
||||
**Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
|
||||
|
||||
@@ -225,7 +225,7 @@ If you intend to use the wrapped remote both directly for keeping
|
||||
unencrypted content, as well as through a crypt remote for encrypted
|
||||
content, it is recommended to point the crypt remote to a separate
|
||||
directory within the wrapped remote. If you use a bucket-based storage
|
||||
system (e.g. Swift, S3, Google Compute Storage, B2) it is generally
|
||||
system (e.g. Swift, S3, Google Compute Storage, B2, Hubic) it is generally
|
||||
advisable to wrap the crypt remote around a specific bucket (`s3:bucket`).
|
||||
If wrapping around the entire root of the storage (`s3:`), and use the
|
||||
optional file name encryption, rclone will encrypt the bucket name.
|
||||
|
||||
@@ -51,6 +51,7 @@ See the following for detailed instructions for
|
||||
* [HDFS](/hdfs/)
|
||||
* [HiDrive](/hidrive/)
|
||||
* [HTTP](/http/)
|
||||
* [Hubic](/hubic/)
|
||||
* [Internet Archive](/internetarchive/)
|
||||
* [Jottacloud](/jottacloud/)
|
||||
* [Koofr](/koofr/)
|
||||
@@ -61,7 +62,6 @@ See the following for detailed instructions for
|
||||
* [Microsoft OneDrive](/onedrive/)
|
||||
* [OpenStack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
|
||||
* [OpenDrive](/opendrive/)
|
||||
* [Oracle Object Storage](/oracleobjectstorage/)
|
||||
* [Pcloud](/pcloud/)
|
||||
* [premiumize.me](/premiumizeme/)
|
||||
* [put.io](/putio/)
|
||||
@@ -69,7 +69,6 @@ See the following for detailed instructions for
|
||||
* [Seafile](/seafile/)
|
||||
* [SFTP](/sftp/)
|
||||
* [Sia](/sia/)
|
||||
* [SMB](/smb/)
|
||||
* [Storj](/storj/)
|
||||
* [SugarSync](/sugarsync/)
|
||||
* [Union](/union/)
|
||||
@@ -1955,7 +1954,7 @@ quickly using the least amount of memory.
|
||||
|
||||
However, some remotes have a way of listing all files beneath a
|
||||
directory in one (or a small number) of transactions. These tend to
|
||||
be the bucket-based remotes (e.g. S3, B2, GCS, Swift).
|
||||
be the bucket-based remotes (e.g. S3, B2, GCS, Swift, Hubic).
|
||||
|
||||
If you use the `--fast-list` flag then rclone will use this method for
|
||||
listing directories. This will have the following consequences for
|
||||
|
||||
@@ -85,7 +85,7 @@ script) from a URL which doesn't change then you can use these links.
|
||||
| Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
|
||||
| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
|
||||
| ARMv7 - 32 Bit | - | - | {{< cdownload linux arm-v7 >}} | {{< cdownload linux arm-v7 deb >}} | {{< cdownload linux arm-v7 rpm >}} | {{< cdownload freebsd arm-v7 >}} | {{< cdownload netbsd arm-v7 >}} | - | - | - |
|
||||
| ARM - 64 Bit | {{< cdownload windows arm64 >}} | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| ARM - 64 Bit | - | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Big Endian | - | - | {{< cdownload linux mips >}} | {{< cdownload linux mips deb >}} | {{< cdownload linux mips rpm >}} | - | - | - | - | - |
|
||||
| MIPS - Little Endian | - | - | {{< cdownload linux mipsle >}} | {{< cdownload linux mipsle deb >}} | {{< cdownload linux mipsle rpm >}} | - | - | - | - | - |
|
||||
|
||||
|
||||
@@ -82,8 +82,9 @@ of metadata, which breaks the desired 1:1 mapping of files to objects.

### Can rclone do bi-directional sync? ###

Yes, since rclone v1.58.0, [bidirectional cloud sync](/bisync/) is
available.
No, not at present. rclone only does uni-directional sync from A ->
B. It may do in the future though since it has all the primitives - it
just requires writing the algorithm to do it.

### Can I use rclone with an HTTP proxy? ###
|
||||
|
||||
@@ -108,14 +109,6 @@ possibilities. So, on Linux, you may end up with code similar to
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
|
||||
|
||||
Note: If the proxy server requires a username and password, then use
|
||||
|
||||
export http_proxy=http://username:password@proxyserver:12345
|
||||
export https_proxy=$http_proxy
|
||||
export HTTP_PROXY=$http_proxy
|
||||
export HTTPS_PROXY=$http_proxy
|
||||
|
||||
The `NO_PROXY` allows you to disable the proxy for specific hosts.
|
||||
Hosts must be comma separated, and can contain domains or parts.
|
||||
For instance "foo.com" also matches "bar.foo.com".
|
||||
|
||||
@@ -119,7 +119,6 @@ These flags are available for every command.
|
||||
--rc-job-expire-interval duration Interval to check for expired async jobs (default 10s)
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
--rc-no-auth Don't require auth for certain methods
|
||||
--rc-pass string Password for authentication
|
||||
--rc-realm string Realm for authentication (default "rclone")
|
||||
@@ -136,7 +135,6 @@ These flags are available for every command.
|
||||
--refresh-times Refresh the modtime of remote files
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)
|
||||
--server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
|
||||
@@ -162,7 +160,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.60.1")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.59.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
@@ -349,7 +347,6 @@ and may be set in the config file.
|
||||
--ftp-disable-utf8 Disable using UTF-8 even if server advertises support
|
||||
--ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
|
||||
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
|
||||
--ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--ftp-no-check-certificate Do not verify the TLS certificate of the server
|
||||
@@ -360,6 +357,7 @@ and may be set in the config file.
|
||||
--ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
|
||||
--ftp-user string FTP username (default "$USER")
|
||||
--ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
|
||||
--ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.
|
||||
--gcs-anonymous Access public buckets and objects without credentials
|
||||
--gcs-auth-url string Auth server URL
|
||||
--gcs-bucket-acl string Access Control List for new buckets
|
||||
@@ -368,7 +366,6 @@ and may be set in the config file.
|
||||
--gcs-client-secret string OAuth Client Secret
|
||||
--gcs-decompress If set this will decompress gzip encoded objects
|
||||
--gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
|
||||
--gcs-endpoint string Endpoint for the service
|
||||
--gcs-location string Location for the newly created buckets
|
||||
--gcs-no-check-bucket If set, don't attempt to check the bucket exists or create it
|
||||
--gcs-object-acl string Access Control List for new objects
|
||||
@@ -414,6 +411,14 @@ and may be set in the config file.
|
||||
--http-no-head Don't use HEAD requests
|
||||
--http-no-slash Set this if the site doesn't end directories with /
|
||||
--http-url string URL of HTTP host to connect to
|
||||
--hubic-auth-url string Auth server URL
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
|
||||
--hubic-client-id string OAuth Client Id
|
||||
--hubic-client-secret string OAuth Client Secret
|
||||
--hubic-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
|
||||
--hubic-no-chunk Don't chunk files during streaming upload
|
||||
--hubic-token string OAuth Access Token as a JSON blob
|
||||
--hubic-token-url string Token server url
|
||||
--internetarchive-access-key-id string IAS3 Access Key
|
||||
--internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
|
||||
--internetarchive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
|
||||
@@ -482,22 +487,6 @@ and may be set in the config file.
|
||||
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
|
||||
--onedrive-token string OAuth Access Token as a JSON blob
|
||||
--onedrive-token-url string Token server url
|
||||
--oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
|
||||
--oos-compartment string Object storage compartment OCID
|
||||
--oos-config-file string Path to OCI config file (default "~/.oci/config")
|
||||
--oos-config-profile string Profile name inside the oci config file (default "Default")
|
||||
--oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
|
||||
--oos-copy-timeout Duration Timeout for copy (default 1m0s)
|
||||
--oos-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--oos-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
|
||||
--oos-endpoint string Endpoint for Object storage API
|
||||
--oos-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
|
||||
--oos-namespace string Object storage namespace
|
||||
--oos-no-check-bucket If set, don't attempt to check the bucket exists or create it
|
||||
--oos-provider string Choose your Auth Provider (default "env_auth")
|
||||
--oos-region string Object storage Region
|
||||
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
|
||||
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
|
||||
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
|
||||
--opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
|
||||
--opendrive-password string Password (obscured)
|
||||
@@ -529,7 +518,6 @@ and may be set in the config file.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
|
||||
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
|
||||
--s3-decompress If set this will decompress gzip encoded objects
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-disable-http2 Disable usage of http2 for S3 backends
|
||||
--s3-download-url string Custom endpoint for downloads
|
||||
@@ -545,11 +533,9 @@ and may be set in the config file.
|
||||
--s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
|
||||
--s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
|
||||
--s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool
|
||||
--s3-might-gzip Tristate Set this if the backend might gzip objects (default unset)
|
||||
--s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
|
||||
--s3-no-head If set, don't HEAD uploaded objects to check integrity
|
||||
--s3-no-head-object If set, do not do HEAD before GET when getting objects
|
||||
--s3-no-system-metadata Suppress setting and reading of system metadata
|
||||
--s3-profile string Profile to use in the shared credentials file
|
||||
--s3-provider string Choose your S3 provider
|
||||
--s3-region string Region to connect to
|
||||
@@ -559,8 +545,7 @@ and may be set in the config file.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-shared-credentials-file string Path to the shared credentials file
|
||||
--s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
|
||||
--s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
|
||||
--s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
|
||||
--s3-sse-customer-key string If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data
|
||||
--s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3
|
||||
@@ -570,8 +555,6 @@ and may be set in the config file.
|
||||
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
|
||||
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
|
||||
--s3-v2-auth If true use v2 authentication
|
||||
--s3-version-at Time Show file versions as they were at the specified time (default off)
|
||||
--s3-versions Include old versions in directory listings
|
||||
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
|
||||
--seafile-create-library Should rclone create a library if it doesn't exist
|
||||
--seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
|
||||
@@ -618,15 +601,6 @@ and may be set in the config file.
|
||||
--sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
|
||||
--sia-user-agent string Siad User Agent (default "Sia-Agent")
|
||||
--skip-links Don't warn about skipped symlinks
|
||||
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
|
||||
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
|
||||
--smb-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
|
||||
--smb-host string SMB server hostname to connect to
|
||||
--smb-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--smb-pass string SMB password (obscured)
|
||||
--smb-port int SMB port number (default 445)
|
||||
--smb-user string SMB username (default "$USER")
|
||||
--storj-access-grant string Access grant
|
||||
--storj-api-key string API key
|
||||
--storj-passphrase string Encryption passphrase
|
||||
@@ -657,7 +631,6 @@ and may be set in the config file.
|
||||
--swift-key string API key or password (OS_PASSWORD)
|
||||
--swift-leave-parts-on-error If true avoid calling abort upload on a failure
|
||||
--swift-no-chunk Don't chunk files during streaming upload
|
||||
--swift-no-large-objects Disable support for static and dynamic large objects
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
|
||||
@@ -248,20 +248,6 @@ Here are the Advanced options specific to ftp (FTP).
|
||||
|
||||
Maximum number of FTP simultaneous connections, 0 for unlimited.
|
||||
|
||||
Note that setting this is very likely to cause deadlocks so it should
|
||||
be used with care.
|
||||
|
||||
If you are doing a sync or copy then make sure concurrency is one more
|
||||
than the sum of `--transfers` and `--checkers`.
|
||||
|
||||
If you use `--check-first` then it just needs to be one more than the
|
||||
maximum of `--checkers` and `--transfers`.
|
||||
|
||||
So for `concurrency 3` you'd use `--checkers 2 --transfers 2
|
||||
--check-first` or `--checkers 1 --transfers 1`.
|
||||
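For instance, a minimal sketch (assuming an FTP remote is already configured as `ftp:`) that keeps within a limit of 3 simultaneous connections could look like:

```
# concurrency 3 must be one more than --checkers + --transfers, so use 1 and 1
rclone sync /local/dir ftp:dir --ftp-concurrency 3 --checkers 1 --transfers 1

# with --check-first, concurrency only needs to exceed max(--checkers, --transfers)
rclone sync /local/dir ftp:dir --ftp-concurrency 3 --checkers 2 --transfers 2 --check-first
```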
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: concurrency
|
||||
|
||||
@@ -621,19 +621,6 @@ Properties:
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --gcs-endpoint
|
||||
|
||||
Endpoint for the service.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_GCS_ENDPOINT
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --gcs-encoding
|
||||
|
||||
The encoding for the backend.
|
||||
|
||||
238
docs/content/hubic.md
Normal file
@@ -0,0 +1,238 @@
|
||||
---
|
||||
title: "Hubic"
|
||||
description: "Rclone docs for Hubic"
|
||||
---
|
||||
|
||||
# {{< icon "fa fa-space-shuttle" >}} Hubic
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths are specified as `remote:container` (or `remote:` for the `lsd`
|
||||
command.) You may put subdirectories in too, e.g. `remote:container/path/to/dir`.
|
||||
|
||||
## Configuration
|
||||
|
||||
The initial setup for Hubic involves getting a token from Hubic which
|
||||
you need to do in your browser. `rclone config` walks you through it.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
n/s> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
[snip]
|
||||
XX / Hubic
|
||||
\ "hubic"
|
||||
[snip]
|
||||
Storage> hubic
|
||||
Hubic Client Id - leave blank normally.
|
||||
client_id>
|
||||
Hubic Client Secret - leave blank normally.
|
||||
client_secret>
|
||||
Remote config
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
Got code
|
||||
--------------------
|
||||
[remote]
|
||||
client_id =
|
||||
client_secret =
|
||||
token = {"access_token":"XXXXXX"}
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Hubic. This only runs from the moment it opens
|
||||
your browser to the moment you get back the verification code. This
|
||||
is on `http://127.0.0.1:53682/` and it may require you to unblock
|
||||
it temporarily if you are running a host firewall.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List containers in the top level of your Hubic
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
List all the files in your Hubic
|
||||
|
||||
rclone ls remote:
|
||||
|
||||
To copy a local directory to a Hubic directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
If you want the directory to be visible in the official *Hubic
|
||||
browser*, you need to copy your files to the `default` directory
|
||||
|
||||
rclone copy /home/source remote:default/backup
|
||||
|
||||
### --fast-list ###
|
||||
|
||||
This remote supports `--fast-list` which allows you to use fewer
|
||||
transactions in exchange for more memory. See the [rclone
|
||||
docs](/docs/#fast-list) for more details.
|
||||
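For example (using the `remote:` name from the configuration above), a listing that trades memory for fewer transactions can be run as:

```
rclone ls --fast-list remote:
```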
|
||||
### Modified time ###
|
||||
|
||||
The modified time is stored as metadata on the object as
|
||||
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
|
||||
ns.
|
||||
|
||||
This is a de facto standard (used in the official python-swiftclient
|
||||
amongst others) for storing the modification time for an object.
|
||||
|
||||
Note that Hubic wraps the Swift backend, so most of the properties
|
||||
are the same.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/hubic/hubic.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to hubic (Hubic).
|
||||
|
||||
#### --hubic-client-id
|
||||
|
||||
OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_HUBIC_CLIENT_ID
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --hubic-client-secret
|
||||
|
||||
OAuth Client Secret.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_HUBIC_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to hubic (Hubic).
|
||||
|
||||
#### --hubic-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_HUBIC_TOKEN
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --hubic-auth-url
|
||||
|
||||
Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_HUBIC_AUTH_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --hubic-token-url
|
||||
|
||||
Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_HUBIC_TOKEN_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --hubic-chunk-size
|
||||
|
||||
Above this size files will be chunked into a _segments container.
|
||||
|
||||
Above this size files will be chunked into a _segments container. The
|
||||
default for this is 5 GiB which is its maximum value.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_HUBIC_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
- Default: 5Gi
|
||||
|
||||
#### --hubic-no-chunk
|
||||
|
||||
Don't chunk files during streaming upload.
|
||||
|
||||
When doing streaming uploads (e.g. using rcat or mount) setting this
|
||||
flag will cause the swift backend to not upload chunked files.
|
||||
|
||||
This will limit the maximum upload size to 5 GiB. However non chunked
|
||||
files are easier to deal with and have an MD5SUM.
|
||||
|
||||
Rclone will still chunk files bigger than chunk_size when doing normal
|
||||
copy operations.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: no_chunk
|
||||
- Env Var: RCLONE_HUBIC_NO_CHUNK
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --hubic-encoding
|
||||
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_HUBIC_ENCODING
|
||||
- Type: MultiEncoder
|
||||
- Default: Slash,InvalidUtf8
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
|
||||
## Limitations
|
||||
|
||||
This uses the normal OpenStack Swift mechanism to refresh the Swift
|
||||
API credentials and ignores the expires field returned by the Hubic
|
||||
API.
|
||||
|
||||
The Swift API doesn't return a correct MD5SUM for segmented files
|
||||
(Dynamic or Static Large Objects) so rclone won't check or use the
|
||||
MD5SUM for these.
|
||||
@@ -14,7 +14,7 @@ Rclone is a Go program and comes as a single binary file.
|
||||
* Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
|
||||
* Optionally configure [automatic execution](#autostart).
|
||||
|
||||
See below for some expanded Linux / macOS / Windows instructions.
|
||||
See below for some expanded Linux / macOS instructions.
|
||||
|
||||
See the [usage](/docs/) docs for how to use rclone, or
|
||||
run `rclone -h`.
|
||||
@@ -35,9 +35,7 @@ For beta installation, run:
|
||||
Note that this script checks the version of rclone installed first and
|
||||
won't re-download if not needed.
|
||||
|
||||
## Linux installation {#linux}
|
||||
|
||||
### Precompiled binary {#linux-precompiled}
|
||||
## Linux installation from precompiled binary
|
||||
|
||||
Fetch and unpack
|
||||
|
||||
@@ -61,9 +59,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
|
||||
|
||||
rclone config
|
||||
|
||||
## macOS installation {#macos}
|
||||
|
||||
### Installation with brew {#macos-brew}
|
||||
## macOS installation with brew
|
||||
|
||||
brew install rclone
|
||||
|
||||
@@ -72,12 +68,7 @@ NOTE: This version of rclone will not support `mount` any more (see
|
||||
on macOS, either install a precompiled binary or enable the relevant option
|
||||
when [installing from source](#install-from-source).
|
||||
|
||||
Note that this is a third party installer not controlled by the rclone
|
||||
developers so it may be out of date. Its current version is as below.
|
||||
|
||||
[](https://repology.org/project/rclone/versions)
|
||||
|
||||
### Precompiled binary, using curl {#macos-precompiled}
|
||||
## macOS installation from precompiled binary, using curl
|
||||
|
||||
To avoid problems with macOS Gatekeeper requiring the binary to be signed and
|
||||
notarized it is enough to download with `curl`.
|
||||
@@ -105,7 +96,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
|
||||
|
||||
rclone config
|
||||
|
||||
### Precompiled binary, using a web browser {#macos-precompiled-web}
|
||||
## macOS installation from precompiled binary, using a web browser
|
||||
|
||||
When downloading a binary with a web browser, the browser will set the macOS
|
||||
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
|
||||
@@ -118,73 +109,11 @@ The simplest fix is to run
|
||||
|
||||
xattr -d com.apple.quarantine rclone
|
||||
|
||||
## Windows installation {#windows}
|
||||
## Install with docker
|
||||
|
||||
### Precompiled binary {#windows-precompiled}
|
||||
|
||||
Fetch the correct binary for your processor type by clicking on these
|
||||
links. If not sure, use the first link.
|
||||
|
||||
- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-windows-amd64.zip)
|
||||
- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-windows-386.zip)
|
||||
- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-windows-arm64.zip)
|
||||
|
||||
Open this file in the Explorer and extract `rclone.exe`. Rclone is a
|
||||
portable executable so you can place it wherever is convenient.
|
||||
|
||||
Open a CMD window (or PowerShell) and run the binary. Note that rclone
|
||||
does not launch a GUI by default; it runs in the CMD window.
|
||||
|
||||
- Run `rclone.exe config` to setup. See [rclone config docs](/docs/) for more details.
|
||||
- Optionally configure [automatic execution](#autostart).
|
||||
|
||||
If you are planning to use the [rclone mount](/commands/rclone_mount/)
|
||||
feature then you will need to install the third party utility
|
||||
[WinFsp](https://winfsp.dev/) also.
|
||||
|
||||
### Chocolatey package manager {#windows-chocolatey}
|
||||
|
||||
Make sure you have [Choco](https://chocolatey.org/) installed
|
||||
|
||||
```
|
||||
choco search rclone
|
||||
choco install rclone
|
||||
```
|
||||
|
||||
This will install rclone on your Windows machine. If you are planning
|
||||
to use [rclone mount](/commands/rclone_mount/) then
|
||||
|
||||
```
|
||||
choco install winfsp
|
||||
```
|
||||
|
||||
will install that too.
|
||||
|
||||
Note that this is a third party installer not controlled by the rclone
|
||||
developers so it may be out of date. Its current version is as below.
|
||||
|
||||
[](https://repology.org/project/rclone/versions)
|
||||
|
||||
## Package manager installation {#package-manager}
|
||||
|
||||
Many Linux, Windows, macOS and other OS distributions package and
|
||||
distribute rclone.
|
||||
|
||||
The distributed versions of rclone are often quite out of date and for
|
||||
this reason we recommend one of the other installation methods if
|
||||
possible.
|
||||
|
||||
You can get an idea of how up to date or not your OS distribution's
|
||||
package is here.
|
||||
|
||||
[](https://repology.org/project/rclone/versions)
|
||||
|
||||
## Docker installation {#docker}
|
||||
|
||||
The rclone developers maintain a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
|
||||
|
||||
These images are built as part of the release process based on a
|
||||
minimal Alpine Linux.
|
||||
The rclone maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
|
||||
These images are autobuilt by docker hub from the rclone source based
|
||||
on a minimal Alpine linux image.
|
||||
|
||||
The `:latest` tag will always point to the latest stable release. You
|
||||
can use the `:beta` tag to get the latest build from master. You can
|
||||
@@ -259,7 +188,16 @@ ls ~/data/mount
|
||||
kill %1
|
||||
```
|
||||
|
||||
## Source installation {#source}
|
||||
## Install on Windows via Chocolatey Package Manager
|
||||
|
||||
Make sure you have [Choco](https://chocolatey.org/) installed
|
||||
```
|
||||
choco search rclone
|
||||
choco install rclone
|
||||
```
|
||||
This will install rclone on your Windows machine.
|
||||
|
||||
## Install from source
|
||||
|
||||
Make sure you have git and [Go](https://golang.org/) installed.
|
||||
Go version 1.17 or newer is required, latest release is recommended.
|
||||
@@ -278,7 +216,7 @@ in the same folder. As an initial check you can now run `./rclone version`
|
||||
(`.\rclone version` on Windows).
|
||||
|
||||
Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/)
|
||||
command will not be available unless you specify an additional build tag `cmount`.
|
||||
command will not be available unless you specify additional build tag `cmount`.
|
||||
|
||||
```
|
||||
go build -tags cmount
|
||||
@@ -297,7 +235,7 @@ distribution (make sure you install it in the classic mingw64 subsystem, the
|
||||
ucrt64 version is not compatible).
|
||||
|
||||
Additionally, on Windows, you must install the third party utility
|
||||
[WinFsp](https://winfsp.dev/), with the "Developer" feature selected.
|
||||
[WinFsp](http://www.secfs.net/winfsp/), with the "Developer" feature selected.
|
||||
If building with cgo, you must also set environment variable CPATH pointing to
|
||||
the fuse include directory within the WinFsp installation
|
||||
(normally `C:\Program Files (x86)\WinFsp\inc\fuse`).
|
||||
@@ -312,10 +250,9 @@ go build -trimpath -ldflags -s -tags cmount
|
||||
```
|
||||
|
||||
Instead of executing the `go build` command directly, you can run it via the
|
||||
Makefile. It changes the version number suffix from "-DEV" to "-beta" and
|
||||
appends commit details. It also copies the resulting rclone executable into
|
||||
your GOPATH bin folder (`$(go env GOPATH)/bin`, which corresponds to
|
||||
`~/go/bin/rclone` by default).
|
||||
Makefile, which also sets version information and copies the resulting rclone
|
||||
executable into your GOPATH bin folder (`$(go env GOPATH)/bin`, which
|
||||
corresponds to `~/go/bin/rclone` by default).
|
||||
|
||||
```
|
||||
make
|
||||
@@ -327,15 +264,7 @@ To include mount command on macOS and Windows with Makefile build:
|
||||
make GOTAGS=cmount
|
||||
```
|
||||
|
||||
There are other make targets that can be used for more advanced builds,
|
||||
such as cross-compiling for all supported os/architectures, embedding
|
||||
icon and version info resources into windows executable, and packaging
|
||||
results into release artifacts.
|
||||
See [Makefile](https://github.com/rclone/rclone/blob/master/Makefile)
|
||||
and [cross-compile.go](https://github.com/rclone/rclone/blob/master/bin/cross-compile.go)
|
||||
for details.
|
||||
|
||||
Another alternative is to download the source, build and install rclone in one
|
||||
As an alternative you can download the source, build and install rclone in one
|
||||
operation, as a regular Go package. The source will be stored it in the Go
|
||||
module cache, and the resulting executable will be in your GOPATH bin folder
|
||||
(`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default).
|
||||
@@ -354,7 +283,7 @@ with the current version):
|
||||
go get github.com/rclone/rclone
|
||||
```
|
||||
|
||||
## Ansible installation {#ansible}
|
||||
## Installation with Ansible
|
||||
|
||||
This can be done with [Stefan Weichinger's ansible
|
||||
role](https://github.com/stefangweichinger/ansible-rclone).
|
||||
@@ -370,7 +299,7 @@ Instructions
|
||||
- rclone
|
||||
```
|
||||
|
||||
## Portable installation {#portable}
|
||||
## Portable installation
|
||||
|
||||
As mentioned [above](https://rclone.org/install/#quickstart), rclone is a single
|
||||
executable (`rclone`, or `rclone.exe` on Windows) that you can download as a
|
||||
|
||||
@@ -106,9 +106,6 @@ case "$OS_type" in
|
||||
aarch64|arm64)
|
||||
OS_type='arm64'
|
||||
;;
|
||||
armv7*)
|
||||
OS_type='arm-v7'
|
||||
;;
|
||||
arm*)
|
||||
OS_type='arm'
|
||||
;;
|
||||
|
||||
@@ -24,21 +24,8 @@ Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclo
|
||||
|
||||
## Configuration
|
||||
|
||||
Here is an example of making a mailru configuration.
|
||||
|
||||
First create a Mail.ru Cloud account and choose a tariff.
|
||||
|
||||
You will need to log in and create an app password for rclone. Rclone
|
||||
**will not work** with your normal username and password - it will
|
||||
give an error like `oauth2: server response missing access_token`.
|
||||
|
||||
- Click on your user icon in the top right
|
||||
- Go to Security / "Пароль и безопасность"
|
||||
- Click password for apps / "Пароли для внешних приложений"
|
||||
- Add the password - give it a name - eg "rclone"
|
||||
- Copy the password and use this password below - your normal login password won't work.
|
||||
|
||||
Now run
|
||||
Here is an example of making a mailru configuration. First create a Mail.ru Cloud
|
||||
account and choose a tariff, then run
|
||||
|
||||
rclone config
|
||||
|
||||
@@ -64,10 +51,6 @@ User name (usually email)
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
user> username@mail.ru
|
||||
Password
|
||||
|
||||
This must be an app password - rclone will not work with your normal
|
||||
password. See the Configuration section in the docs for how to make an
|
||||
app password.
|
||||
y) Yes type in my own password
|
||||
g) Generate random password
|
||||
y/g> y
|
||||
@@ -190,11 +173,6 @@ Properties:
|
||||
|
||||
Password.
|
||||
|
||||
This must be an app password - rclone will not work with your normal
|
||||
password. See the Configuration section in the docs for how to make an
|
||||
app password.
|
||||
|
||||
|
||||
**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
|
||||
|
||||
Properties:
|
||||
|
||||
@@ -1,533 +0,0 @@
|
||||
---
|
||||
title: "Oracle Object Storage"
|
||||
description: "Rclone docs for Oracle Object Storage"
|
||||
---
|
||||
|
||||
# {{< icon "fa fa-cloud" >}} Oracle Object Storage
|
||||
|
||||
[Oracle Object Storage Overview](https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/objectstorageoverview.htm)
|
||||
|
||||
[Oracle Object Storage FAQ](https://www.oracle.com/cloud/storage/object-storage/faq/)
|
||||
|
||||
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
|
||||
command.) You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
|
||||
|
||||
## Configuration
|
||||
|
||||
Here is an example of making an Oracle Object Storage configuration. `rclone config` walks you
|
||||
through it.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
|
||||
```
|
||||
n) New remote
|
||||
d) Delete remote
|
||||
r) Rename remote
|
||||
c) Copy remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
e/n/d/r/c/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> remote
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Oracle Cloud Infrastructure Object Storage
|
||||
\ (oracleobjectstorage)
|
||||
Storage> oracleobjectstorage
|
||||
|
||||
Option provider.
|
||||
Choose your Auth Provider
|
||||
Choose a number from below, or type in your own string value.
|
||||
Press Enter for the default (env_auth).
|
||||
1 / automatically pickup the credentials from runtime(env), first one to provide auth wins
|
||||
\ (env_auth)
|
||||
/ use an OCI user and an API key for authentication.
|
||||
2 | you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
|
||||
| https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
|
||||
\ (user_principal_auth)
|
||||
/ use instance principals to authorize an instance to make API calls.
|
||||
3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
|
||||
| https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
|
||||
\ (instance_principal_auth)
|
||||
4 / use resource principals to make API calls
|
||||
\ (resource_principal_auth)
|
||||
5 / no credentials needed, this is typically for reading public buckets
|
||||
\ (no_auth)
|
||||
provider> 2
|
||||
|
||||
Option namespace.
|
||||
Object storage namespace
|
||||
Enter a value.
|
||||
namespace> idbamagbg734
|
||||
|
||||
Option compartment.
|
||||
Object storage compartment OCID
|
||||
Enter a value.
|
||||
compartment> ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
|
||||
|
||||
Option region.
|
||||
Object storage Region
|
||||
Enter a value.
|
||||
region> us-ashburn-1
|
||||
|
||||
Option endpoint.
|
||||
Endpoint for Object storage API.
|
||||
Leave blank to use the default endpoint for the region.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
endpoint>
|
||||
|
||||
Option config_file.
|
||||
Path to OCI config file
|
||||
Choose a number from below, or type in your own string value.
|
||||
Press Enter for the default (~/.oci/config).
|
||||
1 / oci configuration file location
|
||||
\ (~/.oci/config)
|
||||
config_file> /etc/oci/dev.conf
|
||||
|
||||
Option config_profile.
|
||||
Profile name inside OCI config file
|
||||
Choose a number from below, or type in your own string value.
|
||||
Press Enter for the default (Default).
|
||||
1 / Use the default profile
|
||||
\ (Default)
|
||||
config_profile> Test
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: oracleobjectstorage
|
||||
- namespace: idbamagbg734
|
||||
- compartment: ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
|
||||
- region: us-ashburn-1
|
||||
- provider: user_principal_auth
|
||||
- config_file: /etc/oci/dev.conf
|
||||
- config_profile: Test
|
||||
Keep this "remote" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
See all buckets
|
||||
|
||||
rclone lsd remote:
|
||||
|
||||
Create a new bucket
|
||||
|
||||
rclone mkdir remote:bucket
|
||||
|
||||
List the contents of a bucket
|
||||
|
||||
rclone ls remote:bucket
|
||||
rclone ls remote:bucket --max-depth 1
|
||||
|
||||
### Modified time
|
||||
|
||||
The modified time is stored as metadata on the object as
|
||||
`opc-meta-mtime` as floating point since the epoch, accurate to 1 ns.
|
||||
|
||||
If the modification time needs to be updated rclone will attempt to perform a server-side
|
||||
copy to update the modification time if the object can be copied in a single part.
|
||||
If the object is larger than 5 GiB, it will be uploaded rather than copied.
|
||||
|
||||
Note that reading this from the object takes an additional `HEAD` request as the metadata
|
||||
isn't returned in object listings.
|
||||
|
||||
### Multipart uploads
|
||||
|
||||
rclone supports multipart uploads with OOS which means that it can
|
||||
upload files bigger than 5 GiB.
|
||||
|
||||
Note that files uploaded *both* with multipart upload *and* through
|
||||
crypt remotes do not have MD5 sums.
|
||||
|
||||
rclone switches from single part uploads to multipart uploads at the
|
||||
point specified by `--oos-upload-cutoff`. This can be a maximum of 5 GiB
|
||||
and a minimum of 0 (ie always upload multipart files).
|
||||
|
||||
The chunk sizes used in the multipart upload are specified by
|
||||
`--oos-chunk-size` and the number of chunks uploaded concurrently is
|
||||
specified by `--oos-upload-concurrency`.
|
||||
|
||||
Multipart uploads will use `--transfers` * `--oos-upload-concurrency` *
|
||||
`--oos-chunk-size` extra memory. Single part uploads do not use extra
|
||||
memory.
|
||||
|
||||
Single part transfers can be faster than multipart transfers or slower
|
||||
depending on your latency from oos - the more latency, the more likely
|
||||
single part transfers will be faster.
|
||||
|
||||
Increasing `--oos-upload-concurrency` will increase throughput (8 would
|
||||
be a sensible value) and increasing `--oos-chunk-size` also increases
|
||||
throughput (16M would be sensible). Increasing either of these will
|
||||
use more memory. The default values are high enough to gain most of
|
||||
the possible performance without using too much memory.
|
||||
|
||||
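As an illustrative sketch only (the bucket and path are placeholders, and the values suit a high-bandwidth, high-latency link), a tuned copy might look like:

```
# larger chunks and more parallel chunk uploads trade memory for throughput
# (roughly 4 * 8 * 16 MiB = 512 MiB of extra buffer memory)
rclone copy /data/exports remote:bucket/exports \
    --oos-chunk-size 16M \
    --oos-upload-concurrency 8 \
    --transfers 4
```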
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/oracleobjectstorage/oracleobjectstorage.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
|
||||
|
||||
#### --oos-provider
|
||||
|
||||
Choose your Auth Provider
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: provider
|
||||
- Env Var: RCLONE_OOS_PROVIDER
|
||||
- Type: string
|
||||
- Default: "env_auth"
|
||||
- Examples:
|
||||
- "env_auth"
|
||||
- automatically pickup the credentials from runtime(env), first one to provide auth wins
|
||||
- "user_principal_auth"
|
||||
- use an OCI user and an API key for authentication.
|
||||
- you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
|
||||
- https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
|
||||
- "instance_principal_auth"
|
||||
- use instance principals to authorize an instance to make API calls.
|
||||
- each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
|
||||
- https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
|
||||
- "resource_principal_auth"
|
||||
- use resource principals to make API calls
|
||||
- "no_auth"
|
||||
- no credentials needed, this is typically for reading public buckets
|
||||
|
||||
#### --oos-namespace
|
||||
|
||||
Object storage namespace
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: namespace
|
||||
- Env Var: RCLONE_OOS_NAMESPACE
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --oos-compartment
|
||||
|
||||
Object storage compartment OCID
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: compartment
|
||||
- Env Var: RCLONE_OOS_COMPARTMENT
|
||||
- Provider: !no_auth
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --oos-region
|
||||
|
||||
Object storage Region
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_OOS_REGION
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --oos-endpoint
|
||||
|
||||
Endpoint for Object storage API.
|
||||
|
||||
Leave blank to use the default endpoint for the region.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_OOS_ENDPOINT
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --oos-config-file
|
||||
|
||||
Path to OCI config file
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: config_file
|
||||
- Env Var: RCLONE_OOS_CONFIG_FILE
|
||||
- Provider: user_principal_auth
|
||||
- Type: string
|
||||
- Default: "~/.oci/config"
|
||||
- Examples:
|
||||
- "~/.oci/config"
|
||||
- oci configuration file location
|
||||
|
||||
#### --oos-config-profile
|
||||
|
||||
Profile name inside the oci config file
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: config_profile
|
||||
- Env Var: RCLONE_OOS_CONFIG_PROFILE
|
||||
- Provider: user_principal_auth
|
||||
- Type: string
|
||||
- Default: "Default"
|
||||
- Examples:
|
||||
- "Default"
|
||||
- Use the default profile
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
|
||||
|
||||
#### --oos-upload-cutoff
|
||||
|
||||
Cutoff for switching to chunked upload.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
The minimum is 0 and the maximum is 5 GiB.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_cutoff
|
||||
- Env Var: RCLONE_OOS_UPLOAD_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 200Mi
|
||||
|
||||
#### --oos-chunk-size
|
||||
|
||||
Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff or files with unknown
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
|
||||
photos or google docs) they will be uploaded as multipart uploads
|
||||
using this chunk size.
|
||||
|
||||
Note that "upload_concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
|
||||
If you are transferring large files over high-speed links and you have
|
||||
enough memory, then increasing this will speed up the transfers.
|
||||
|
||||
Rclone will automatically increase the chunk size when uploading a
|
||||
large file of known size to stay below the 10,000 chunks limit.
|
||||
|
||||
Files of unknown size are uploaded with the configured
|
||||
chunk_size. Since the default chunk size is 5 MiB and there can be at
|
||||
most 10,000 chunks, this means that by default the maximum size of
|
||||
a file you can stream upload is 48 GiB. If you wish to stream upload
|
||||
larger files then you will need to increase chunk_size.
|
||||
|
||||
Increasing the chunk size decreases the accuracy of the progress
|
||||
statistics displayed with "-P" flag.
|
||||
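For instance, a hypothetical stream upload of a dump that may exceed 48 GiB would need a larger chunk size; a sketch (the database name and object path are placeholders):

```
# 16M chunks * 10,000 parts allows streamed uploads up to roughly 156 GiB
pg_dump mydb | rclone rcat --oos-chunk-size 16M remote:bucket/backups/mydb.sql
```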
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_OOS_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
- Default: 5Mi
|
||||
|
||||
#### --oos-upload-concurrency
|
||||
|
||||
Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
this may help to speed up the transfers.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: upload_concurrency
|
||||
- Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 10
|
||||
|
||||
#### --oos-copy-cutoff
|
||||
|
||||
Cutoff for switching to multipart copy.
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of this size.
|
||||
|
||||
The minimum is 0 and the maximum is 5 GiB.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: copy_cutoff
|
||||
- Env Var: RCLONE_OOS_COPY_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 4.656Gi
|
||||
|
||||
#### --oos-copy-timeout
|
||||
|
||||
Timeout for copy.
|
||||
|
||||
Copy is an asynchronous operation; specify a timeout to wait for the copy to succeed
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: copy_timeout
|
||||
- Env Var: RCLONE_OOS_COPY_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 1m0s
|
||||
|
||||
#### --oos-disable-checksum
|
||||
|
||||
Don't store MD5 checksum with object metadata.
|
||||
|
||||
Normally rclone will calculate the MD5 checksum of the input before
|
||||
uploading it so it can add it to metadata on the object. This is great
|
||||
for data integrity checking but can cause long delays for large files
|
||||
to start uploading.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: disable_checksum
|
||||
- Env Var: RCLONE_OOS_DISABLE_CHECKSUM
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --oos-encoding
|
||||
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_OOS_ENCODING
|
||||
- Type: MultiEncoder
|
||||
- Default: Slash,InvalidUtf8,Dot
|
||||
|
||||
#### --oos-leave-parts-on-error
|
||||
|
||||
If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
|
||||
|
||||
It should be set to true for resuming uploads across different sessions.
|
||||
|
||||
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
|
||||
additional costs if not cleaned up.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: leave_parts_on_error
|
||||
- Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --oos-no-check-bucket
|
||||
|
||||
If set, don't attempt to check the bucket exists or create it.
|
||||
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
|
||||
It can also be needed if the user you are using does not have bucket
|
||||
creation permissions.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: no_check_bucket
|
||||
- Env Var: RCLONE_OOS_NO_CHECK_BUCKET
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
## Backend commands
|
||||
|
||||
Here are the commands specific to the oracleobjectstorage backend.
|
||||
|
||||
Run them with
|
||||
|
||||
rclone backend COMMAND remote:
|
||||
|
||||
The help below will explain what arguments each command takes.
|
||||
|
||||
See the [backend](/commands/rclone_backend/) command for more
|
||||
info on how to pass options and arguments.
|
||||
|
||||
These can be run on a running backend using the rc command
|
||||
[backend/command](/rc/#backend-command).
|
||||
|
||||
### rename
|
||||
|
||||
change the name of an object
|
||||
|
||||
rclone backend rename remote: [options] [<arguments>+]
|
||||
|
||||
This command can be used to rename an object.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
|
||||
|
||||
|
||||
### list-multipart-uploads
|
||||
|
||||
List the unfinished multipart uploads
|
||||
|
||||
rclone backend list-multipart-uploads remote: [options] [<arguments>+]
|
||||
|
||||
This command lists the unfinished multipart uploads in JSON format.
|
||||
|
||||
rclone backend list-multipart-uploads oos:bucket/path/to/object
|
||||
|
||||
It returns a dictionary of buckets with values as lists of unfinished
|
||||
multipart uploads.
|
||||
|
||||
You can call it with no bucket in which case it lists all buckets, with
|
||||
a bucket or with a bucket and path.
|
||||
|
||||
{
|
||||
"test-bucket": [
|
||||
{
|
||||
"namespace": "test-namespace",
|
||||
"bucket": "test-bucket",
|
||||
"object": "600m.bin",
|
||||
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
|
||||
"timeCreated": "2022-07-29T06:21:16.595Z",
|
||||
"storageTier": "Standard"
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
### cleanup
|
||||
|
||||
Remove unfinished multipart uploads.
|
||||
|
||||
rclone backend cleanup remote: [options] [<arguments>+]
|
||||
|
||||
This command removes unfinished multipart uploads of age greater than
|
||||
max-age which defaults to 24 hours.
|
||||
|
||||
Note that you can use -i/--dry-run with this command to see what it
|
||||
would do.
|
||||
|
||||
rclone backend cleanup oos:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
|
||||
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
|
||||
|
||||
Options:
|
||||
|
||||
- "max-age": Max age of upload to delete
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
@@ -32,6 +32,7 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| HDFS | - | R/W | No | No | - | - |
|
||||
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
|
||||
| HTTP | - | R | No | No | R | - |
|
||||
| Hubic | MD5 | R/W | No | No | R/W | - |
|
||||
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
|
||||
| Jottacloud | MD5 | R/W | Yes | No | R | - |
|
||||
| Koofr | MD5 | - | Yes | No | - | - |
|
||||
@@ -42,7 +43,6 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| Microsoft OneDrive | SHA1 ⁵ | R/W | Yes | No | R | - |
|
||||
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
|
||||
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
|
||||
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
|
||||
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
|
||||
| premiumize.me | - | - | Yes | No | R | - |
|
||||
| put.io | CRC-32 | R/W | No | Yes | R | - |
|
||||
@@ -50,7 +50,6 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| Seafile | - | - | No | No | - | - |
|
||||
| SFTP | MD5, SHA1 ² | R/W | Depends | No | - | - |
|
||||
| Sia | - | - | No | No | - | - |
|
||||
| SMB | - | - | Yes | No | - | - |
|
||||
| SugarSync | - | - | No | No | - | - |
|
||||
| Storj | - | R | No | No | - | - |
|
||||
| Uptobox | - | - | No | Yes | - | - |
|
||||
@@ -484,6 +483,7 @@ upon backend-specific capabilities.
|
||||
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
|
||||
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | Yes |
|
||||
| HTTP | No | No | No | No | No | No | No | No | No | Yes |
|
||||
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
|
||||
| Internet Archive | No | Yes | No | No | Yes | Yes | No | Yes | Yes | No |
|
||||
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
|
||||
| Koofr | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | Yes |
|
||||
@@ -494,7 +494,6 @@ upon backend-specific capabilities.
|
||||
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
|
||||
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
|
||||
| OpenStack Swift | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
|
||||
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | No | No | No |
|
||||
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
|
||||
| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes |
|
||||
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | Yes | Yes |
|
||||
@@ -502,7 +501,6 @@ upon backend-specific capabilities.
|
||||
| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
|
||||
| SFTP | No | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
|
||||
| Sia | No | No | No | No | No | No | Yes | No | No | Yes |
|
||||
| SMB | No | No | Yes | Yes | No | No | Yes | No | No | Yes |
|
||||
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes |
|
||||
| Storj | Yes † | No | Yes | No | No | Yes | Yes | No | No | No |
|
||||
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No |
|
||||
@@ -516,7 +514,7 @@ upon backend-specific capabilities.
|
||||
This deletes a directory quicker than just deleting all the files in
|
||||
the directory.
|
||||
|
||||
† Note Swift and Storj implement this in order to delete
|
||||
† Note Swift, Hubic, and Storj implement this in order to delete
|
||||
directory markers but they don't actually have a quicker way of deleting
|
||||
files other than deleting them individually.
|
||||
|
||||
|
||||
@@ -41,11 +41,6 @@ SSL PEM Private key
|
||||
|
||||
Maximum size of request header (default 4096)
|
||||
|
||||
### --rc-min-tls-version=VALUE
|
||||
|
||||
The minimum TLS version that is acceptable. Valid values are "tls1.0",
|
||||
"tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
|
||||
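For example, a sketch of starting the remote control server with TLS restricted to 1.2 or later (the certificate and key paths are placeholders):

```
# serve the rc API over TLS, refusing anything older than TLS 1.2
rclone rcd --rc-cert server.crt --rc-key server.key --rc-min-tls-version tls1.2
```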
|
||||
### --rc-user=VALUE
|
||||
|
||||
User name for authentication.
|
||||
@@ -982,12 +977,6 @@ Parameters:
|
||||
|
||||
- jobid - id of the job (integer).
|
||||
|
||||
### job/stopgroup: Stop all running jobs in a group {#job-stopgroup}
|
||||
|
||||
Parameters:
|
||||
|
||||
- group - name of the group (string).
|
||||
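A minimal sketch of calling this over the rc API (the group name `myjobs` is just a placeholder for whatever group the jobs were started with):

```
rclone rc job/stopgroup group=myjobs
```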
|
||||
### mount/listmounts: Show current mount points {#mount-listmounts}
|
||||
|
||||
This shows currently mounted points, which can be used for performing an unmount.
|
||||
@@ -1063,11 +1052,9 @@ Example:
|
||||
|
||||
**Authentication is required for this call.**
|
||||
|
||||
### mount/unmountall: Unmount all active mounts {#mount-unmountall}
|
||||
### mount/unmountall: Show current mount points {#mount-unmountall}
|
||||
|
||||
rclone allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with
|
||||
FUSE.
|
||||
This shows currently mounted points, which can be used for performing an unmount.
|
||||
|
||||
This takes no parameters and returns error if unmount does not succeed.
|
||||
|
||||
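For example, a sketch of listing and then clearing all mounts via the rc interface (assuming an rc server is already running and any required authentication flags are supplied):

```
rclone rc mount/listmounts
rclone rc mount/unmountall
```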
|
||||
@@ -21,7 +21,6 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}}
|
||||
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
|
||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
|
||||
@@ -641,7 +640,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
|
||||
Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi).
|
||||
|
||||
#### --s3-provider
|
||||
|
||||
@@ -676,8 +675,6 @@ Properties:
|
||||
- IBM COS S3
|
||||
- "IDrive"
|
||||
- IDrive e2
|
||||
- "IONOS"
|
||||
- IONOS Cloud
|
||||
- "LyveCloud"
|
||||
- Seagate Lyve Cloud
|
||||
- "Minio"
|
||||
@@ -698,8 +695,6 @@ Properties:
|
||||
- Tencent Cloud Object Storage (COS)
|
||||
- "Wasabi"
|
||||
- Wasabi Object Storage
|
||||
- "Qiniu"
|
||||
- Qiniu Object Storage (Kodo)
|
||||
- "Other"
|
||||
- Any other S3 compatible provider
|
||||
|
||||
@@ -970,68 +965,13 @@ Properties:
|
||||
|
||||
Region to connect to.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_S3_REGION
|
||||
- Provider: Qiniu
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "cn-east-1"
|
||||
- The default endpoint - a good choice if you are unsure.
|
||||
- East China Region 1.
|
||||
- Needs location constraint cn-east-1.
|
||||
- "cn-east-2"
|
||||
- East China Region 2.
|
||||
- Needs location constraint cn-east-2.
|
||||
- "cn-north-1"
|
||||
- North China Region 1.
|
||||
- Needs location constraint cn-north-1.
|
||||
- "cn-south-1"
|
||||
- South China Region 1.
|
||||
- Needs location constraint cn-south-1.
|
||||
- "us-north-1"
|
||||
- North America Region.
|
||||
- Needs location constraint us-north-1.
|
||||
- "ap-southeast-1"
|
||||
- Southeast Asia Region 1.
|
||||
- Needs location constraint ap-southeast-1.
|
||||
- "ap-northeast-1"
|
||||
- Northeast Asia Region 1.
|
||||
- Needs location constraint ap-northeast-1.
|
||||
|
||||
#### --s3-region
|
||||
|
||||
Region where your bucket will be created and your data stored.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_S3_REGION
|
||||
- Provider: IONOS
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "de"
|
||||
- Frankfurt, Germany
|
||||
- "eu-central-2"
|
||||
- Berlin, Germany
|
||||
- "eu-south-2"
|
||||
- Logrono, Spain
|
||||
|
||||
#### --s3-region
|
||||
|
||||
Region to connect to.
|
||||
|
||||
Leave blank if you are using an S3 clone and you don't have a region.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_S3_REGION
|
||||
- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
|
||||
- Provider: !AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
@@ -1289,27 +1229,6 @@ Properties:
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for IONOS S3 Object Storage.
|
||||
|
||||
Specify the endpoint from the same region.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Provider: IONOS
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "s3-eu-central-1.ionoscloud.com"
|
||||
- Frankfurt, Germany
|
||||
- "s3-eu-central-2.ionoscloud.com"
|
||||
- Berlin, Germany
|
||||
- "s3-eu-south-2.ionoscloud.com"
|
||||
- Logrono, Spain
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for OSS API.
|
||||
|
||||
Properties:
|
||||
@@ -1575,33 +1494,6 @@ Properties:
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for Qiniu Object Storage.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Provider: Qiniu
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "s3-cn-east-1.qiniucs.com"
|
||||
- East China Endpoint 1
|
||||
- "s3-cn-east-2.qiniucs.com"
|
||||
- East China Endpoint 2
|
||||
- "s3-cn-north-1.qiniucs.com"
|
||||
- North China Endpoint 1
|
||||
- "s3-cn-south-1.qiniucs.com"
|
||||
- South China Endpoint 1
|
||||
- "s3-us-north-1.qiniucs.com"
|
||||
- North America Endpoint 1
|
||||
- "s3-ap-southeast-1.qiniucs.com"
|
||||
- Southeast Asia Endpoint 1
|
||||
- "s3-ap-northeast-1.qiniucs.com"
|
||||
- Northeast Asia Endpoint 1
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for S3 API.
|
||||
|
||||
Required when using an S3 clone.
|
||||
@@ -1610,7 +1502,7 @@ Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Provider: !AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu
|
||||
- Provider: !AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
@@ -1631,31 +1523,15 @@ Properties:
|
||||
- "s3.ap-southeast-1.lyvecloud.seagate.com"
|
||||
- Seagate Lyve Cloud AP Southeast 1 (Singapore)
|
||||
- "s3.wasabisys.com"
|
||||
- Wasabi US East 1 (N. Virginia)
|
||||
- "s3.us-east-2.wasabisys.com"
|
||||
- Wasabi US East 2 (N. Virginia)
|
||||
- "s3.us-central-1.wasabisys.com"
|
||||
- Wasabi US Central 1 (Texas)
|
||||
- Wasabi US East endpoint
|
||||
- "s3.us-west-1.wasabisys.com"
|
||||
- Wasabi US West 1 (Oregon)
|
||||
- "s3.ca-central-1.wasabisys.com"
|
||||
- Wasabi CA Central 1 (Toronto)
|
||||
- Wasabi US West endpoint
|
||||
- "s3.eu-central-1.wasabisys.com"
|
||||
- Wasabi EU Central 1 (Amsterdam)
|
||||
- "s3.eu-central-2.wasabisys.com"
|
||||
- Wasabi EU Central 2 (Frankfurt)
|
||||
- "s3.eu-west-1.wasabisys.com"
|
||||
- Wasabi EU West 1 (London)
|
||||
- "s3.eu-west-2.wasabisys.com"
|
||||
- Wasabi EU West 2 (Paris)
|
||||
- Wasabi EU Central endpoint
|
||||
- "s3.ap-northeast-1.wasabisys.com"
|
||||
- Wasabi AP Northeast 1 (Tokyo) endpoint
|
||||
- "s3.ap-northeast-2.wasabisys.com"
|
||||
- Wasabi AP Northeast 2 (Osaka) endpoint
|
||||
- "s3.ap-southeast-1.wasabisys.com"
|
||||
- Wasabi AP Southeast 1 (Singapore)
|
||||
- "s3.ap-southeast-2.wasabisys.com"
|
||||
- Wasabi AP Southeast 2 (Sydney)
|
||||
- "s3.ir-thr-at1.arvanstorage.com"
|
||||
- ArvanCloud Tehran Iran (Asiatech) endpoint
|
||||
|
||||
@@ -1953,42 +1829,13 @@ Properties:
|
||||
|
||||
Location constraint - must be set to match the Region.
|
||||
|
||||
Used when creating buckets only.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: location_constraint
|
||||
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
|
||||
- Provider: Qiniu
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "cn-east-1"
|
||||
- East China Region 1
|
||||
- "cn-east-2"
|
||||
- East China Region 2
|
||||
- "cn-north-1"
|
||||
- North China Region 1
|
||||
- "cn-south-1"
|
||||
- South China Region 1
|
||||
- "us-north-1"
|
||||
- North America Region 1
|
||||
- "ap-southeast-1"
|
||||
- Southeast Asia Region 1
|
||||
- "ap-northeast-1"
|
||||
- Northeast Asia Region 1
|
||||
|
||||
#### --s3-location-constraint
|
||||
|
||||
Location constraint - must be set to match the Region.
|
||||
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: location_constraint
|
||||
- Env Var: RCLONE_S3_LOCATION_CONSTRAINT
|
||||
- Provider: !AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS
|
||||
- Provider: !AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
@@ -2218,30 +2065,9 @@ Properties:
|
||||
- Archived storage.
|
||||
- Prices are lower, but it needs to be restored first to be accessed.
|
||||
|
||||
#### --s3-storage-class
|
||||
|
||||
The storage class to use when storing new objects in Qiniu.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: storage_class
|
||||
- Env Var: RCLONE_S3_STORAGE_CLASS
|
||||
- Provider: Qiniu
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "STANDARD"
|
||||
- Standard storage class
|
||||
- "LINE"
|
||||
- Infrequent access storage mode
|
||||
- "GLACIER"
|
||||
- Archive storage mode
|
||||
- "DEEP_ARCHIVE"
|
||||
- Deep archive storage mode
|
||||
|
||||
### Advanced options

Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi).

#### --s3-bucket-acl

@@ -2304,9 +2130,7 @@ Properties:

#### --s3-sse-customer-key

To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.

Alternatively you can provide --sse-customer-key-base64.
If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.

Properties:

@@ -2319,23 +2143,6 @@ Properties:
    - ""
        - None

#### --s3-sse-customer-key-base64

If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.

Alternatively you can provide --sse-customer-key.

Properties:

- Config: sse_customer_key_base64
- Env Var: RCLONE_S3_SSE_CUSTOMER_KEY_BASE64
- Provider: AWS,Ceph,ChinaMobile,Minio
- Type: string
- Required: false
- Examples:
    - ""
        - None

#### --s3-sse-customer-key-md5

If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
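A minimal sketch of supplying an SSE-C key for a single transfer, assuming a remote named `s3remote:`; the key is generated on the fly purely for illustration, and you must keep the real key somewhere safe because the data cannot be read without it:

```
# Generate a random 256-bit key in base64 and use it for this transfer only.
SSE_KEY_B64="$(openssl rand -base64 32)"
rclone copy \
  --s3-sse-customer-algorithm AES256 \
  --s3-sse-customer-key-base64 "$SSE_KEY_B64" \
  /local/data s3remote:bucket/encrypted
```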
@@ -2855,67 +2662,6 @@ Properties:
- Type: Time
- Default: off

#### --s3-decompress

If set this will decompress gzip encoded objects.

It is possible to upload objects to S3 with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed.


Properties:

- Config: decompress
- Env Var: RCLONE_S3_DECOMPRESS
- Type: bool
- Default: false

#### --s3-might-gzip

Set this if the backend might gzip objects.

Normally providers will not alter objects when they are downloaded. If
an object was not uploaded with `Content-Encoding: gzip` then it won't
be set on download.

However some providers may gzip objects even if they weren't uploaded
with `Content-Encoding: gzip` (eg Cloudflare).

A symptom of this would be receiving errors like

    ERROR corrupted on transfer: sizes differ NNN vs MMM

If you set this flag and rclone downloads an object with
Content-Encoding: gzip set and chunked transfer encoding, then rclone
will decompress the object on the fly.

If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.


Properties:

- Config: might_gzip
- Env Var: RCLONE_S3_MIGHT_GZIP
- Type: Tristate
- Default: unset

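To make the two flags concrete, here are a couple of hypothetical invocations (the remote name `r2:` and the paths are placeholders):

```
# Decompress objects that were uploaded with "Content-Encoding: gzip" while
# downloading them (size and hash checks are skipped for those objects).
rclone copy --s3-decompress r2:bucket/logs /tmp/logs

# Declare that this provider may gzip objects behind rclone's back,
# overriding the provider default for the might_gzip tristate.
rclone copy --s3-might-gzip=true r2:bucket/assets /tmp/assets
```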
#### --s3-no-system-metadata

Suppress setting and reading of system metadata

Properties:

- Config: no_system_metadata
- Env Var: RCLONE_S3_NO_SYSTEM_METADATA
- Type: bool
- Default: false

### Metadata

User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.
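As an aside (not part of the generated docs), metadata can be inspected and supplied from the command line; the remote, path and key names below are illustrative:

```
# Include object metadata in the listing output.
rclone lsjson --metadata s3remote:bucket/path

# Add an extra user metadata key (stored as x-amz-meta-mykey) on upload.
rclone copy --metadata-set mykey=myvalue /local/file s3remote:bucket/path
```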
@@ -4018,207 +3764,6 @@ So once set up, for example, to copy files into a bucket

rclone copy /path/to/files minio:bucket
```

### Qiniu Cloud Object Storage (Kodo) {#qiniu}

[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo) is an object storage service built on Qiniu's independently developed core technology. It holds a leading position in its market and can be widely applied to mass data management.

To configure access to Qiniu Kodo, follow the steps below:

1. Run `rclone config` and select `n` for a new remote.

```
rclone config
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```

2. Give the name of the configuration. For example, name it 'qiniu'.

```
name> qiniu
```

3. Select `s3` storage.

```
Choose a number from below, or type in your own value
 1 / 1Fichier
   \ (fichier)
 2 / Akamai NetStorage
   \ (netstorage)
 3 / Alias for an existing remote
   \ (alias)
 4 / Amazon Drive
   \ (amazon cloud drive)
 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi
   \ (s3)
[snip]
Storage> s3
```

4. Select `Qiniu` provider.
```
Choose a number from below, or type in your own value
 1 / Amazon Web Services (AWS) S3
   \ "AWS"
[snip]
22 / Qiniu Object Storage (Kodo)
   \ (Qiniu)
[snip]
provider> Qiniu
```

5. Enter your Qiniu Kodo SecretId and SecretKey.

```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
 1 / Enter AWS credentials in the next step
   \ "false"
 2 / Get AWS credentials from the environment (env vars or IAM)
   \ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```

6. Select the endpoint for Qiniu Kodo. This is the standard endpoint for the region you use.

```
   / The default endpoint - a good choice if you are unsure.
 1 | East China Region 1.
   | Needs location constraint cn-east-1.
   \ (cn-east-1)
   / East China Region 2.
 2 | Needs location constraint cn-east-2.
   \ (cn-east-2)
   / North China Region 1.
 3 | Needs location constraint cn-north-1.
   \ (cn-north-1)
   / South China Region 1.
 4 | Needs location constraint cn-south-1.
   \ (cn-south-1)
   / North America Region.
 5 | Needs location constraint us-north-1.
   \ (us-north-1)
   / Southeast Asia Region 1.
 6 | Needs location constraint ap-southeast-1.
   \ (ap-southeast-1)
   / Northeast Asia Region 1.
 7 | Needs location constraint ap-northeast-1.
   \ (ap-northeast-1)
[snip]
endpoint> 1

Option endpoint.
Endpoint for Qiniu Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / East China Endpoint 1
   \ (s3-cn-east-1.qiniucs.com)
 2 / East China Endpoint 2
   \ (s3-cn-east-2.qiniucs.com)
 3 / North China Endpoint 1
   \ (s3-cn-north-1.qiniucs.com)
 4 / South China Endpoint 1
   \ (s3-cn-south-1.qiniucs.com)
 5 / North America Endpoint 1
   \ (s3-us-north-1.qiniucs.com)
 6 / Southeast Asia Endpoint 1
   \ (s3-ap-southeast-1.qiniucs.com)
 7 / Northeast Asia Endpoint 1
   \ (s3-ap-northeast-1.qiniucs.com)
endpoint> 1

Option location_constraint.
Location constraint - must be set to match the Region.
Used when creating buckets only.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / East China Region 1
   \ (cn-east-1)
 2 / East China Region 2
   \ (cn-east-2)
 3 / North China Region 1
   \ (cn-north-1)
 4 / South China Region 1
   \ (cn-south-1)
 5 / North America Region 1
   \ (us-north-1)
 6 / Southeast Asia Region 1
   \ (ap-southeast-1)
 7 / Northeast Asia Region 1
   \ (ap-northeast-1)
location_constraint> 1
```

7. Choose the ACL and storage class.

```
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
   / Owner gets FULL_CONTROL.
 1 | No one else has access rights (default).
   \ (private)
   / Owner gets FULL_CONTROL.
 2 | The AllUsers group gets READ access.
   \ (public-read)
[snip]
acl> 2
The storage class to use when storing new objects in Qiniu.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
 1 / Standard storage class
   \ (STANDARD)
 2 / Infrequent access storage mode
   \ (LINE)
 3 / Archive storage mode
   \ (GLACIER)
 4 / Deep archive storage mode
   \ (DEEP_ARCHIVE)
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[qiniu]
- type: s3
- provider: Qiniu
- access_key_id: xxx
- secret_access_key: xxx
- region: cn-east-1
- endpoint: s3-cn-east-1.qiniucs.com
- location_constraint: cn-east-1
- acl: public-read
- storage_class: STANDARD
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:

Name                 Type
====                 ====
qiniu                s3
```

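Once the new remote is saved, it can be used like any other rclone remote; for example (the bucket name is a placeholder):

```
rclone mkdir qiniu:my-bucket                 # create a bucket
rclone copy /path/to/files qiniu:my-bucket   # upload files into it
rclone ls qiniu:my-bucket                    # list what is there
```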
### RackCorp {#RackCorp}

[RackCorp Object Storage](https://www.rackcorp.com/storage/s3storage) is an S3 compatible object storage platform from your friendly cloud provider RackCorp.

@@ -789,24 +789,19 @@ Properties:

Upload and download chunk size.

This controls the maximum size of payload in SFTP protocol packets.
The RFC limits this to 32768 bytes (32k), which is the default. However,
a lot of servers support larger sizes, typically limited to a maximum
total packet size of 256k, and setting it larger will increase transfer
speed dramatically on high latency links. This includes OpenSSH, and,
for example, using the value of 255k works well, leaving plenty of room
for overhead while still being within a total packet size of 256k.
This controls the maximum packet size used in the SFTP protocol. The
RFC limits this to 32768 bytes (32k), however a lot of servers
support larger sizes and setting it larger will increase transfer
speed dramatically on high latency links.

Make sure to test thoroughly before using a value higher than 32k,
and only use it if you always connect to the same server or after
sufficiently broad testing. If you get errors such as
"failed to send packet payload: EOF", lots of "connection lost",
or "corrupted on transfer", when copying a larger file, try lowering
the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp)
sends packets with standard 32k maximum payload so you must not
set a different chunk_size when downloading files, but it accepts
packets up to the 256k total size, so for uploads the chunk_size
can be set as for the OpenSSH example above.
Only use a setting higher than 32k if you always connect to the same
server or after sufficiently broad testing.

For example using the value of 252k with OpenSSH works well with its
maximum packet size of 256k.

If you get the error "failed to send packet header: EOF" when copying
a large file, try lowering this number.


Properties:

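As a sketch of raising this for a server known to cope with larger packets (the flag name follows from the `chunk_size` option above; the remote name is a placeholder):

```
# Try a larger chunk size against a trusted OpenSSH server; drop back to the
# 32k default if you see "failed to send packet payload: EOF" or similar.
rclone copy --sftp-chunk-size 255k /local/dir sftpremote:backup
```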
@@ -1,231 +0,0 @@
---
title: "SMB / CIFS"
description: "Rclone docs for SMB backend"
---

# {{< icon "fa fa-server" >}} SMB

SMB is [a communication protocol to share files over a network](https://en.wikipedia.org/wiki/Server_Message_Block).

This backend relies on the [go-smb2 library](https://github.com/hirochachacha/go-smb2/) for communication with the SMB protocol.

Paths are specified as `remote:sharename` (or `remote:` for the `lsd`
command). You may put subdirectories in too, e.g. `remote:item/path/to/dir`.

## Notes

The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in the `smb.conf` file (usually in `/etc/samba/`).
You can find shares by querying the root if you're unsure (e.g. `rclone lsd remote:`).

You can't access shared printers from rclone.

You can't use Anonymous access for logging in. You have to use the `guest` user with an empty password instead.
The rclone client tries to avoid 8.3 names when uploading files by encoding trailing spaces and periods.
Alternatively, [the local backend](/local/#paths-on-windows) on Windows can access SMB servers using UNC paths, by `\\server\share`. This doesn't apply to non-Windows OSes, such as Linux and macOS.

## Configuration

Here is an example of making an SMB configuration.

First run

    rclone config

This will guide you through an interactive setup process.

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / SMB / CIFS
   \ (smb)
Storage> smb

Option host.
Samba hostname to connect to.
E.g. "example.com".
Enter a value.
host> localhost

Option user.
Samba username.
Enter a string value. Press Enter for the default (lesmi).
user> guest

Option port.
Samba port number.
Enter a signed integer. Press Enter for the default (445).
port>

Option pass.
Samba password.
Choose an alternative below. Press Enter for the default (n).
y) Yes, type in my own password
g) Generate random password
n) No, leave this optional password blank (default)
y/g/n> g
Password strength in bits.
64 is just about memorable
128 is secure
1024 is the maximum
Bits> 64
Your password is: XXXX
Use this password? Please note that an obscured version of this
password (and not the password itself) will be stored under your
configuration file, so keep this generated password in a safe place.
y) Yes (default)
n) No
y/n> y

Option domain.
Domain name for NTLM authentication.
Enter a string value. Press Enter for the default (WORKGROUP).
domain>

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: samba
- host: localhost
- user: guest
- pass: *** ENCRYPTED ***
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> d
```

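Once configured, the share name is the first path segment, so typical usage looks like this (the share and directory names are examples):

```
rclone lsd remote:                               # list the shares on the server
rclone ls remote:sharename/some/dir              # list files inside a share
rclone copy /local/dir remote:sharename/backup   # copy a local directory to the share
```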
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/smb/smb.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to smb (SMB / CIFS).

#### --smb-host

SMB server hostname to connect to.

E.g. "example.com".

Properties:

- Config: host
- Env Var: RCLONE_SMB_HOST
- Type: string
- Required: true

#### --smb-user

SMB username.

Properties:

- Config: user
- Env Var: RCLONE_SMB_USER
- Type: string
- Default: "$USER"

#### --smb-port

SMB port number.

Properties:

- Config: port
- Env Var: RCLONE_SMB_PORT
- Type: int
- Default: 445

#### --smb-pass

SMB password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: pass
- Env Var: RCLONE_SMB_PASS
- Type: string
- Required: false

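If you write the configuration file by hand instead of using `rclone config`, the password has to be obscured first, for example:

```
# Print the obscured form of a password for use as the "pass" value.
rclone obscure 'MySambaPassword'
```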
#### --smb-domain

Domain name for NTLM authentication.

Properties:

- Config: domain
- Env Var: RCLONE_SMB_DOMAIN
- Type: string
- Default: "WORKGROUP"

### Advanced options

Here are the Advanced options specific to smb (SMB / CIFS).

#### --smb-idle-timeout

Max time before closing idle connections.

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.


Properties:

- Config: idle_timeout
- Env Var: RCLONE_SMB_IDLE_TIMEOUT
- Type: Duration
- Default: 1m0s

#### --smb-hide-special-share

Hide special shares (e.g. print$) which users aren't supposed to access.

Properties:

- Config: hide_special_share
- Env Var: RCLONE_SMB_HIDE_SPECIAL_SHARE
- Type: bool
- Default: true

#### --smb-case-insensitive

Whether the server is configured to be case-insensitive.

Always true on Windows shares.

Properties:

- Config: case_insensitive
- Env Var: RCLONE_SMB_CASE_INSENSITIVE
- Type: bool
- Default: true

#### --smb-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_SMB_ENCODING
- Type: MultiEncoder
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot

{{< rem autogenerated options stop >}}
@@ -11,7 +11,7 @@ Commercial implementations of that being:

* [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
* [Memset Memstore](https://www.memset.com/cloud/storage/)
* [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
* [Oracle Cloud Storage](https://cloud.oracle.com/object-storage/buckets)
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)

Paths are specified as `remote:container` (or `remote:` for the `lsd`
@@ -534,38 +534,6 @@ Properties:
- Type: bool
- Default: false

#### --swift-no-large-objects

Disable support for static and dynamic large objects

Swift cannot transparently store files bigger than 5 GiB. There are
two schemes for doing that, static or dynamic large objects, and the
API does not allow rclone to determine whether a file is a static or
dynamic large object without doing a HEAD on the object. Since these
need to be treated differently, this means rclone has to issue HEAD
requests for objects for example when reading checksums.

When `no_large_objects` is set, rclone will assume that there are no
static or dynamic large objects stored. This means it can stop doing
the extra HEAD calls which in turn increases performance greatly
especially when doing a swift to swift transfer with `--checksum` set.

Setting this option implies `no_chunk` and also that no files will be
uploaded in chunks, so files bigger than 5 GiB will just fail on
upload.

If you set this option and there *are* static or dynamic large objects,
then this will give incorrect hashes for them. Downloads will succeed,
but other operations such as Remove and Copy will fail.


Properties:

- Config: no_large_objects
- Env Var: RCLONE_SWIFT_NO_LARGE_OBJECTS
- Type: bool
- Default: false

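A sketch of the kind of transfer this is aimed at (the remote and container names are placeholders); only set the flag if you are sure no large objects exist in the containers involved:

```
# Swift-to-swift sync where checksum comparison would otherwise need a HEAD
# request per object to detect large-object manifests.
rclone sync --checksum --swift-no-large-objects swift-src:container swift-dst:container
```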
#### --swift-encoding

The encoding for the backend.

@@ -42,8 +42,9 @@
© <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-{{ now.Format "2006" }}<br>
{{ if .File}}{{ with $path := strings.TrimPrefix "/" .File.Path }}Source file <a href="https://github.com/rclone/rclone/blob/master/docs/content/{{ $path }}">{{ $path }}</a>{{ end }}
last updated <a href="https://github.com/rclone/rclone/commit/{{ with .GitInfo }}{{ .Hash }}{{ end }}">{{ .Lastmod.Format "2006-01-02" }}</a><br>{{end}}
Uploaded with <a href="https://rclone.org">rclone</a>.
Built with <a href="https://github.com/spf13/hugo">Hugo</a>.
Website hosted on a <a href="https://www.memset.com/dedicated-servers/vps/"><span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET CLOUD VPS</span></a>,
uploaded with <a href="https://rclone.org">rclone</a>
and built with <a href="https://github.com/spf13/hugo">Hugo</a>.
Logo by <a href="https://twitter.com/andy23">@andy23</a>.
</p>
</div>

@@ -33,6 +33,7 @@
<p class="menu">
<i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br />
<i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br />
<i class="fab fa-slack fa-fw" aria-hidden="true"></i> <a href="https://slack-invite.rclone.org/">Rclone slack</a><br />
<i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br />
<i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/donate/">Donate</a><br />
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a>

@@ -74,6 +74,7 @@
<a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe fa-fw"></i> HDFS (Hadoop Distributed Filesystem)</a>
<a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud fa-fw"></i> HiDrive</a>
<a class="dropdown-item" href="/http/"><i class="fa fa-globe fa-fw"></i> HTTP</a>
<a class="dropdown-item" href="/hubic/"><i class="fa fa-space-shuttle fa-fw"></i> Hubic</a>
<a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive fa-fw"></i> Internet Archive</a>
<a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud fa-fw"></i> Jottacloud</a>
<a class="dropdown-item" href="/koofr/"><i class="fa fa-suitcase fa-fw"></i> Koofr</a>
@@ -85,14 +86,12 @@
<a class="dropdown-item" href="/opendrive/"><i class="fa fa-space-shuttle fa-fw"></i> OpenDrive</a>
<a class="dropdown-item" href="/qingstor/"><i class="fas fa-hdd fa-fw"></i> QingStor</a>
<a class="dropdown-item" href="/swift/"><i class="fa fa-space-shuttle fa-fw"></i> Openstack Swift</a>
<a class="dropdown-item" href="/oracleobjectstorage/"><i class="fa fa-cloud fa-fw"></i> Oracle Object Storage</a>
<a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud fa-fw"></i> pCloud</a>
<a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user fa-fw"></i> premiumize.me</a>
<a class="dropdown-item" href="/putio/"><i class="fas fa-parking fa-fw"></i> put.io</a>
<a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
<a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
<a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
<a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
<a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive fa-fw"></i> Uptobox</a>

@@ -1 +1 @@
v1.60.1
v1.60.0
@@ -1,4 +1,4 @@
package fs

// VersionTag of rclone
var VersionTag = "v1.60.1"
var VersionTag = "v1.60.0"

@@ -136,6 +136,9 @@ backends:
  - backend: "hidrive"
    remote: "TestHiDrive:"
    fastlist: false
  - backend: "hubic"
    remote: "TestHubic:"
    fastlist: false
  - backend: "internetarchive"
    remote: "TestIA:rclone-integration-test"
    fastlist: true
@@ -210,17 +213,6 @@ backends:
  - backend: "s3"
    remote: "TestS3Alibaba:"
    fastlist: true
# - backend: "s3"
#   remote: "TestS3Qiniu:"
#   fastlist: true
#   ignore:
#     - TestIntegration/FsMkdir/FsEncoding/control_chars
#     - TestIntegration/FsMkdir/FsEncoding/leading_VT
#     - TestIntegration/FsMkdir/FsEncoding/trailing_VT
#     - TestIntegration/FsMkdir/FsPutFiles/FromRoot/ListR
#     - TestIntegration/FsMkdir/FsPutFiles/SetTier
#     - TestIntegration/FsMkdir/FsPutFiles/FsPutStream/0
#     - TestIntegration/FsMkdir/FsPutFiles/Internal/Metadata
  - backend: "s3"
    remote: "TestS3R2:"
    fastlist: true
@@ -377,9 +369,6 @@ backends:
  - backend: "sia"
    remote: "TestSia:"
    fastlist: false
  - backend: "smb"
    remote: "TestSMB:rclone"
    fastlist: false
  - backend: "storj"
    remote: "TestStorj:"
    fastlist: true
@@ -396,13 +385,3 @@ backends:
    fastlist: false
    ignore:
      - TestRWFileHandleWriteNoWrite
  - backend: "oracleobjectstorage"
    remote: "TestOracleObjectStorage:"
    fastlist: true
    ignore:
      - TestIntegration/FsMkdir/FsEncoding/control_chars
      - TestIntegration/FsMkdir/FsEncoding/leading_CR
      - TestIntegration/FsMkdir/FsEncoding/leading_LF
      - TestIntegration/FsMkdir/FsEncoding/trailing_CR
      - TestIntegration/FsMkdir/FsEncoding/trailing_LF
      - TestIntegration/FsMkdir/FsEncoding/leading_HT

@@ -1,28 +0,0 @@
#!/bin/bash

set -e

NAME=smb
USER=rclone
PASS=GNF3Cqeu
WORKGROUP=thepub

. $(dirname "$0")/docker.bash

start() {
    docker run --rm -d --name $NAME dperson/samba \
        -p \
        -u "rclone;${PASS}" \
        -w "${WORKGROUP}" \
        -s "public;/share" \
        -s "rclone;/rclone;yes;no;no;rclone"

    echo type=smb
    echo host=$(docker_ip)
    echo user=$USER
    echo pass=$(rclone obscure $PASS)
    echo domain=$WORKGROUP
    echo _connect=$(docker_ip):139
}

. $(dirname "$0")/run.bash
9
go.mod
@@ -2,6 +2,8 @@ module github.com/rclone/rclone

go 1.17

replace github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a => github.com/ncw/ftp v0.0.0-20220916150959-909597844e2a

require (
	bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05
	github.com/Azure/azure-pipeline-go v0.2.3
@@ -28,7 +30,6 @@ require (
	github.com/go-chi/chi/v5 v5.0.7
	github.com/google/uuid v1.3.0
	github.com/hanwen/go-fuse/v2 v2.1.0
	github.com/hirochachacha/go-smb2 v1.1.0
	github.com/iguanesolutions/go-systemd/v5 v5.1.0
	github.com/jcmturner/gokrb5/v8 v8.4.3
	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004
@@ -40,13 +41,11 @@ require (
	github.com/mitchellh/go-homedir v1.1.0
	github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1
	github.com/ncw/swift/v2 v2.0.1
	github.com/oracle/oci-go-sdk/v65 v65.1.0
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pkg/sftp v1.13.5
	github.com/pmezard/go-difflib v1.0.0
	github.com/prometheus/client_golang v1.12.2
	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
	github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6
	github.com/rfjakob/eme v1.1.2
	github.com/shirou/gopsutil/v3 v3.22.7
	github.com/sirupsen/logrus v1.9.0
@@ -87,9 +86,7 @@ require (
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/gdamore/encoding v1.0.0 // indirect
	github.com/geoffgarside/ber v1.1.0 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
@@ -118,7 +115,6 @@ require (
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/rivo/uniseg v0.2.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/sony/gobreaker v0.5.0 // indirect
	github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
	github.com/tklauser/go-sysconf v0.3.10 // indirect
	github.com/tklauser/numcpus v0.4.0 // indirect
@@ -141,6 +137,7 @@ require (
	github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a
	github.com/pkg/xattr v0.4.7
	golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105
	golang.org/x/term v0.0.0-20220722155259-a9ba230a4035

Some files were not shown because too many files have changed in this diff.